diff --git a/.gitattributes b/.gitattributes
index e7066a724343..e001f9f8af01 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -1,3 +1,4 @@
 .aspect/** -diff
 build/bazelutil/distdir_files.bzl -diff
 pkg/BUILD.bazel -diff
+pkg/protos.bzl -diff
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 981268ccd093..818c9fbeb367 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -38,6 +38,7 @@
 /Makefile @cockroachdb/dev-inf
+#!/pkg @cockroachdb/unowned
 #!/pkg/sql/ @cockroachdb/sql-queries-noreview
 /pkg/sql/inverted/ @cockroachdb/sql-queries-prs
diff --git a/.github/workflows/github-actions-essential-ci.yml b/.github/workflows/github-actions-essential-ci.yml
index 73c2051e7a52..6bb8490b87e4 100644
--- a/.github/workflows/github-actions-essential-ci.yml
+++ b/.github/workflows/github-actions-essential-ci.yml
@@ -1,5 +1,6 @@
 name: GitHub Actions Essential CI
 on:
+  merge_group:
   pull_request:
     types: [opened, reopened, synchronize]
     branches:
@@ -346,6 +347,8 @@ jobs:
     if: always()
   cockroach-microbench-ci:
     runs-on: [ self-hosted, basic_runner_group ]
+    # TODO(sambhav-jain-16): enable this for pull requests also
+    if: ${{ github.event_name == 'push' }}
     timeout-minutes: 60
     steps:
       - uses: actions/checkout@v4
diff --git a/.gitignore b/.gitignore
index 798961c4c5a6..af6d232c838b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -21,6 +21,7 @@ artifacts
 /certs
 # make stress, acceptance produce stress.test, acceptance.test
 *.test*
+pkg/cmd/roachtest/_runner-logs
 # fuzz tests
 work-Fuzz*
 *-fuzz.zip
@@ -41,6 +42,8 @@ customenv.mk
 # Generated files containing include paths.
 zcgo_flags*.go
 build/Railroad.jar
+# Legacy `make` content.
+build/variables.mk
 # Bazel generated symlinks
 /_bazel
diff --git a/AUTHORS b/AUTHORS
index 3907b2e0654a..2f243f0c6e9b 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -434,6 +434,7 @@ Sai Ravula
 Sajjad Rizvi
 Sam Huang
 Sambhav Jain sambhav-jain-16
+Sanchit Khanna
 Sankt Petersbug
 Sean Barag sjbarag
 Sean Chittenden
@@ -533,3 +534,4 @@ Zachary Smith Zachary.smith
 Zane Teh
 何羿宏
 智雅楠
+Mohini Pandey
diff --git a/DEPS.bzl b/DEPS.bzl
index e5e5f786a0c6..65eb9b7edb6c 100644
--- a/DEPS.bzl
+++ b/DEPS.bzl
@@ -483,70 +483,100 @@ def go_deps():
         name = "com_github_aws_aws_sdk_go_v2",
         build_file_proto_mode = "disable_global",
         importpath = "github.com/aws/aws-sdk-go-v2",
-        sha256 = "a000599cb80b0466affe6baf2fa98d79258ee7a4fe05c12c7819bcd44a349bd0",
-        strip_prefix = "github.com/aws/aws-sdk-go-v2@v1.19.0",
+        sha256 = "738e20fcbcfa2f419f5adc8c4724abf0da6164f13270033a7c92d77c852fe189",
+        strip_prefix = "github.com/aws/aws-sdk-go-v2@v1.30.4",
         urls = [
-            "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/com_github_aws_aws_sdk_go_v2-v1.19.0.zip",
+            "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/com_github_aws_aws_sdk_go_v2-v1.30.4.zip",
+        ],
+    )
+    go_repository(
+        name = "com_github_aws_aws_sdk_go_v2_aws_protocol_eventstream",
+        build_file_proto_mode = "disable_global",
+        importpath = "github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream",
+        sha256 = "480949d2a72f1c6172b9bf01c289a5e0850ec62dadd62a6f1e03708551a07210",
+        strip_prefix = "github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream@v1.6.4",
+        urls = [
+            "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/com_github_aws_aws_sdk_go_v2_aws_protocol_eventstream-v1.6.4.zip",
         ],
     )
     go_repository(
         name = "com_github_aws_aws_sdk_go_v2_config",
         build_file_proto_mode = "disable_global",
"disable_global", importpath = "github.com/aws/aws-sdk-go-v2/config", - sha256 = "0c8ba4bf7ac47ac043998c3bdcd76289cf62dcae24e72cbef1bdf6cf2da7b922", - strip_prefix = "github.com/aws/aws-sdk-go-v2/config@v1.18.28", + sha256 = "5d063d3793ae595bdc5c3f5cdac9f8015009cecbe1ddfe2280d00004bdd8c76c", + strip_prefix = "github.com/aws/aws-sdk-go-v2/config@v1.27.31", urls = [ - "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/config/com_github_aws_aws_sdk_go_v2_config-v1.18.28.zip", + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/config/com_github_aws_aws_sdk_go_v2_config-v1.27.31.zip", ], ) go_repository( name = "com_github_aws_aws_sdk_go_v2_credentials", build_file_proto_mode = "disable_global", importpath = "github.com/aws/aws-sdk-go-v2/credentials", - sha256 = "cdeb51df11f42e1b979add3cf290b01682b271e720fb7e06e8f0aad42680cff7", - strip_prefix = "github.com/aws/aws-sdk-go-v2/credentials@v1.13.27", + sha256 = "9a631647ab7d062fed94e3e24f3bb6f24646786054106f7d1f3d6307fe6732a3", + strip_prefix = "github.com/aws/aws-sdk-go-v2/credentials@v1.17.30", urls = [ - "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/credentials/com_github_aws_aws_sdk_go_v2_credentials-v1.13.27.zip", + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/credentials/com_github_aws_aws_sdk_go_v2_credentials-v1.17.30.zip", ], ) go_repository( name = "com_github_aws_aws_sdk_go_v2_feature_ec2_imds", build_file_proto_mode = "disable_global", importpath = "github.com/aws/aws-sdk-go-v2/feature/ec2/imds", - sha256 = "b3b0740921b7b2832aaa7525d4c1f2580de3601c05dc0198aff4970046fd4fd0", - strip_prefix = "github.com/aws/aws-sdk-go-v2/feature/ec2/imds@v1.13.5", + sha256 = "7f5b3ae98e905d96256fc742c0538f18f145a381d7d5ebebf22f2d8c09874fd9", + strip_prefix = "github.com/aws/aws-sdk-go-v2/feature/ec2/imds@v1.16.12", urls = [ - "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/com_github_aws_aws_sdk_go_v2_feature_ec2_imds-v1.13.5.zip", + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/com_github_aws_aws_sdk_go_v2_feature_ec2_imds-v1.16.12.zip", + ], + ) + go_repository( + name = "com_github_aws_aws_sdk_go_v2_feature_s3_manager", + build_file_proto_mode = "disable_global", + importpath = "github.com/aws/aws-sdk-go-v2/feature/s3/manager", + sha256 = "a922bbd49b42eeb5cd4cb0df9d269ab7284d0c12467fffd5d36caa310238a910", + strip_prefix = "github.com/aws/aws-sdk-go-v2/feature/s3/manager@v1.17.16", + urls = [ + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/feature/s3/manager/com_github_aws_aws_sdk_go_v2_feature_s3_manager-v1.17.16.zip", ], ) go_repository( name = "com_github_aws_aws_sdk_go_v2_internal_configsources", build_file_proto_mode = "disable_global", importpath = "github.com/aws/aws-sdk-go-v2/internal/configsources", - sha256 = "5c794460a1ac545ea1283ee4bf214465d208082375c682f32d440399f182960a", - strip_prefix = "github.com/aws/aws-sdk-go-v2/internal/configsources@v1.1.35", + sha256 = "1166aee3aba666df11717e5d48b7c5f8c7a9647ea9970b25b2508400cd186ba1", + strip_prefix = "github.com/aws/aws-sdk-go-v2/internal/configsources@v1.3.16", urls = [ - 
"https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/internal/configsources/com_github_aws_aws_sdk_go_v2_internal_configsources-v1.1.35.zip", + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/internal/configsources/com_github_aws_aws_sdk_go_v2_internal_configsources-v1.3.16.zip", ], ) go_repository( name = "com_github_aws_aws_sdk_go_v2_internal_endpoints_v2", build_file_proto_mode = "disable_global", importpath = "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2", - sha256 = "b234d84e6759f7e47c0c7ce6608f428acb69fd008468e885cc2393be0bdbf557", - strip_prefix = "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2@v2.4.29", + sha256 = "e8ffd7b794ba83ff7eafd5c90d9c12ee9ffb5101c8018067f1ca8dfdda6bf8ff", + strip_prefix = "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2@v2.6.16", urls = [ - "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/com_github_aws_aws_sdk_go_v2_internal_endpoints_v2-v2.4.29.zip", + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/com_github_aws_aws_sdk_go_v2_internal_endpoints_v2-v2.6.16.zip", ], ) go_repository( name = "com_github_aws_aws_sdk_go_v2_internal_ini", build_file_proto_mode = "disable_global", importpath = "github.com/aws/aws-sdk-go-v2/internal/ini", - sha256 = "3cc3aa1dbe66b81bf189276f680ec6f6a1fc7668e8e828dae50764c58a72e4a8", - strip_prefix = "github.com/aws/aws-sdk-go-v2/internal/ini@v1.3.36", + sha256 = "30ceb160c10eee87c002f89ce5a89100463ec2935a980a3652fc53fff4efe21a", + strip_prefix = "github.com/aws/aws-sdk-go-v2/internal/ini@v1.8.1", + urls = [ + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/internal/ini/com_github_aws_aws_sdk_go_v2_internal_ini-v1.8.1.zip", + ], + ) + go_repository( + name = "com_github_aws_aws_sdk_go_v2_internal_v4a", + build_file_proto_mode = "disable_global", + importpath = "github.com/aws/aws-sdk-go-v2/internal/v4a", + sha256 = "9f044285cf122648dd733eeca16bafa3bb915bd93cf8f9dda530ff7ded3d9442", + strip_prefix = "github.com/aws/aws-sdk-go-v2/internal/v4a@v1.3.16", urls = [ - "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/internal/ini/com_github_aws_aws_sdk_go_v2_internal_ini-v1.3.36.zip", + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/internal/v4a/com_github_aws_aws_sdk_go_v2_internal_v4a-v1.3.16.zip", ], ) go_repository( @@ -583,14 +613,54 @@ def go_deps(): "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/service/iam/com_github_aws_aws_sdk_go_v2_service_iam-v1.18.3.zip", ], ) + go_repository( + name = "com_github_aws_aws_sdk_go_v2_service_internal_accept_encoding", + build_file_proto_mode = "disable_global", + importpath = "github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding", + sha256 = "2a75cd6211d8ee88f65c55df4ce8849491ec587b694015f3e7becc079a50ab9b", + strip_prefix = "github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding@v1.11.4", + urls = [ + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/com_github_aws_aws_sdk_go_v2_service_internal_accept_encoding-v1.11.4.zip", + ], + ) + go_repository( + name = "com_github_aws_aws_sdk_go_v2_service_internal_checksum", + 
build_file_proto_mode = "disable_global", + importpath = "github.com/aws/aws-sdk-go-v2/service/internal/checksum", + sha256 = "6a91d3c79a89fac1491ae3a37373596df4720917d02a2db5101fb161751e2775", + strip_prefix = "github.com/aws/aws-sdk-go-v2/service/internal/checksum@v1.3.18", + urls = [ + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/service/internal/checksum/com_github_aws_aws_sdk_go_v2_service_internal_checksum-v1.3.18.zip", + ], + ) go_repository( name = "com_github_aws_aws_sdk_go_v2_service_internal_presigned_url", build_file_proto_mode = "disable_global", importpath = "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url", - sha256 = "cdd979528b2fdddbd9b0eaec969f33f6407d1ac9388631df7525cc9434d4dac3", - strip_prefix = "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url@v1.9.29", + sha256 = "97ea77a8eab61c8a7018148d2435c9d629918a75d50dd82db3f38c3311ea31d6", + strip_prefix = "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url@v1.11.18", + urls = [ + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/com_github_aws_aws_sdk_go_v2_service_internal_presigned_url-v1.11.18.zip", + ], + ) + go_repository( + name = "com_github_aws_aws_sdk_go_v2_service_internal_s3shared", + build_file_proto_mode = "disable_global", + importpath = "github.com/aws/aws-sdk-go-v2/service/internal/s3shared", + sha256 = "65083f6cf54a986cdc8a070a0f2abbde44c6dbf8e494f936291d5c55b14e89d1", + strip_prefix = "github.com/aws/aws-sdk-go-v2/service/internal/s3shared@v1.17.16", urls = [ - "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/com_github_aws_aws_sdk_go_v2_service_internal_presigned_url-v1.9.29.zip", + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/com_github_aws_aws_sdk_go_v2_service_internal_s3shared-v1.17.16.zip", + ], + ) + go_repository( + name = "com_github_aws_aws_sdk_go_v2_service_kms", + build_file_proto_mode = "disable_global", + importpath = "github.com/aws/aws-sdk-go-v2/service/kms", + sha256 = "48402147f5da42fa55b4e7d61322db3906b3e7944289531c8e1fed906c639e3c", + strip_prefix = "github.com/aws/aws-sdk-go-v2/service/kms@v1.35.5", + urls = [ + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/service/kms/com_github_aws_aws_sdk_go_v2_service_kms-v1.35.5.zip", ], ) go_repository( @@ -603,6 +673,16 @@ def go_deps(): "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/service/rds/com_github_aws_aws_sdk_go_v2_service_rds-v1.18.4.zip", ], ) + go_repository( + name = "com_github_aws_aws_sdk_go_v2_service_s3", + build_file_proto_mode = "disable_global", + importpath = "github.com/aws/aws-sdk-go-v2/service/s3", + sha256 = "0f8751f1eaee1fc296f2892cf2d28c1f7c0eaaa7cb06666e8e704832a01c9577", + strip_prefix = "github.com/aws/aws-sdk-go-v2/service/s3@v1.61.0", + urls = [ + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/service/s3/com_github_aws_aws_sdk_go_v2_service_s3-v1.61.0.zip", + ], + ) go_repository( name = "com_github_aws_aws_sdk_go_v2_service_secretsmanager", build_file_proto_mode = "disable_global", @@ -617,40 +697,40 @@ def go_deps(): name = "com_github_aws_aws_sdk_go_v2_service_sso", build_file_proto_mode = 
"disable_global", importpath = "github.com/aws/aws-sdk-go-v2/service/sso", - sha256 = "8ca0d1458087d636596bfb9182ad4a18f52aa5a960e8600e8aeeb0d52d2a33eb", - strip_prefix = "github.com/aws/aws-sdk-go-v2/service/sso@v1.12.13", + sha256 = "9c4003dd15799bdc71c02fe5d0c67c72e4eaa625be1d3678f3aaa9984352cae3", + strip_prefix = "github.com/aws/aws-sdk-go-v2/service/sso@v1.22.5", urls = [ - "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/service/sso/com_github_aws_aws_sdk_go_v2_service_sso-v1.12.13.zip", + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/service/sso/com_github_aws_aws_sdk_go_v2_service_sso-v1.22.5.zip", ], ) go_repository( name = "com_github_aws_aws_sdk_go_v2_service_ssooidc", build_file_proto_mode = "disable_global", importpath = "github.com/aws/aws-sdk-go-v2/service/ssooidc", - sha256 = "fb6d9039c24f3448afdb516d44b4730ba1279ca91bd10c2edfea26a020a8385b", - strip_prefix = "github.com/aws/aws-sdk-go-v2/service/ssooidc@v1.14.13", + sha256 = "3c1e0b5e33db7ebf1adc363c31b14a91d00a89ed87f15dcd76a43300d7cc85ca", + strip_prefix = "github.com/aws/aws-sdk-go-v2/service/ssooidc@v1.26.5", urls = [ - "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/service/ssooidc/com_github_aws_aws_sdk_go_v2_service_ssooidc-v1.14.13.zip", + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/service/ssooidc/com_github_aws_aws_sdk_go_v2_service_ssooidc-v1.26.5.zip", ], ) go_repository( name = "com_github_aws_aws_sdk_go_v2_service_sts", build_file_proto_mode = "disable_global", importpath = "github.com/aws/aws-sdk-go-v2/service/sts", - sha256 = "c7904f761ff6f3d00a4e544a9427aba19032239a8053ee03309bdbed0cd4ffd5", - strip_prefix = "github.com/aws/aws-sdk-go-v2/service/sts@v1.19.3", + sha256 = "7fa5bdfbe752f0b59584ef0b1300aa31aa561e3a733645636f415abb59bf9ba0", + strip_prefix = "github.com/aws/aws-sdk-go-v2/service/sts@v1.30.5", urls = [ - "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/service/sts/com_github_aws_aws_sdk_go_v2_service_sts-v1.19.3.zip", + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/service/sts/com_github_aws_aws_sdk_go_v2_service_sts-v1.30.5.zip", ], ) go_repository( name = "com_github_aws_smithy_go", build_file_proto_mode = "disable_global", importpath = "github.com/aws/smithy-go", - sha256 = "864667edd8459bff1f750d65f40922263b995a06dcb85240536539e239e911e4", - strip_prefix = "github.com/aws/smithy-go@v1.13.5", + sha256 = "6cf9bffdb7c793f43125f498a47ea6d37191e7a0aa81435635f47fb75dadc978", + strip_prefix = "github.com/aws/smithy-go@v1.20.4", urls = [ - "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/smithy-go/com_github_aws_smithy_go-v1.13.5.zip", + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/smithy-go/com_github_aws_smithy_go-v1.20.4.zip", ], ) go_repository( @@ -1625,10 +1705,10 @@ def go_deps(): name = "com_github_cockroachdb_crlib", build_file_proto_mode = "disable_global", importpath = "github.com/cockroachdb/crlib", - sha256 = "75022b33828ba2c915827a3e217ea5d19bd1e7c6aa702b344cf3d349ae7f31b4", - strip_prefix = "github.com/cockroachdb/crlib@v0.0.0-20240729155931-991150b7e290", + sha256 = "ab49848ce4daea2599f91fd17942826bf62532b6d71af8bf8a02f4dff97118f3", + strip_prefix 
= "github.com/cockroachdb/crlib@v0.0.0-20240816115810-1c502cdb7c1d", urls = [ - "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/cockroachdb/crlib/com_github_cockroachdb_crlib-v0.0.0-20240729155931-991150b7e290.zip", + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/cockroachdb/crlib/com_github_cockroachdb_crlib-v0.0.0-20240816115810-1c502cdb7c1d.zip", ], ) go_repository( @@ -1718,10 +1798,10 @@ def go_deps(): patches = [ "@com_github_cockroachdb_cockroach//build/patches:com_github_cockroachdb_pebble.patch", ], - sha256 = "2684fa2648ae6aca2947fb7095ae8446a2f5f9473461b5d8d1edba1a883434da", - strip_prefix = "github.com/cockroachdb/pebble@v0.0.0-20240816131806-ac08db2964cd", + sha256 = "c74ef1cfeb721234f1cc8ee9e90506a975341b79c5b23ddf2cc6eddcbe1e5087", + strip_prefix = "github.com/cockroachdb/pebble@v0.0.0-20240904144802-32cf8823809b", urls = [ - "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/cockroachdb/pebble/com_github_cockroachdb_pebble-v0.0.0-20240816131806-ac08db2964cd.zip", + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/cockroachdb/pebble/com_github_cockroachdb_pebble-v0.0.0-20240904144802-32cf8823809b.zip", ], ) go_repository( @@ -2348,20 +2428,20 @@ def go_deps(): name = "com_github_decred_dcrd_crypto_blake256", build_file_proto_mode = "disable_global", importpath = "github.com/decred/dcrd/crypto/blake256", - sha256 = "cd8bbdae14641f0ba44430fc66990dd37bbfcf1e21a965a9fd1871d16cac127d", - strip_prefix = "github.com/decred/dcrd/crypto/blake256@v1.0.0", + sha256 = "e4343d55494a93eb7bb7b59be9359fb8007fd36652b27a725db024f61605d515", + strip_prefix = "github.com/decred/dcrd/crypto/blake256@v1.0.1", urls = [ - "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/decred/dcrd/crypto/blake256/com_github_decred_dcrd_crypto_blake256-v1.0.0.zip", + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/decred/dcrd/crypto/blake256/com_github_decred_dcrd_crypto_blake256-v1.0.1.zip", ], ) go_repository( name = "com_github_decred_dcrd_dcrec_secp256k1_v4", build_file_proto_mode = "disable_global", importpath = "github.com/decred/dcrd/dcrec/secp256k1/v4", - sha256 = "ce895eb53e69a058a77bc7e599fc43cef64b3c639ffec2f34faa7fef1331665d", - strip_prefix = "github.com/decred/dcrd/dcrec/secp256k1/v4@v4.1.0", + sha256 = "107cfef3902348214eb364253d75f569ea4c7a203d35eea50fa7ce10cd9cf710", + strip_prefix = "github.com/decred/dcrd/dcrec/secp256k1/v4@v4.3.0", urls = [ - "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/decred/dcrd/dcrec/secp256k1/v4/com_github_decred_dcrd_dcrec_secp256k1_v4-v4.1.0.zip", + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/decred/dcrd/dcrec/secp256k1/v4/com_github_decred_dcrd_dcrec_secp256k1_v4-v4.3.0.zip", ], ) go_repository( @@ -3659,10 +3739,10 @@ def go_deps(): name = "com_github_goccy_go_json", build_file_proto_mode = "disable_global", importpath = "github.com/goccy/go-json", - sha256 = "ed9043ee01cc46557c74bcecc625db37ffe3a5c7af219f390a287f44a40c2520", - strip_prefix = "github.com/goccy/go-json@v0.10.2", + sha256 = "a14a4805bf6043dfd1e0e923d761f126e6c5a86c416f28f57bfebf1f8bde59e3", + strip_prefix = "github.com/goccy/go-json@v0.10.3", urls = [ - "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/goccy/go-json/com_github_goccy_go_json-v0.10.2.zip", + 
"https://storage.googleapis.com/cockroach-godeps/gomod/github.com/goccy/go-json/com_github_goccy_go_json-v0.10.3.zip", ], ) go_repository( @@ -5526,10 +5606,10 @@ def go_deps(): patches = [ "@com_github_cockroachdb_cockroach//build/patches:com_github_kisielk_errcheck.patch", ], - sha256 = "99d3220891162cb684f8e05d54f3d0dc58abdd496a2f0cfda7fd4a28917a719e", - strip_prefix = "github.com/kisielk/errcheck@v1.6.1-0.20210625163953-8ddee489636a", + sha256 = "f394d1df1f2332387ce142d98734c5c44fb94e9a8a2af2a9b75aa4ec4a64b963", + strip_prefix = "github.com/kisielk/errcheck@v1.7.0", urls = [ - "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/kisielk/errcheck/com_github_kisielk_errcheck-v1.6.1-0.20210625163953-8ddee489636a.zip", + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/kisielk/errcheck/com_github_kisielk_errcheck-v1.7.0.zip", ], ) go_repository( @@ -5772,24 +5852,14 @@ def go_deps(): "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/leodido/go-urn/com_github_leodido_go_urn-v1.2.1.zip", ], ) - go_repository( - name = "com_github_lestrrat_go_backoff_v2", - build_file_proto_mode = "disable_global", - importpath = "github.com/lestrrat-go/backoff/v2", - sha256 = "f5ded39eec777b7de879eb04204aa4b322683dabb22137862b09cb464f5bc617", - strip_prefix = "github.com/lestrrat-go/backoff/v2@v2.0.8", - urls = [ - "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/lestrrat-go/backoff/v2/com_github_lestrrat_go_backoff_v2-v2.0.8.zip", - ], - ) go_repository( name = "com_github_lestrrat_go_blackmagic", build_file_proto_mode = "disable_global", importpath = "github.com/lestrrat-go/blackmagic", - sha256 = "0621ab66f2510093f86f838db09a698027e8cbf08cc0e52bfa7d359b4f1b3745", - strip_prefix = "github.com/lestrrat-go/blackmagic@v1.0.1", + sha256 = "2baa5f21e1db4781a11d0ba2fbe8e71323c78875034da61687d80f47ae9c78ce", + strip_prefix = "github.com/lestrrat-go/blackmagic@v1.0.2", urls = [ - "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/lestrrat-go/blackmagic/com_github_lestrrat_go_blackmagic-v1.0.1.zip", + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/lestrrat-go/blackmagic/com_github_lestrrat_go_blackmagic-v1.0.2.zip", ], ) go_repository( @@ -5802,6 +5872,16 @@ def go_deps(): "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/lestrrat-go/httpcc/com_github_lestrrat_go_httpcc-v1.0.1.zip", ], ) + go_repository( + name = "com_github_lestrrat_go_httprc", + build_file_proto_mode = "disable_global", + importpath = "github.com/lestrrat-go/httprc", + sha256 = "19c7a7bc6d63165e24a911182fe860166b75d4557262ef031d2fba8351b44707", + strip_prefix = "github.com/lestrrat-go/httprc@v1.0.6", + urls = [ + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/lestrrat-go/httprc/com_github_lestrrat_go_httprc-v1.0.6.zip", + ], + ) go_repository( name = "com_github_lestrrat_go_iter", build_file_proto_mode = "disable_global", @@ -5813,23 +5893,23 @@ def go_deps(): ], ) go_repository( - name = "com_github_lestrrat_go_jwx", + name = "com_github_lestrrat_go_jwx_v2", build_file_proto_mode = "disable_global", - importpath = "github.com/lestrrat-go/jwx", - sha256 = "808d6fe46171b8d6bb0a44188d7886d864f1aad5ce29b8ce1c6048745183c645", - strip_prefix = "github.com/lestrrat-go/jwx@v1.2.25", + importpath = "github.com/lestrrat-go/jwx/v2", + sha256 = 
"f0ee5e8baf11f8d449ff3cb81b9c4421d4e437b2dc6f22d25001816b251d6d2f", + strip_prefix = "github.com/lestrrat-go/jwx/v2@v2.1.1", urls = [ - "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/lestrrat-go/jwx/com_github_lestrrat_go_jwx-v1.2.25.zip", + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/lestrrat-go/jwx/v2/com_github_lestrrat_go_jwx_v2-v2.1.1.zip", ], ) go_repository( name = "com_github_lestrrat_go_option", build_file_proto_mode = "disable_global", importpath = "github.com/lestrrat-go/option", - sha256 = "8c42afc182c5697b8d5e38f5004ae522fa437247850ca2cf9fe65a6ae18bfaa9", - strip_prefix = "github.com/lestrrat-go/option@v1.0.0", + sha256 = "3e5614e160680053e07e4970e825e694c2a917741e735ab4d435a396b739ae78", + strip_prefix = "github.com/lestrrat-go/option@v1.0.1", urls = [ - "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/lestrrat-go/option/com_github_lestrrat_go_option-v1.0.0.zip", + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/lestrrat-go/option/com_github_lestrrat_go_option-v1.0.1.zip", ], ) go_repository( @@ -8009,6 +8089,16 @@ def go_deps(): "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/securego/gosec/com_github_securego_gosec-v0.0.0-20200203094520-d13bb6d2420c.zip", ], ) + go_repository( + name = "com_github_segmentio_asm", + build_file_proto_mode = "disable_global", + importpath = "github.com/segmentio/asm", + sha256 = "8e2815672f1ab3049b10185b5494006320c32afb419ccf9f14385bc25ea44def", + strip_prefix = "github.com/segmentio/asm@v1.2.0", + urls = [ + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/segmentio/asm/com_github_segmentio_asm-v1.2.0.zip", + ], + ) go_repository( name = "com_github_segmentio_kafka_go", build_file_proto_mode = "disable_global", @@ -11356,10 +11446,10 @@ def go_deps(): name = "org_golang_x_crypto", build_file_proto_mode = "disable_global", importpath = "golang.org/x/crypto", - sha256 = "65d22b9f54aef5f7f064900d2ecf8d8b231729aebc46c3b7ca56ff897fb70b57", - strip_prefix = "golang.org/x/crypto@v0.23.0", + sha256 = "ec96acfe28be3ff2fb14201c5f51132f0e24c7d0d6f3201a8aa69c84f989d014", + strip_prefix = "golang.org/x/crypto@v0.26.0", urls = [ - "https://storage.googleapis.com/cockroach-godeps/gomod/golang.org/x/crypto/org_golang_x_crypto-v0.23.0.zip", + "https://storage.googleapis.com/cockroach-godeps/gomod/golang.org/x/crypto/org_golang_x_crypto-v0.26.0.zip", ], ) go_repository( @@ -11416,20 +11506,20 @@ def go_deps(): name = "org_golang_x_mod", build_file_proto_mode = "disable_global", importpath = "golang.org/x/mod", - sha256 = "98a122c92ad55deef674f6546b4c295ed93d106178dd24ec40449ae33b41037a", - strip_prefix = "golang.org/x/mod@v0.14.0", + sha256 = "3c3528c39639b7cd699c121c100ddb71ab49f94bff257a4a3935e3ae9e8571fc", + strip_prefix = "golang.org/x/mod@v0.20.0", urls = [ - "https://storage.googleapis.com/cockroach-godeps/gomod/golang.org/x/mod/org_golang_x_mod-v0.14.0.zip", + "https://storage.googleapis.com/cockroach-godeps/gomod/golang.org/x/mod/org_golang_x_mod-v0.20.0.zip", ], ) go_repository( name = "org_golang_x_net", build_file_proto_mode = "disable_global", importpath = "golang.org/x/net", - sha256 = "389940dbee4a10516de85368bb1a550d6df814ed1f893db18de8def9168147c7", - strip_prefix = "golang.org/x/net@v0.24.0", + sha256 = "c6f7bde4bb418d1f5ee5dc437d09ce9f10743ddba043cdca82eb57ddeb18d6da", + strip_prefix = "golang.org/x/net@v0.28.0", urls = [ - 
"https://storage.googleapis.com/cockroach-godeps/gomod/golang.org/x/net/org_golang_x_net-v0.24.0.zip", + "https://storage.googleapis.com/cockroach-godeps/gomod/golang.org/x/net/org_golang_x_net-v0.28.0.zip", ], ) go_repository( @@ -11456,40 +11546,50 @@ def go_deps(): name = "org_golang_x_sync", build_file_proto_mode = "disable_global", importpath = "golang.org/x/sync", - sha256 = "20b01085240e661bffc7f59383f21b90f112d669784220c6e59c801243216d22", - strip_prefix = "golang.org/x/sync@v0.7.0", + sha256 = "c79473c265ca571d389bf64fa1e7b2d8999b4ab3eb7af5e3bc185644783a1087", + strip_prefix = "golang.org/x/sync@v0.8.0", urls = [ - "https://storage.googleapis.com/cockroach-godeps/gomod/golang.org/x/sync/org_golang_x_sync-v0.7.0.zip", + "https://storage.googleapis.com/cockroach-godeps/gomod/golang.org/x/sync/org_golang_x_sync-v0.8.0.zip", ], ) go_repository( name = "org_golang_x_sys", build_file_proto_mode = "disable_global", importpath = "golang.org/x/sys", - sha256 = "3f826b191eab1ebda925feb551d334e37e1b5865d1aa790fade46598811a8b1a", - strip_prefix = "golang.org/x/sys@v0.20.0", + sha256 = "5bf721c4404580d5350d0a0297c1f48f07c05db8a0d2a20677e6cb295380b9a7", + strip_prefix = "golang.org/x/sys@v0.23.0", + urls = [ + "https://storage.googleapis.com/cockroach-godeps/gomod/golang.org/x/sys/org_golang_x_sys-v0.23.0.zip", + ], + ) + go_repository( + name = "org_golang_x_telemetry", + build_file_proto_mode = "disable_global", + importpath = "golang.org/x/telemetry", + sha256 = "8e8649337973d064cc44fa858787db7d0eb90f0806807349766d180ed6889f5c", + strip_prefix = "golang.org/x/telemetry@v0.0.0-20240521205824-bda55230c457", urls = [ - "https://storage.googleapis.com/cockroach-godeps/gomod/golang.org/x/sys/org_golang_x_sys-v0.20.0.zip", + "https://storage.googleapis.com/cockroach-godeps/gomod/golang.org/x/telemetry/org_golang_x_telemetry-v0.0.0-20240521205824-bda55230c457.zip", ], ) go_repository( name = "org_golang_x_term", build_file_proto_mode = "disable_global", importpath = "golang.org/x/term", - sha256 = "840eacc0ffb306dcb4b0f5bf6e071c91d2e7957fcc604eec4e73c0fc22f2920c", - strip_prefix = "golang.org/x/term@v0.20.0", + sha256 = "2597a62b487b952c11c89b2001551af1fe1d29c484388ec1c3f5e3be7ff58ba5", + strip_prefix = "golang.org/x/term@v0.23.0", urls = [ - "https://storage.googleapis.com/cockroach-godeps/gomod/golang.org/x/term/org_golang_x_term-v0.20.0.zip", + "https://storage.googleapis.com/cockroach-godeps/gomod/golang.org/x/term/org_golang_x_term-v0.23.0.zip", ], ) go_repository( name = "org_golang_x_text", build_file_proto_mode = "disable_global", importpath = "golang.org/x/text", - sha256 = "13faee7e46c8a18c8a28f3eceebf15db6d724b9a108c3c0482a6d2e58ba73a73", - strip_prefix = "golang.org/x/text@v0.15.0", + sha256 = "48464f2ab2f988ca8b7b0a9d098e3664224c3b128629b5a9cc08025ee4a7e4ec", + strip_prefix = "golang.org/x/text@v0.17.0", urls = [ - "https://storage.googleapis.com/cockroach-godeps/gomod/golang.org/x/text/org_golang_x_text-v0.15.0.zip", + "https://storage.googleapis.com/cockroach-godeps/gomod/golang.org/x/text/org_golang_x_text-v0.17.0.zip", ], ) go_repository( @@ -11506,10 +11606,10 @@ def go_deps(): name = "org_golang_x_tools", build_file_proto_mode = "disable_global", importpath = "golang.org/x/tools", - sha256 = "f4c5fce4cd013419429592043ce0549f47060dfe6f44cca940224cd48c3e28ad", - strip_prefix = "golang.org/x/tools@v0.17.0", + sha256 = "92607be1cacf4647fd31b19ee64b1a7c198178f1005c75371e38e7b08fb138e7", + strip_prefix = "golang.org/x/tools@v0.24.0", urls = [ - 
"https://storage.googleapis.com/cockroach-godeps/gomod/golang.org/x/tools/org_golang_x_tools-v0.17.0.zip", + "https://storage.googleapis.com/cockroach-godeps/gomod/golang.org/x/tools/org_golang_x_tools-v0.24.0.zip", ], ) go_repository( diff --git a/TEAMS.yaml b/TEAMS.yaml index 0ca157cfbd54..c24281ad4826 100644 --- a/TEAMS.yaml +++ b/TEAMS.yaml @@ -44,10 +44,6 @@ cockroachdb/kv: cockroachdb/kv-prs: other triage_column_id: 14242655 label: T-kv -cockroachdb/replication: - aliases: - cockroachdb/repl-prs: other - label: T-kv-replication cockroachdb/spatial: triage_column_id: 9487269 label: T-spatial @@ -69,9 +65,9 @@ cockroachdb/test-eng-prs: triage_column_id: 14041337 label: T-testeng cockroachdb/security: - label: T-cross-product-security + label: T-security-engineering cockroachdb/prodsec: - label: T-cross-product-security + label: T-security-engineering cockroachdb/product-security: label: T-product-security cockroachdb/disaster-recovery: diff --git a/build/bazelutil/bazel-generate.sh b/build/bazelutil/bazel-generate.sh index 24a68666be0e..a83b79bb0088 100755 --- a/build/bazelutil/bazel-generate.sh +++ b/build/bazelutil/bazel-generate.sh @@ -85,5 +85,5 @@ fi if files_unchanged_from_upstream $(find_relevant ./pkg -name BUILD.bazel) $(find_relevant ./pkg/cmd/generate-bazel-extra -name BUILD.bazel -or -name '*.go'); then echo "Skipping //pkg/cmd/generate-bazel-extra (relevant files are unchanged from upstream)." else - bazel run //pkg/cmd/generate-bazel-extra --run_under="cd $PWD && " ${EXTRA_BAZEL_ARGS:-} -- -gen_test_suites + bazel run //pkg/cmd/generate-bazel-extra --run_under="cd $PWD && " ${EXTRA_BAZEL_ARGS:-} fi diff --git a/build/bazelutil/distdir_files.bzl b/build/bazelutil/distdir_files.bzl index ea4df37425be..7e2ec50eca81 100644 --- a/build/bazelutil/distdir_files.bzl +++ b/build/bazelutil/distdir_files.bzl @@ -243,24 +243,32 @@ DISTDIR_FILES = { "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/atotto/clipboard/com_github_atotto_clipboard-v0.1.4.zip": "d67b2c36c662751309fd2ec351df3651584bea840bd27be9a90702c3a238b43f", "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-lambda-go/com_github_aws_aws_lambda_go-v1.13.3.zip": "8cfc5400798abd2840f456c75265f8fba4ae488e32ca2af9a5c8073fb219ea82", "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-msk-iam-sasl-signer-go/com_github_aws_aws_msk_iam_sasl_signer_go-v1.0.0.zip": "b5f99e40aae3664b1a58b312efda28e432b4e976dd3296e24520cc79b9651a14", - "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/com_github_aws_aws_sdk_go_v2-v1.19.0.zip": "a000599cb80b0466affe6baf2fa98d79258ee7a4fe05c12c7819bcd44a349bd0", - "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/config/com_github_aws_aws_sdk_go_v2_config-v1.18.28.zip": "0c8ba4bf7ac47ac043998c3bdcd76289cf62dcae24e72cbef1bdf6cf2da7b922", - "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/credentials/com_github_aws_aws_sdk_go_v2_credentials-v1.13.27.zip": "cdeb51df11f42e1b979add3cf290b01682b271e720fb7e06e8f0aad42680cff7", - "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/com_github_aws_aws_sdk_go_v2_feature_ec2_imds-v1.13.5.zip": "b3b0740921b7b2832aaa7525d4c1f2580de3601c05dc0198aff4970046fd4fd0", - 
"https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/internal/configsources/com_github_aws_aws_sdk_go_v2_internal_configsources-v1.1.35.zip": "5c794460a1ac545ea1283ee4bf214465d208082375c682f32d440399f182960a", - "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/com_github_aws_aws_sdk_go_v2_internal_endpoints_v2-v2.4.29.zip": "b234d84e6759f7e47c0c7ce6608f428acb69fd008468e885cc2393be0bdbf557", - "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/internal/ini/com_github_aws_aws_sdk_go_v2_internal_ini-v1.3.36.zip": "3cc3aa1dbe66b81bf189276f680ec6f6a1fc7668e8e828dae50764c58a72e4a8", + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/com_github_aws_aws_sdk_go_v2_aws_protocol_eventstream-v1.6.4.zip": "480949d2a72f1c6172b9bf01c289a5e0850ec62dadd62a6f1e03708551a07210", + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/com_github_aws_aws_sdk_go_v2-v1.30.4.zip": "738e20fcbcfa2f419f5adc8c4724abf0da6164f13270033a7c92d77c852fe189", + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/config/com_github_aws_aws_sdk_go_v2_config-v1.27.31.zip": "5d063d3793ae595bdc5c3f5cdac9f8015009cecbe1ddfe2280d00004bdd8c76c", + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/credentials/com_github_aws_aws_sdk_go_v2_credentials-v1.17.30.zip": "9a631647ab7d062fed94e3e24f3bb6f24646786054106f7d1f3d6307fe6732a3", + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/com_github_aws_aws_sdk_go_v2_feature_ec2_imds-v1.16.12.zip": "7f5b3ae98e905d96256fc742c0538f18f145a381d7d5ebebf22f2d8c09874fd9", + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/feature/s3/manager/com_github_aws_aws_sdk_go_v2_feature_s3_manager-v1.17.16.zip": "a922bbd49b42eeb5cd4cb0df9d269ab7284d0c12467fffd5d36caa310238a910", + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/internal/configsources/com_github_aws_aws_sdk_go_v2_internal_configsources-v1.3.16.zip": "1166aee3aba666df11717e5d48b7c5f8c7a9647ea9970b25b2508400cd186ba1", + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/com_github_aws_aws_sdk_go_v2_internal_endpoints_v2-v2.6.16.zip": "e8ffd7b794ba83ff7eafd5c90d9c12ee9ffb5101c8018067f1ca8dfdda6bf8ff", + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/internal/ini/com_github_aws_aws_sdk_go_v2_internal_ini-v1.8.1.zip": "30ceb160c10eee87c002f89ce5a89100463ec2935a980a3652fc53fff4efe21a", + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/internal/v4a/com_github_aws_aws_sdk_go_v2_internal_v4a-v1.3.16.zip": "9f044285cf122648dd733eeca16bafa3bb915bd93cf8f9dda530ff7ded3d9442", "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/service/databasemigrationservice/com_github_aws_aws_sdk_go_v2_service_databasemigrationservice-v1.18.3.zip": "c69b0b562c314708e5d1542bed72e0ba068f910d3cae4909a0d80219e6b497de", "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/service/ec2/com_github_aws_aws_sdk_go_v2_service_ec2-v1.34.0.zip": 
"b24b82535334bd7716000ba1af24acc03fcbbcb8817b8e229e9368c1fbbe6c3e", "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/service/iam/com_github_aws_aws_sdk_go_v2_service_iam-v1.18.3.zip": "efb7b199ce0ae1dbea275fa3f8d131e874cc27d92c55ba7a007ad89762a88ed8", - "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/com_github_aws_aws_sdk_go_v2_service_internal_presigned_url-v1.9.29.zip": "cdd979528b2fdddbd9b0eaec969f33f6407d1ac9388631df7525cc9434d4dac3", + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/com_github_aws_aws_sdk_go_v2_service_internal_accept_encoding-v1.11.4.zip": "2a75cd6211d8ee88f65c55df4ce8849491ec587b694015f3e7becc079a50ab9b", + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/service/internal/checksum/com_github_aws_aws_sdk_go_v2_service_internal_checksum-v1.3.18.zip": "6a91d3c79a89fac1491ae3a37373596df4720917d02a2db5101fb161751e2775", + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/com_github_aws_aws_sdk_go_v2_service_internal_presigned_url-v1.11.18.zip": "97ea77a8eab61c8a7018148d2435c9d629918a75d50dd82db3f38c3311ea31d6", + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/com_github_aws_aws_sdk_go_v2_service_internal_s3shared-v1.17.16.zip": "65083f6cf54a986cdc8a070a0f2abbde44c6dbf8e494f936291d5c55b14e89d1", + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/service/kms/com_github_aws_aws_sdk_go_v2_service_kms-v1.35.5.zip": "48402147f5da42fa55b4e7d61322db3906b3e7944289531c8e1fed906c639e3c", "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/service/rds/com_github_aws_aws_sdk_go_v2_service_rds-v1.18.4.zip": "f5de5a435c3c31b14e853e9a4348ce80646db030c9a99a178e2a12fc00585f7e", + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/service/s3/com_github_aws_aws_sdk_go_v2_service_s3-v1.61.0.zip": "0f8751f1eaee1fc296f2892cf2d28c1f7c0eaaa7cb06666e8e704832a01c9577", "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/service/secretsmanager/com_github_aws_aws_sdk_go_v2_service_secretsmanager-v1.18.2.zip": "44dcf0add18d221042e6a709eed9beae974e5eebfe18dd37003944b7abefb271", - "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/service/sso/com_github_aws_aws_sdk_go_v2_service_sso-v1.12.13.zip": "8ca0d1458087d636596bfb9182ad4a18f52aa5a960e8600e8aeeb0d52d2a33eb", - "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/service/ssooidc/com_github_aws_aws_sdk_go_v2_service_ssooidc-v1.14.13.zip": "fb6d9039c24f3448afdb516d44b4730ba1279ca91bd10c2edfea26a020a8385b", - "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/service/sts/com_github_aws_aws_sdk_go_v2_service_sts-v1.19.3.zip": "c7904f761ff6f3d00a4e544a9427aba19032239a8053ee03309bdbed0cd4ffd5", + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/service/sso/com_github_aws_aws_sdk_go_v2_service_sso-v1.22.5.zip": "9c4003dd15799bdc71c02fe5d0c67c72e4eaa625be1d3678f3aaa9984352cae3", + 
"https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/service/ssooidc/com_github_aws_aws_sdk_go_v2_service_ssooidc-v1.26.5.zip": "3c1e0b5e33db7ebf1adc363c31b14a91d00a89ed87f15dcd76a43300d7cc85ca", + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go-v2/service/sts/com_github_aws_aws_sdk_go_v2_service_sts-v1.30.5.zip": "7fa5bdfbe752f0b59584ef0b1300aa31aa561e3a733645636f415abb59bf9ba0", "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/aws-sdk-go/com_github_aws_aws_sdk_go-v1.40.37.zip": "c0c481d28af88f621fb3fdeacc1e5d32f69a1bb83d0ee959f95ce89e4e2d0494", - "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/smithy-go/com_github_aws_smithy_go-v1.13.5.zip": "864667edd8459bff1f750d65f40922263b995a06dcb85240536539e239e911e4", + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aws/smithy-go/com_github_aws_smithy_go-v1.20.4.zip": "6cf9bffdb7c793f43125f498a47ea6d37191e7a0aa81435635f47fb75dadc978", "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/axiomhq/hyperloglog/com_github_axiomhq_hyperloglog-v0.0.0-20181223111420-4b99d0c2c99e.zip": "812834322ee2ca50dc36f91f9ac3f2cde4631af2f9c330b1271c78b46024a540", "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aymanbagabas/go-osc52/com_github_aymanbagabas_go_osc52-v1.0.3.zip": "138e75a51599c2a8e4afe2bd6acdeaddbb73eb9ec796dfa2f577b16201660d9e", "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/aymerick/douceur/com_github_aymerick_douceur-v0.2.0.zip": "dcbf69760cc1a8b32384495438e1086e4c3d669b2ebc0debd92e1865ffd6be60", @@ -327,7 +335,7 @@ DISTDIR_FILES = { "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/cockroachdb/cmux/com_github_cockroachdb_cmux-v0.0.0-20170110192607-30d10be49292.zip": "88f6f9cf33eb535658540b46f6222f029398e590a3ff9cc873d7d561ac6debf0", "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/cockroachdb/cockroach-go/v2/com_github_cockroachdb_cockroach_go_v2-v2.3.7.zip": "028c29c79c2d373bca3ce9a475291285fdcb68a2f908190f738d5ce605edbd07", "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/cockroachdb/crlfmt/com_github_cockroachdb_crlfmt-v0.0.0-20221214225007-b2fc5c302548.zip": "fedc01bdd6d964da0425d5eaac8efadc951e78e13f102292cc0774197f09ab63", - "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/cockroachdb/crlib/com_github_cockroachdb_crlib-v0.0.0-20240729155931-991150b7e290.zip": "75022b33828ba2c915827a3e217ea5d19bd1e7c6aa702b344cf3d349ae7f31b4", + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/cockroachdb/crlib/com_github_cockroachdb_crlib-v0.0.0-20240816115810-1c502cdb7c1d.zip": "ab49848ce4daea2599f91fd17942826bf62532b6d71af8bf8a02f4dff97118f3", "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/cockroachdb/datadriven/com_github_cockroachdb_datadriven-v1.0.3-0.20240530155848-7682d40af056.zip": "f4cb70fec2b2904a56bfbda6a6c8bf9ea1d568a5994ecdb825f770671119b63b", "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/cockroachdb/errors/com_github_cockroachdb_errors-v1.11.3.zip": "d11ed59d96afef2d1f0ce56892839c62ff5c0cbca8dff0aaefeaef7eb190e73c", "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/cockroachdb/fifo/com_github_cockroachdb_fifo-v0.0.0-20240606204812-0bbfbd93a7ce.zip": 
"41e682b393cc82891ab5fcefbd28cc6173f16887702ab8760bcbc66d122e5900", @@ -335,7 +343,7 @@ DISTDIR_FILES = { "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/cockroachdb/gostdlib/com_github_cockroachdb_gostdlib-v1.19.0.zip": "c4d516bcfe8c07b6fc09b8a9a07a95065b36c2855627cb3514e40c98f872b69e", "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/cockroachdb/logtags/com_github_cockroachdb_logtags-v0.0.0-20230118201751-21c54148d20b.zip": "ca7776f47e5fecb4c495490a679036bfc29d95bd7625290cfdb9abb0baf97476", "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/cockroachdb/metamorphic/com_github_cockroachdb_metamorphic-v0.0.0-20231108215700-4ba948b56895.zip": "28c8cf42192951b69378cf537be5a9a43f2aeb35542908cc4fe5f689505853ea", - "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/cockroachdb/pebble/com_github_cockroachdb_pebble-v0.0.0-20240816131806-ac08db2964cd.zip": "2684fa2648ae6aca2947fb7095ae8446a2f5f9473461b5d8d1edba1a883434da", + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/cockroachdb/pebble/com_github_cockroachdb_pebble-v0.0.0-20240904144802-32cf8823809b.zip": "c74ef1cfeb721234f1cc8ee9e90506a975341b79c5b23ddf2cc6eddcbe1e5087", "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/cockroachdb/redact/com_github_cockroachdb_redact-v1.1.5.zip": "11b30528eb0dafc8bc1a5ba39d81277c257cbe6946a7564402f588357c164560", "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/cockroachdb/returncheck/com_github_cockroachdb_returncheck-v0.0.0-20200612231554-92cdbca611dd.zip": "ce92ba4352deec995b1f2eecf16eba7f5d51f5aa245a1c362dfe24c83d31f82b", "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/cockroachdb/stress/com_github_cockroachdb_stress-v0.0.0-20220803192808-1806698b1b7b.zip": "3fda531795c600daf25532a4f98be2a1335cd1e5e182c72789bca79f5f69fcc1", @@ -396,8 +404,8 @@ DISTDIR_FILES = { "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/dave/kerr/com_github_dave_kerr-v0.0.0-20170318121727-bc25dd6abe8e.zip": "58bfff20a2f687e0f607887e88ff1044fe22186765e93b794511b1a0a625eaa1", "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/dave/rebecca/com_github_dave_rebecca-v0.9.1.zip": "74c7f193fcc4a165903e3761dbff05e73e6fcd92f8cf0861029487e65da40439", "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/davecgh/go-spew/com_github_davecgh_go_spew-v1.1.1.zip": "6b44a843951f371b7010c754ecc3cabefe815d5ced1c5b9409fb2d697e8a890d", - "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/decred/dcrd/crypto/blake256/com_github_decred_dcrd_crypto_blake256-v1.0.0.zip": "cd8bbdae14641f0ba44430fc66990dd37bbfcf1e21a965a9fd1871d16cac127d", - "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/decred/dcrd/dcrec/secp256k1/v4/com_github_decred_dcrd_dcrec_secp256k1_v4-v4.1.0.zip": "ce895eb53e69a058a77bc7e599fc43cef64b3c639ffec2f34faa7fef1331665d", + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/decred/dcrd/crypto/blake256/com_github_decred_dcrd_crypto_blake256-v1.0.1.zip": "e4343d55494a93eb7bb7b59be9359fb8007fd36652b27a725db024f61605d515", + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/decred/dcrd/dcrec/secp256k1/v4/com_github_decred_dcrd_dcrec_secp256k1_v4-v4.3.0.zip": "107cfef3902348214eb364253d75f569ea4c7a203d35eea50fa7ce10cd9cf710", 
"https://storage.googleapis.com/cockroach-godeps/gomod/github.com/deepmap/oapi-codegen/com_github_deepmap_oapi_codegen-v1.6.0.zip": "a89ac7cc533495fb5aa9caf2f763394af143928bf38a351495d93e220744dc4e", "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/denisenkom/go-mssqldb/com_github_denisenkom_go_mssqldb-v0.12.0.zip": "568024c79d9e6c39adfa14ed4650c95ae1f976eb9c1ebee77da3c53d200080cf", "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/dennwc/varint/com_github_dennwc_varint-v1.0.0.zip": "2918e66c0fb5a82dbfc8cca1ed34cb8ccff8188e876c0ca25f85b8247e53626f", @@ -524,7 +532,7 @@ DISTDIR_FILES = { "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/gobwas/httphead/com_github_gobwas_httphead-v0.0.0-20180130184737-2c6c146eadee.zip": "5a43ed4a7cd2b063b634f0df5311c0dfa6576683bfc1339f2c5b1b1127fc392b", "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/gobwas/pool/com_github_gobwas_pool-v0.2.0.zip": "52604b1456b92bb310461167a3e6515562f0f4214f01ed6440e3105f78be188f", "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/gobwas/ws/com_github_gobwas_ws-v1.0.2.zip": "f9e5c26e83278f19958c68be7b76ad6711c806b6dae766fad7692d2af867bedd", - "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/goccy/go-json/com_github_goccy_go_json-v0.10.2.zip": "ed9043ee01cc46557c74bcecc625db37ffe3a5c7af219f390a287f44a40c2520", + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/goccy/go-json/com_github_goccy_go_json-v0.10.3.zip": "a14a4805bf6043dfd1e0e923d761f126e6c5a86c416f28f57bfebf1f8bde59e3", "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/gocql/gocql/com_github_gocql_gocql-v0.0.0-20200228163523-cd4b606dd2fb.zip": "40095e622040db188068b66258742938a5b083f6696b46b4a40c0391f0dafcec", "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/godbus/dbus/com_github_godbus_dbus-v0.0.0-20190726142602-4481cbc300e2.zip": "e581c19036afcca2e656efcc4aa99a1348e2f9736177e206990a285d0a1c4c31", "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/godbus/dbus/v5/com_github_godbus_dbus_v5-v5.0.6.zip": "0097f9b4608dc4bf5ca63cd3a9f3334e5cff6be2cab6170cdef075ef97075d89", @@ -702,7 +710,7 @@ DISTDIR_FILES = { "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/kataras/tunnel/com_github_kataras_tunnel-v0.0.4.zip": "1ae8dcc9a6ca3f47c5f8b57767a08b0acd916eceef49c48aa9859547316db8e2", "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/kballard/go-shellquote/com_github_kballard_go_shellquote-v0.0.0-20180428030007-95032a82bc51.zip": "ae4cb7b097dc4eb0c248dff00ed3bbf0f36984c4162ad1d615266084e58bd6cc", "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/kevinburke/go-bindata/com_github_kevinburke_go_bindata-v3.13.0+incompatible.zip": "f087b3a77624a113883bac519ebd1a4de07b70ab2ebe73e61e52325ac30777e0", - "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/kisielk/errcheck/com_github_kisielk_errcheck-v1.6.1-0.20210625163953-8ddee489636a.zip": "99d3220891162cb684f8e05d54f3d0dc58abdd496a2f0cfda7fd4a28917a719e", + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/kisielk/errcheck/com_github_kisielk_errcheck-v1.7.0.zip": "f394d1df1f2332387ce142d98734c5c44fb94e9a8a2af2a9b75aa4ec4a64b963", "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/kisielk/gotool/com_github_kisielk_gotool-v1.0.0.zip": 
"089dbba6e3aa09944fdb40d72acc86694e8bdde01cfc0f40fe0248309eb80a3f", "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/klauspost/asmfmt/com_github_klauspost_asmfmt-v1.3.2.zip": "fa6a350a8677a77e0dbf3664c6baf23aab5c0b60a64b8f3c00299da5d279021f", "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/klauspost/compress/com_github_klauspost_compress-v1.17.8.zip": "648bbc7813dec448eec1a5a467750696bc7e41e1ac0a00b76a967c589826afb6", @@ -726,12 +734,12 @@ DISTDIR_FILES = { "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/labstack/gommon/com_github_labstack_gommon-v0.4.0.zip": "ecb8222666a0058337912bbddb2c3e9ba1f60b356248619f6936eec5bfec640b", "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/leanovate/gopter/com_github_leanovate_gopter-v0.2.5-0.20190402064358-634a59d12406.zip": "67c9724f8c25304bdef375d15c39f98621e0448b5f3c2f55bf66e07b52a67128", "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/leodido/go-urn/com_github_leodido_go_urn-v1.2.1.zip": "8ae6e756f0e919a551e447f286491c08ca36ceaf415c2dde395fd79c1a408d1a", - "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/lestrrat-go/backoff/v2/com_github_lestrrat_go_backoff_v2-v2.0.8.zip": "f5ded39eec777b7de879eb04204aa4b322683dabb22137862b09cb464f5bc617", - "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/lestrrat-go/blackmagic/com_github_lestrrat_go_blackmagic-v1.0.1.zip": "0621ab66f2510093f86f838db09a698027e8cbf08cc0e52bfa7d359b4f1b3745", + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/lestrrat-go/blackmagic/com_github_lestrrat_go_blackmagic-v1.0.2.zip": "2baa5f21e1db4781a11d0ba2fbe8e71323c78875034da61687d80f47ae9c78ce", "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/lestrrat-go/httpcc/com_github_lestrrat_go_httpcc-v1.0.1.zip": "d75132f805ea5cf6275d9af02a5ff3c116ad92ac7fc28e2a22b8fd2e029a3f4c", + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/lestrrat-go/httprc/com_github_lestrrat_go_httprc-v1.0.6.zip": "19c7a7bc6d63165e24a911182fe860166b75d4557262ef031d2fba8351b44707", "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/lestrrat-go/iter/com_github_lestrrat_go_iter-v1.0.2.zip": "991bf0aee428fc1a2c01d548e2c7996dc26871dd0b359c062dfc07b1fb137572", - "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/lestrrat-go/jwx/com_github_lestrrat_go_jwx-v1.2.25.zip": "808d6fe46171b8d6bb0a44188d7886d864f1aad5ce29b8ce1c6048745183c645", - "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/lestrrat-go/option/com_github_lestrrat_go_option-v1.0.0.zip": "8c42afc182c5697b8d5e38f5004ae522fa437247850ca2cf9fe65a6ae18bfaa9", + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/lestrrat-go/jwx/v2/com_github_lestrrat_go_jwx_v2-v2.1.1.zip": "f0ee5e8baf11f8d449ff3cb81b9c4421d4e437b2dc6f22d25001816b251d6d2f", + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/lestrrat-go/option/com_github_lestrrat_go_option-v1.0.1.zip": "3e5614e160680053e07e4970e825e694c2a917741e735ab4d435a396b739ae78", "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/lib/pq/com_github_lib_pq-v1.10.7.zip": "5d339f4296dcf650b4cec6b58e44988f8bbf7a4ca4bb9fff6e0421464efd7612", 
"https://storage.googleapis.com/cockroach-godeps/gomod/github.com/lightstep/lightstep-tracer-common/golang/gogo/com_github_lightstep_lightstep_tracer_common_golang_gogo-v0.0.0-20190605223551-bc2310a04743.zip": "1bf5cd77739238376e20a64307ef850da518861421a44ce7a10a27bc3bef4874", "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/lightstep/lightstep-tracer-go/com_github_lightstep_lightstep_tracer_go-v0.18.1.zip": "b90e4c08ddd881bf09dfef53affd03c9d3b246edf64e055dbea549bd31268131", @@ -932,6 +940,7 @@ DISTDIR_FILES = { "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/seccomp/libseccomp-golang/com_github_seccomp_libseccomp_golang-v0.9.2-0.20220502022130-f33da4d89646.zip": "6bbc0328826c9240ee9c08a59010b49d79d0d1264599811b6ac19f0d97494beb", "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/sectioneight/md-to-godoc/com_github_sectioneight_md_to_godoc-v0.0.0-20161108233149-55e43be6c335.zip": "8b605818df307b414d0a680f147f0baeb37c9166df9e111ede5531cf50124203", "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/securego/gosec/com_github_securego_gosec-v0.0.0-20200203094520-d13bb6d2420c.zip": "e0adea3cd40ba9d690b8054ff1341cf7d035084f50273a4f7bbac803fec3453a", + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/segmentio/asm/com_github_segmentio_asm-v1.2.0.zip": "8e2815672f1ab3049b10185b5494006320c32afb419ccf9f14385bc25ea44def", "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/segmentio/kafka-go/com_github_segmentio_kafka_go-v0.2.0.zip": "b2a88eb5b65fbb75dac0ba5e721cd2cb8e39275d1702a0f97e3c4807d78e8b48", "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/sergi/go-diff/com_github_sergi_go_diff-v1.0.0.zip": "287218ffcd136dbb28ce99a2f162048d8dfa6f97b524c17797964aacde2f8f52", "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/shirou/gopsutil/v3/com_github_shirou_gopsutil_v3-v3.21.12.zip": "ea6f8b430cee40870d8d454aaa5d4c22e84d217a2548a3f755b91a96b1c67a88", @@ -1070,21 +1079,22 @@ DISTDIR_FILES = { "https://storage.googleapis.com/cockroach-godeps/gomod/go.uber.org/zap/org_uber_go_zap-v1.19.0.zip": "6437824258873fed421b7975b8e4cafd1be80cdc15e553beaa887b499dd01420", "https://storage.googleapis.com/cockroach-godeps/gomod/goji.io/io_goji-v2.0.2+incompatible.zip": "1ea69b28e356cb91381ce2339004fcf144ad1b268c9e3497c9ef304751ae0bb3", "https://storage.googleapis.com/cockroach-godeps/gomod/golang.org/x/arch/org_golang_x_arch-v0.0.0-20180920145803-b19384d3c130.zip": "9f67b677a3fefc503111d9aa7df8bacd2677411b0fcb982eb1654aa6d14cc3f8", - "https://storage.googleapis.com/cockroach-godeps/gomod/golang.org/x/crypto/org_golang_x_crypto-v0.23.0.zip": "65d22b9f54aef5f7f064900d2ecf8d8b231729aebc46c3b7ca56ff897fb70b57", + "https://storage.googleapis.com/cockroach-godeps/gomod/golang.org/x/crypto/org_golang_x_crypto-v0.26.0.zip": "ec96acfe28be3ff2fb14201c5f51132f0e24c7d0d6f3201a8aa69c84f989d014", "https://storage.googleapis.com/cockroach-godeps/gomod/golang.org/x/exp/org_golang_x_exp-v0.0.0-20230626212559-97b1e661b5df.zip": "af32025a065aa599a3e5b01048602a53e2b6e3938b12d33fa2a5f057be9759fa", "https://storage.googleapis.com/cockroach-godeps/gomod/golang.org/x/exp/typeparams/org_golang_x_exp_typeparams-v0.0.0-20221208152030-732eee02a75a.zip": "9bd73f186851c6229484f486981f608d16e2b86acbbef6f4f7cc0480a508a4a4", "https://storage.googleapis.com/cockroach-godeps/gomod/golang.org/x/image/org_golang_x_image-v0.0.0-20210628002857-a66eb6448b8d.zip": 
"70cf423fad9be160a88fbf01bc1897efd888f915a6d7ba0dd41ca7085f75e06e", "https://storage.googleapis.com/cockroach-godeps/gomod/golang.org/x/lint/org_golang_x_lint-v0.0.0-20210508222113-6edffad5e616.zip": "0a4a5ebd2b1d79e7f480cbf5a54b45a257ae1ec9d11f01688efc5c35268d4603", "https://storage.googleapis.com/cockroach-godeps/gomod/golang.org/x/mobile/org_golang_x_mobile-v0.0.0-20190719004257-d2bd2a29d028.zip": "6b946c7da47acf3b6195336fd071bfc73d543cefab73f2d27528c5dc1dc829ec", - "https://storage.googleapis.com/cockroach-godeps/gomod/golang.org/x/mod/org_golang_x_mod-v0.14.0.zip": "98a122c92ad55deef674f6546b4c295ed93d106178dd24ec40449ae33b41037a", - "https://storage.googleapis.com/cockroach-godeps/gomod/golang.org/x/net/org_golang_x_net-v0.24.0.zip": "389940dbee4a10516de85368bb1a550d6df814ed1f893db18de8def9168147c7", + "https://storage.googleapis.com/cockroach-godeps/gomod/golang.org/x/mod/org_golang_x_mod-v0.20.0.zip": "3c3528c39639b7cd699c121c100ddb71ab49f94bff257a4a3935e3ae9e8571fc", + "https://storage.googleapis.com/cockroach-godeps/gomod/golang.org/x/net/org_golang_x_net-v0.28.0.zip": "c6f7bde4bb418d1f5ee5dc437d09ce9f10743ddba043cdca82eb57ddeb18d6da", "https://storage.googleapis.com/cockroach-godeps/gomod/golang.org/x/oauth2/org_golang_x_oauth2-v0.7.0.zip": "b682f8cf62ed36f3bec9f8a832ff61a2af1124f31f42c4e1e3f3efd23d88f93f", "https://storage.googleapis.com/cockroach-godeps/gomod/golang.org/x/perf/org_golang_x_perf-v0.0.0-20230113213139-801c7ef9e5c5.zip": "bc1b902e645fdd5d210b7db8f3280833af225b131dab5842d7a6d32a676f80f5", - "https://storage.googleapis.com/cockroach-godeps/gomod/golang.org/x/sync/org_golang_x_sync-v0.7.0.zip": "20b01085240e661bffc7f59383f21b90f112d669784220c6e59c801243216d22", - "https://storage.googleapis.com/cockroach-godeps/gomod/golang.org/x/sys/org_golang_x_sys-v0.20.0.zip": "3f826b191eab1ebda925feb551d334e37e1b5865d1aa790fade46598811a8b1a", - "https://storage.googleapis.com/cockroach-godeps/gomod/golang.org/x/term/org_golang_x_term-v0.20.0.zip": "840eacc0ffb306dcb4b0f5bf6e071c91d2e7957fcc604eec4e73c0fc22f2920c", - "https://storage.googleapis.com/cockroach-godeps/gomod/golang.org/x/text/org_golang_x_text-v0.15.0.zip": "13faee7e46c8a18c8a28f3eceebf15db6d724b9a108c3c0482a6d2e58ba73a73", - "https://storage.googleapis.com/cockroach-godeps/gomod/golang.org/x/tools/org_golang_x_tools-v0.17.0.zip": "f4c5fce4cd013419429592043ce0549f47060dfe6f44cca940224cd48c3e28ad", + "https://storage.googleapis.com/cockroach-godeps/gomod/golang.org/x/sync/org_golang_x_sync-v0.8.0.zip": "c79473c265ca571d389bf64fa1e7b2d8999b4ab3eb7af5e3bc185644783a1087", + "https://storage.googleapis.com/cockroach-godeps/gomod/golang.org/x/sys/org_golang_x_sys-v0.23.0.zip": "5bf721c4404580d5350d0a0297c1f48f07c05db8a0d2a20677e6cb295380b9a7", + "https://storage.googleapis.com/cockroach-godeps/gomod/golang.org/x/telemetry/org_golang_x_telemetry-v0.0.0-20240521205824-bda55230c457.zip": "8e8649337973d064cc44fa858787db7d0eb90f0806807349766d180ed6889f5c", + "https://storage.googleapis.com/cockroach-godeps/gomod/golang.org/x/term/org_golang_x_term-v0.23.0.zip": "2597a62b487b952c11c89b2001551af1fe1d29c484388ec1c3f5e3be7ff58ba5", + "https://storage.googleapis.com/cockroach-godeps/gomod/golang.org/x/text/org_golang_x_text-v0.17.0.zip": "48464f2ab2f988ca8b7b0a9d098e3664224c3b128629b5a9cc08025ee4a7e4ec", + "https://storage.googleapis.com/cockroach-godeps/gomod/golang.org/x/tools/org_golang_x_tools-v0.24.0.zip": "92607be1cacf4647fd31b19ee64b1a7c198178f1005c75371e38e7b08fb138e7", 
"https://storage.googleapis.com/cockroach-godeps/gomod/golang.org/x/xerrors/org_golang_x_xerrors-v0.0.0-20220907171357-04be3eba64a2.zip": "b9c481db33c4b682ba8ba348018ddbd2155bd227cc38ff9f6b4cb2b74bbc3c14", "https://storage.googleapis.com/cockroach-godeps/gomod/gonum.org/v1/gonum/org_gonum_v1_gonum-v0.11.0.zip": "abdfee15ce7c9d2cd96b66468d3ae28d6054add4efbfc1b15fadfe3613f3d362", "https://storage.googleapis.com/cockroach-godeps/gomod/gonum.org/v1/netlib/org_gonum_v1_netlib-v0.0.0-20190331212654-76723241ea4e.zip": "ed4dca5026c9ab5410d23bbe21c089433ca58a19bd2902311c6a91791142a687", diff --git a/build/github/cockroach-microbench-ci.sh b/build/github/cockroach-microbench-ci.sh index 477e62071bcd..85be082d9141 100755 --- a/build/github/cockroach-microbench-ci.sh +++ b/build/github/cockroach-microbench-ci.sh @@ -33,7 +33,7 @@ mkdir -p "$output_dir" # Run benchmarks and clean output # running count=4 here because that's the minimum required for a comparison bazel test //pkg/sql/tests:tests_test \ - --config=use_ci_timeouts \ + --test_timeout=1800 \ --strategy=TestRunner=sandboxed \ --jobs 100 \ --config=crosslinux \ @@ -44,7 +44,7 @@ bazel test //pkg/sql/tests:tests_test \ --test_sharding_strategy=disabled \ --test_arg=-test.cpu --test_arg=1 \ --test_arg=-test.v \ - --test_arg=-test.count=4 \ + --test_arg=-test.count=10 \ --test_arg=-test.benchmem \ --crdb_test_off \ --test_output=all > "$log_output_file_path" @@ -64,7 +64,7 @@ if ! gcloud storage cp "$storage_bucket_url/$GITHUB_BASE_REF/$BASE_SHA.log" "$cl exit $success_exit_status fi -if ! $roachprod_microbench_dir/roachprod-microbench compare "$cleaned_current_dir" "$cleaned_base_dir" --threshold="$threshold" --publish-sheets=false; then +if ! $roachprod_microbench_dir/roachprod-microbench compare "$cleaned_current_dir" "$cleaned_base_dir" --threshold="$threshold"; then echo "There is an error during comparison. If it's a perf regression, please try to fix it. This won't block your change for merging currently." 
exit $error_exit_status fi diff --git a/build/patches/com_github_kisielk_errcheck.patch b/build/patches/com_github_kisielk_errcheck.patch index 6b4410a0e9a7..32c91fe1927a 100644 --- a/build/patches/com_github_kisielk_errcheck.patch +++ b/build/patches/com_github_kisielk_errcheck.patch @@ -1,18 +1,15 @@ diff -urN a/errcheck/analyzer.go b/errcheck/analyzer.go --- a/errcheck/analyzer.go +++ b/errcheck/analyzer.go -@@ -3,9 +3,9 @@ package errcheck - import ( - "fmt" +@@ -5,6 +5,7 @@ "go/ast" -- "go/token" "reflect" "regexp" + "strings" "golang.org/x/tools/go/analysis" ) -@@ -21,6 +21,7 @@ +@@ -20,6 +21,7 @@ argBlank bool argAsserts bool argExcludeFile string @@ -20,7 +17,7 @@ diff -urN a/errcheck/analyzer.go b/errcheck/analyzer.go argExcludeOnly bool ) -@@ -28,6 +29,7 @@ +@@ -27,6 +29,7 @@ Analyzer.Flags.BoolVar(&argBlank, "blank", false, "if true, check for errors assigned to blank identifier") Analyzer.Flags.BoolVar(&argAsserts, "assert", false, "if true, check for ignored type assertion results") Analyzer.Flags.StringVar(&argExcludeFile, "exclude", "", "Path to a file containing a list of functions to exclude from checking") @@ -28,7 +25,7 @@ diff -urN a/errcheck/analyzer.go b/errcheck/analyzer.go Analyzer.Flags.BoolVar(&argExcludeOnly, "excludeonly", false, "Use only excludes from exclude file") } -@@ -39,7 +41,14 @@ +@@ -37,7 +40,14 @@ exclude[name] = true } } @@ -44,14 +41,14 @@ diff -urN a/errcheck/analyzer.go b/errcheck/analyzer.go excludes, err := ReadExcludes(argExcludeFile) if err != nil { return nil, fmt.Errorf("Could not read exclude file: %v\n", err) -@@ -65,8 +74,9 @@ +@@ -63,8 +73,9 @@ ast.Walk(v, f) for _, err := range v.errors { + fsetFile := pass.Fset.File(f.Pos()) pass.Report(analysis.Diagnostic{ -- Pos: token.Pos(int(f.Pos()) + err.Pos.Offset), -+ Pos: fsetFile.Pos(err.Pos.Offset), - Message: "unchecked error", +- Pos: pass.Fset.File(f.Pos()).Pos(err.Pos.Offset), ++ Pos: fsetFile.Pos(err.Pos.Offset), + Message: "unchecked error", + Category: "errcheck", }) - } diff --git a/build/teamcity-support.sh b/build/teamcity-support.sh index 39b9863b76a1..cfb3aa664213 100755 --- a/build/teamcity-support.sh +++ b/build/teamcity-support.sh @@ -311,3 +311,10 @@ check_workspace_clean() { fi echo "##teamcity[testFinished name='CheckGeneratedCode/$1']" } + +# Check if a given GCS path exists +function check_gcs_path_exists() { + local path=$1 + gsutil ls "$path" &>/dev/null + return +} diff --git a/build/teamcity/cockroach/nightlies/microbenchmark_build_support.sh b/build/teamcity/cockroach/nightlies/microbenchmark_build_support.sh index 8f230348cfc6..99888e42766d 100755 --- a/build/teamcity/cockroach/nightlies/microbenchmark_build_support.sh +++ b/build/teamcity/cockroach/nightlies/microbenchmark_build_support.sh @@ -9,19 +9,11 @@ source "$dir/teamcity-support.sh" google_credentials="$GOOGLE_EPHEMERAL_CREDENTIALS" log_into_gcloud -# Check if a given GCS path exists -function check_gcs_path_exists() { - local path=$1 - gsutil ls "$path" &>/dev/null - return -} - # Build and copy binaries, for the given SHA, to GCS bucket function build_and_upload_binaries() { local sha=$1 - archive_name=${BENCH_PACKAGE//\//-} - archive_name="$sha-${archive_name/.../all}.tar.gz" - if check_gcs_path_exists "gs://$BUILDS_BUCKET/builds/$archive_name"; then + archive_name="$sha-${SANITIZED_BENCH_PACKAGE}.tar.gz" + if check_gcs_path_exists "gs://$BENCH_BUCKET/builds/$archive_name"; then echo "Build for $sha already exists. Skipping..." 
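# check_gcs_path_exists (now shared via build/teamcity-support.sh, added above) simply propagates the exit status of `gsutil ls`, so this branch is taken exactly when the archive object is already present in the bucket.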
return fi @@ -57,7 +49,7 @@ EOF out_dir=$(mktemp -d) tar -chf - -C "$stage_dir" . | ./bin/roachprod-microbench compress > "$out_dir/$archive_name" rm -rf "$stage_dir" - gsutil -q -m cp "$out_dir/$archive_name" "gs://$BUILDS_BUCKET/builds/$archive_name" + gsutil -q -m cp "$out_dir/$archive_name" "gs://$BENCH_BUCKET/builds/$archive_name" rm -rf "$out_dir" } diff --git a/build/teamcity/cockroach/nightlies/microbenchmark_weekly.sh b/build/teamcity/cockroach/nightlies/microbenchmark_weekly.sh index ee934bbf4202..c0f374b56927 100755 --- a/build/teamcity/cockroach/nightlies/microbenchmark_weekly.sh +++ b/build/teamcity/cockroach/nightlies/microbenchmark_weekly.sh @@ -1,12 +1,12 @@ #!/usr/bin/env bash # # This script runs microbenchmarks across a roachprod cluster. It will build microbenchmark binaries -# for the given revisions if they do not already exist in the BUILDS_BUCKET. It will then create a +# for the given revisions if they do not already exist in the BENCH_BUCKET. It will then create a # roachprod cluster, stage the binaries on the cluster, and run the microbenchmarks. # Parameters (and suggested defaults): # BENCH_REVISION: revision to build and run benchmarks against (default: master) # BENCH_COMPARE_REVISION: revision to compare against (default: latest release branch) -# BUILDS_BUCKET: GCS bucket to store the built binaries +# BENCH_BUCKET: GCS bucket to store the built binaries and compare cache (default: cockroach-microbench) # BENCH_PACKAGE: package to build and run benchmarks against (default: ./pkg/...) # BENCH_ITERATIONS: number of iterations to run each microbenchmark (default: 10) # GCE_NODE_COUNT: number of nodes to use in the roachprod cluster (default: 12) @@ -26,8 +26,22 @@ dir="$(dirname $(dirname $(dirname $(dirname "${0}"))))" source "$dir/teamcity-support.sh" # For $root source "$dir/teamcity-bazel-support.sh" # For run_bazel output_dir="./artifacts/microbench" +remote_dir="/mnt/data1" +benchmarks_commit=$(git rev-parse HEAD) exit_status=0 +# Set up credentials +google_credentials="$GOOGLE_EPHEMERAL_CREDENTIALS" +log_into_gcloud +export GOOGLE_APPLICATION_CREDENTIALS="$PWD/.google-credentials.json" +export ROACHPROD_USER=teamcity +export ROACHPROD_CLUSTER=teamcity-microbench-${TC_BUILD_ID} +generate_ssh_key + +# Sanitize the package name for use in paths +SANITIZED_BENCH_PACKAGE=${BENCH_PACKAGE//\//-} +export SANITIZED_BENCH_PACKAGE=${SANITIZED_BENCH_PACKAGE/.../all} + # Build roachprod and roachprod-microbench run_bazel <<'EOF' bazel build --config ci --config crosslinux //pkg/cmd/roachprod //pkg/cmd/roachprod-microbench @@ -61,17 +75,24 @@ for rev in "${revisions[@]}"; do sha_arr+=("$sha") done +# Check if the baseline cache exists and copy it to the output directory. +baseline_cache_path="gs://$BENCH_BUCKET/cache/$GCE_MACHINE_TYPE/$SANITIZED_BENCH_PACKAGE/${sha_arr[1]}" +declare -a build_sha_arr +build_sha_arr+=("${sha_arr[0]}") +if check_gcs_path_exists "$baseline_cache_path"; then + mkdir -p "$output_dir/baseline" + gsutil -mq cp -r "$baseline_cache_path/*" "$output_dir/baseline" + echo "Baseline cache found for ${name_arr[1]}. Using it for comparison." +else + build_sha_arr+=("${sha_arr[1]}") +fi + # Builds binaries for the given SHAs.
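# build_sha_arr always holds the experiment SHA; the baseline SHA was appended above only when no cached baseline results were found, so a cache hit skips rebuilding the baseline entirely.
# A hypothetical manual invocation of this script, with illustrative values for the parameters documented in the header (machine type and zone are guesses, not stated defaults; GOOGLE_EPHEMERAL_CREDENTIALS and the BENCH_REVISION/BENCH_COMPARE_REVISION pair are also required):
#   BENCH_BUCKET=cockroach-microbench BENCH_PACKAGE=./pkg/... BENCH_ITERATIONS=10 \
#   GCE_NODE_COUNT=12 GCE_MACHINE_TYPE=n2-standard-16 GCE_ZONE=us-east1-b \
#   TC_BUILD_ID=local-run build/teamcity/cockroach/nightlies/microbenchmark_weekly.sh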
-BAZEL_SUPPORT_EXTRA_DOCKER_ARGS="-e GOOGLE_EPHEMERAL_CREDENTIALS -e BENCH_PACKAGE -e BUILDS_BUCKET" \ - run_bazel build/teamcity/cockroach/nightlies/microbenchmark_build_support.sh "${sha_arr[@]}" +BAZEL_SUPPORT_EXTRA_DOCKER_ARGS="-e GOOGLE_EPHEMERAL_CREDENTIALS -e SANITIZED_BENCH_PACKAGE -e BENCH_PACKAGE -e BENCH_BUCKET" \ + run_bazel build/teamcity/cockroach/nightlies/microbenchmark_build_support.sh "${build_sha_arr[@]}" -# Set up credentials (needs to be done after the build phase) -google_credentials="$GOOGLE_EPHEMERAL_CREDENTIALS" +# Log into gcloud again (credentials are removed by teamcity-support in the build script) log_into_gcloud -export GOOGLE_APPLICATION_CREDENTIALS="$PWD/.google-credentials.json" -export ROACHPROD_USER=teamcity -export ROACHPROD_CLUSTER=teamcity-microbench-${TC_BUILD_ID} -generate_ssh_key # Create roachprod cluster ./bin/roachprod create "$ROACHPROD_CLUSTER" -n "$GCE_NODE_COUNT" \ @@ -81,19 +102,21 @@ generate_ssh_key --gce-zones="$GCE_ZONE" \ --gce-managed \ --gce-use-spot \ - --os-volume-size=384 + --local-ssd=false \ + --gce-pd-volume-size=384 \ + --os-volume-size=10 # Stage binaries on the cluster -for sha in "${sha_arr[@]}"; do +for sha in "${build_sha_arr[@]}"; do archive_name=${BENCH_PACKAGE//\//-} archive_name="$sha-${archive_name/.../all}.tar.gz" - ./bin/roachprod-microbench stage --quiet "$ROACHPROD_CLUSTER" "gs://$BUILDS_BUCKET/builds/$archive_name" "$sha" + ./bin/roachprod-microbench stage --quiet "$ROACHPROD_CLUSTER" "gs://$BENCH_BUCKET/builds/$archive_name" "$remote_dir/$sha" done # Execute microbenchmarks ./bin/roachprod-microbench run "$ROACHPROD_CLUSTER" \ - --binaries experiment="${sha_arr[0]}" \ - --binaries baseline="${sha_arr[1]}" \ + --binaries experiment="$remote_dir/${build_sha_arr[0]}" \ + ${build_sha_arr[1]:+--binaries baseline="$remote_dir/${build_sha_arr[1]}"} \ --output-dir="$output_dir" \ --iterations "$BENCH_ITERATIONS" \ --shell="$BENCH_SHELL" \ @@ -104,10 +127,39 @@ done -- "$TEST_ARGS" \ || exit_status=$? +# Write metadata to a file for each set of benchmarks +declare -A metadata +metadata["run-time"]=$(date -u +"%Y-%m-%dT%H:%M:%SZ") +metadata["baseline-commit"]=${sha_arr[1]} +metadata["benchmarks-commit"]=$benchmarks_commit +metadata["machine"]=$GCE_MACHINE_TYPE +metadata["goarch"]=amd64 +metadata["goos"]=linux +metadata["repository"]=cockroach +echo "" > "$output_dir/baseline/metadata.log" +for key in "${!metadata[@]}"; do + echo "$key": "${metadata[$key]}" >> "$output_dir/baseline/metadata.log" +done + +metadata["experiment-commit"]=${sha_arr[0]} +metadata["experiment-commit-time"]=$(git show -s --format=%cI "${sha_arr[0]}") +for key in "${!metadata[@]}"; do + echo "$key": "${metadata[$key]}" >> "$output_dir/experiment/metadata.log" +done + +# Push baseline to cache if we ran both benchmarks +if [[ ${#build_sha_arr[@]} -gt 1 ]]; then + gsutil -mq cp -r "$output_dir/baseline" "$baseline_cache_path" +fi + +# Push experiment results to cache +experiment_cache_path="gs://$BENCH_BUCKET/cache/$GCE_MACHINE_TYPE/$SANITIZED_BENCH_PACKAGE/${sha_arr[0]}" +gsutil -mq cp -r "$output_dir/experiment" "$experiment_cache_path" -# Compare the results, if both sets of benchmarks were run -if [ -d "$output_dir/0" ] && [ "$(ls -A "$output_dir/0")" ] \ -&& [ -d "$output_dir/1" ] && [ "$(ls -A "$output_dir/1")" ]; then +# Compare the results, if both sets of benchmarks were run. +# These should exist if the benchmarks were run successfully. 
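+# Each directory must both exist and be non-empty ([ -d "$dir" ] && [ "$(ls -A "$dir")" ]); note that baseline/ may have been populated from the GCS cache above rather than by a fresh run.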
+if [ -d "$output_dir/experiment" ] && [ "$(ls -A "$output_dir/experiment")" ] \ +&& [ -d "$output_dir/baseline" ] && [ "$(ls -A "$output_dir/baseline")" ]; then # Set up slack token only if the build was triggered by TeamCity (not a manual run) if [ -n "${TRIGGERED_BUILD:-}" ]; then slack_token="${MICROBENCH_SLACK_TOKEN}" diff --git a/build/teamcity/cockroach/nightlies/roachtest_nightly_aws.sh b/build/teamcity/cockroach/nightlies/roachtest_nightly_aws.sh index 014f0a8035ea..51a557403e29 100755 --- a/build/teamcity/cockroach/nightlies/roachtest_nightly_aws.sh +++ b/build/teamcity/cockroach/nightlies/roachtest_nightly_aws.sh @@ -7,5 +7,5 @@ dir="$(dirname $(dirname $(dirname $(dirname "${0}"))))" source "$dir/teamcity-support.sh" # For $root source "$dir/teamcity-bazel-support.sh" # For run_bazel -BAZEL_SUPPORT_EXTRA_DOCKER_ARGS="-e LITERAL_ARTIFACTS_DIR=$root/artifacts -e AWS_ACCESS_KEY_ID -e AWS_ACCESS_KEY_ID_ASSUME_ROLE -e AWS_KMS_KEY_ARN_A -e AWS_KMS_KEY_ARN_B -e AWS_KMS_REGION_A -e AWS_KMS_REGION_B -e AWS_ROLE_ARN -e AWS_SECRET_ACCESS_KEY -e AWS_SECRET_ACCESS_KEY_ASSUME_ROLE -e BUILD_VCS_NUMBER -e CLOUD -e COCKROACH_DEV_LICENSE -e TESTS -e COUNT -e GITHUB_API_TOKEN -e GITHUB_ORG -e GITHUB_REPO -e GOOGLE_EPHEMERAL_CREDENTIALS -e SLACK_TOKEN -e TC_BUILDTYPE_ID -e TC_BUILD_BRANCH -e TC_BUILD_ID -e TC_SERVER_URL -e SELECT_PROBABILITY -e COCKROACH_RANDOM_SEED -e ROACHTEST_ASSERTIONS_ENABLED_SEED -e ROACHTEST_FORCE_RUN_INVALID_RELEASE_BRANCH -e ARM_PROBABILITY -e USE_SPOT -e SELECTIVE_TESTS -e SFUSER -e SFPASSWORD -e SIDE_EYE_API_TOKEN" \ +BAZEL_SUPPORT_EXTRA_DOCKER_ARGS="-e LITERAL_ARTIFACTS_DIR=$root/artifacts -e AWS_ACCESS_KEY_ID -e AWS_ACCESS_KEY_ID_ASSUME_ROLE -e AWS_KMS_KEY_ARN_A -e AWS_KMS_KEY_ARN_B -e AWS_KMS_REGION_A -e AWS_KMS_REGION_B -e AWS_ROLE_ARN -e AWS_SECRET_ACCESS_KEY -e AWS_SECRET_ACCESS_KEY_ASSUME_ROLE -e BUILD_VCS_NUMBER -e CLOUD -e COCKROACH_DEV_LICENSE -e TESTS -e COUNT -e GITHUB_API_TOKEN -e GITHUB_ORG -e GITHUB_REPO -e GOOGLE_EPHEMERAL_CREDENTIALS -e SLACK_TOKEN -e TC_BUILDTYPE_ID -e TC_BUILD_BRANCH -e TC_BUILD_ID -e TC_SERVER_URL -e SELECT_PROBABILITY -e COCKROACH_RANDOM_SEED -e ROACHTEST_ASSERTIONS_ENABLED_SEED -e ROACHTEST_FORCE_RUN_INVALID_RELEASE_BRANCH -e ARM_PROBABILITY -e USE_SPOT -e SELECTIVE_TESTS -e SFUSER -e SFPASSWORD -e SIDE_EYE_API_TOKEN -e COCKROACH_EA_PROBABILITY" \ run_bazel build/teamcity/cockroach/nightlies/roachtest_nightly_impl.sh diff --git a/build/teamcity/cockroach/nightlies/roachtest_nightly_azure.sh b/build/teamcity/cockroach/nightlies/roachtest_nightly_azure.sh index 1c05e5ea386c..aced8614b022 100755 --- a/build/teamcity/cockroach/nightlies/roachtest_nightly_azure.sh +++ b/build/teamcity/cockroach/nightlies/roachtest_nightly_azure.sh @@ -7,5 +7,5 @@ dir="$(dirname $(dirname $(dirname $(dirname "${0}"))))" source "$dir/teamcity-support.sh" # For $root source "$dir/teamcity-bazel-support.sh" # For run_bazel -BAZEL_SUPPORT_EXTRA_DOCKER_ARGS="-e LITERAL_ARTIFACTS_DIR=$root/artifacts -e AZURE_CLIENT_ID -e AZURE_CLIENT_SECRET -e AZURE_SUBSCRIPTION_ID -e AZURE_TENANT_ID -e BUILD_VCS_NUMBER -e CLOUD -e COCKROACH_DEV_LICENSE -e TESTS -e COUNT -e GITHUB_API_TOKEN -e GITHUB_ORG -e GITHUB_REPO -e GOOGLE_EPHEMERAL_CREDENTIALS -e SLACK_TOKEN -e TC_BUILDTYPE_ID -e TC_BUILD_BRANCH -e TC_BUILD_ID -e TC_SERVER_URL -e SELECT_PROBABILITY -e COCKROACH_RANDOM_SEED -e ROACHTEST_ASSERTIONS_ENABLED_SEED -e ROACHTEST_FORCE_RUN_INVALID_RELEASE_BRANCH -e CLEAR_CLUSTER_CACHE -e USE_SPOT -e SELECTIVE_TESTS -e SFUSER -e SFPASSWORD -e SIDE_EYE_API_TOKEN" \ 
+BAZEL_SUPPORT_EXTRA_DOCKER_ARGS="-e LITERAL_ARTIFACTS_DIR=$root/artifacts -e AZURE_CLIENT_ID -e AZURE_CLIENT_SECRET -e AZURE_SUBSCRIPTION_ID -e AZURE_TENANT_ID -e BUILD_VCS_NUMBER -e CLOUD -e COCKROACH_DEV_LICENSE -e TESTS -e COUNT -e GITHUB_API_TOKEN -e GITHUB_ORG -e GITHUB_REPO -e GOOGLE_EPHEMERAL_CREDENTIALS -e SLACK_TOKEN -e TC_BUILDTYPE_ID -e TC_BUILD_BRANCH -e TC_BUILD_ID -e TC_SERVER_URL -e SELECT_PROBABILITY -e COCKROACH_RANDOM_SEED -e ROACHTEST_ASSERTIONS_ENABLED_SEED -e ROACHTEST_FORCE_RUN_INVALID_RELEASE_BRANCH -e CLEAR_CLUSTER_CACHE -e USE_SPOT -e SELECTIVE_TESTS -e SFUSER -e SFPASSWORD -e SIDE_EYE_API_TOKEN -e COCKROACH_EA_PROBABILITY" \ run_bazel build/teamcity/cockroach/nightlies/roachtest_nightly_impl.sh diff --git a/build/teamcity/cockroach/nightlies/roachtest_nightly_gce.sh b/build/teamcity/cockroach/nightlies/roachtest_nightly_gce.sh index 089f6a76dbc9..7f10eaacb8ad 100755 --- a/build/teamcity/cockroach/nightlies/roachtest_nightly_gce.sh +++ b/build/teamcity/cockroach/nightlies/roachtest_nightly_gce.sh @@ -7,5 +7,5 @@ dir="$(dirname $(dirname $(dirname $(dirname "${0}"))))" source "$dir/teamcity-support.sh" # For $root source "$dir/teamcity-bazel-support.sh" # For run_bazel -BAZEL_SUPPORT_EXTRA_DOCKER_ARGS="-e LITERAL_ARTIFACTS_DIR=$root/artifacts -e BUILD_VCS_NUMBER -e CLOUD -e COCKROACH_DEV_LICENSE -e TESTS -e COUNT -e GITHUB_API_TOKEN -e GITHUB_ORG -e GITHUB_REPO -e GOOGLE_EPHEMERAL_CREDENTIALS -e GOOGLE_KMS_KEY_A -e GOOGLE_KMS_KEY_B -e GOOGLE_CREDENTIALS_ASSUME_ROLE -e GOOGLE_SERVICE_ACCOUNT -e SLACK_TOKEN -e TC_BUILDTYPE_ID -e TC_BUILD_BRANCH -e TC_BUILD_ID -e TC_SERVER_URL -e SELECT_PROBABILITY -e COCKROACH_RANDOM_SEED -e ROACHTEST_ASSERTIONS_ENABLED_SEED -e ROACHTEST_FORCE_RUN_INVALID_RELEASE_BRANCH -e GRAFANA_SERVICE_ACCOUNT_JSON -e GRAFANA_SERVICE_ACCOUNT_AUDIENCE -e ARM_PROBABILITY -e USE_SPOT -e SELECTIVE_TESTS -e SFUSER -e SFPASSWORD -e SIDE_EYE_API_TOKEN" \ +BAZEL_SUPPORT_EXTRA_DOCKER_ARGS="-e LITERAL_ARTIFACTS_DIR=$root/artifacts -e BUILD_VCS_NUMBER -e CLOUD -e COCKROACH_DEV_LICENSE -e TESTS -e COUNT -e GITHUB_API_TOKEN -e GITHUB_ORG -e GITHUB_REPO -e GOOGLE_EPHEMERAL_CREDENTIALS -e GOOGLE_KMS_KEY_A -e GOOGLE_KMS_KEY_B -e GOOGLE_CREDENTIALS_ASSUME_ROLE -e GOOGLE_SERVICE_ACCOUNT -e SLACK_TOKEN -e TC_BUILDTYPE_ID -e TC_BUILD_BRANCH -e TC_BUILD_ID -e TC_SERVER_URL -e SELECT_PROBABILITY -e COCKROACH_RANDOM_SEED -e ROACHTEST_ASSERTIONS_ENABLED_SEED -e ROACHTEST_FORCE_RUN_INVALID_RELEASE_BRANCH -e GRAFANA_SERVICE_ACCOUNT_JSON -e GRAFANA_SERVICE_ACCOUNT_AUDIENCE -e ARM_PROBABILITY -e USE_SPOT -e SELECTIVE_TESTS -e SFUSER -e SFPASSWORD -e SIDE_EYE_API_TOKEN -e COCKROACH_EA_PROBABILITY" \ run_bazel build/teamcity/cockroach/nightlies/roachtest_nightly_impl.sh diff --git a/build/teamcity/cockroach/nightlies/roachtest_nightly_impl.sh b/build/teamcity/cockroach/nightlies/roachtest_nightly_impl.sh index 42c4fddddb7c..c70792f44d95 100755 --- a/build/teamcity/cockroach/nightlies/roachtest_nightly_impl.sh +++ b/build/teamcity/cockroach/nightlies/roachtest_nightly_impl.sh @@ -62,6 +62,7 @@ fi build/teamcity-roachtest-invoke.sh \ --metamorphic-encryption-probability=0.5 \ --metamorphic-arm64-probability="${ARM_PROBABILITY:-0.5}" \ + --metamorphic-cockroach-ea-probability="${COCKROACH_EA_PROBABILITY:-0.2}" \ --select-probability="${select_probability}" \ --use-spot="${USE_SPOT:-auto}" \ --cloud="${CLOUD}" \ diff --git a/build/variables.mk b/build/variables.mk deleted file mode 100644 index 687569970b39..000000000000 --- a/build/variables.mk +++ /dev/null @@ -1,154 +0,0 @@ -# 
Code generated by Make. DO NOT EDIT. -# GENERATED FILE DO NOT EDIT -define VALID_VARS - ACCEPTANCETIMEOUT - ANTITHESIS - BASE_CGO_FLAGS_FILES - BENCHES - BENCHTIMEOUT - BUILDINFO - BUILDINFO_TAG - BUILDTARGET - BUILDTYPE - BUILD_DIR - BUILD_TAGGED_RELEASE - CFLAGS - CGO_CFLAGS - CGO_CXXFLAGS - CGO_FLAGS_FILES - CGO_LDFLAGS - CGO_PKGS - CGO_SUFFIXED_FLAGS_FILES - CGO_UNSUFFIXED_FLAGS_FILES - CLEANUP_TARGETS - CLUSTER - CLUSTER_UI_JS - COCKROACH - COCKROACHOSS - COCKROACHSHORT - COCKROACHSQL - COREOS_PATH - CXXFLAGS - C_DEPS_DIR - C_LIBS_CCL - C_LIBS_COMMON - C_LIBS_DYNAMIC - C_LIBS_OSS - C_LIBS_SHORT - DESTDIR - DOCGEN_TARGETS - DUPLFLAGS - DYN_EXT - DYN_LIB_DIR - ERRORS_PATH - ERRORS_PROTO - ESLINT_PLUGIN_CRDB - EVENTLOG_PROTOS - EVENTPBGEN_PKG - EVENTPB_PROTOS - EXECGEN_TARGETS - EXTRA_XCMAKE_FLAGS - EXTRA_XCONFIGURE_FLAGS - FILES - FIND_RELEVANT - GENERATED_TARGETS - GEOS_DIR - GEOS_NATIVE_LIB_DIR - GEOS_SRC_DIR - GITHOOKS - GITHOOKSDIR - GIT_DIR - GO - GOEXE - GOFLAGS - GOGOPROTO_PROTO - GOGO_PROTOBUF_PATH - GOPATH - GORACE - GOTESTFLAGS - GO_ENV_CC - GO_ENV_CXX - GO_INSTALL - GO_PROTOS - GO_SOURCES - GRPC_GATEWAY_GOOGLEAPIS_PACKAGE - GRPC_GATEWAY_GOOGLEAPIS_PATH - GW_PROTOS - GW_SERVER_PROTOS - GW_SOURCES - GW_TS_PROTOS - HOST_TRIPLE - IGNORE_GOVERS - INSTALL - INSTRUMENTATION_TMP - INSTRUMENTOR_BIN - INSTRUMENTOR_EXCLUDE_VENDOR - JEMALLOC_DIR - JEMALLOC_SRC_DIR - JS_PROTOS_CCL - KRB_CPPFLAGS - KRB_DIR - LC_ALL - LDFLAGS - LIBGEOS - LIBJEMALLOC - LIBPROJ - LINTTIMEOUT - LOGSINKDOC_DEP - LOG_TARGETS - MACOSX_DEPLOYMENT_TARGET - MAKECMDGOALS - MAKEFLAGS - MAKE_TERMERR - NCPUS - NODE_RUN - OPTGEN_TARGETS - PATH - PBJS - PBTS - PKG - PORT - PREREQS - PROJ_DIR - PROJ_SRC_DIR - PROMETHEUS_PATH - PROTOBUF_TARGETS - PROTO_MAPPINGS - RACETIMEOUT - SETTINGS_DOC_PAGES - SHELL - SQLPARSER_TARGETS - STARTFLAGS - STRESSFLAGS - STYLINT - SUBTESTS - SUFFIX - TAGS - TAR - TARGET - TARGET_TRIPLE - TAR_XFORM_FLAG - TESTCONFIG - TESTFLAGS - TESTS - TESTTIMEOUT - TSC - TYPE - TZ - UI_JS_CCL - UI_JS_OSS - UI_PROTOS_CCL - UI_PROTOS_OSS - UI_TS_CCL - UI_TS_OSS - UNAME - VERBOSE - WEBPACK - WEBPACK_DEV_SERVER - XCC - XCMAKE_SYSTEM_NAME - XCXX - XGOARCH - XGOOS - prefix -endef diff --git a/build/werror.sh b/build/werror.sh deleted file mode 100755 index aaf39b32d3c0..000000000000 --- a/build/werror.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env bash - -# werror.sh promotes warnings produced by a command to fatal errors, just like -# GCC's -Werror flag. -# -# More formally, `werror.sh COMMAND [ARGS...]` invokes COMMAND with the -# specified ARGS. If any output is produced on stdout or stderr, werror.sh -# prints both streams to stderr and exits with a failing exit code. Otherwise, -# werror.sh exits with the exit code of COMMAND. -# -# werror.sh is a blunt instrument and does not attempt to distinguish between -# output that represents a warning and output that does not. Prefer to use a -# -Werror flag if available. - -output=$("$@" 2>&1) -exit_code=$? 
-if [[ "$output" ]]; then - echo "$output" >&2 - echo "fatal: treating warnings from $(basename $1) as errors" - exit 1 -fi -exit "$exit_code" diff --git a/cloud/kubernetes/bring-your-own-certs/client.yaml b/cloud/kubernetes/bring-your-own-certs/client.yaml index 3a2029a7ba07..c7e16770fd74 100644 --- a/cloud/kubernetes/bring-your-own-certs/client.yaml +++ b/cloud/kubernetes/bring-your-own-certs/client.yaml @@ -20,7 +20,7 @@ spec: serviceAccountName: cockroachdb containers: - name: cockroachdb-client - image: cockroachdb/cockroach:v24.1.3 + image: cockroachdb/cockroach:v24.2.0 # Keep a pod open indefinitely so kubectl exec can be used to get a shell to it # and run cockroach client commands, such as cockroach sql, cockroach node status, etc. command: diff --git a/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml b/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml index 5495c0f3b87e..5007a1b99e5c 100644 --- a/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml +++ b/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml @@ -153,7 +153,7 @@ spec: topologyKey: kubernetes.io/hostname containers: - name: cockroachdb - image: cockroachdb/cockroach:v24.1.3 + image: cockroachdb/cockroach:v24.2.0 imagePullPolicy: IfNotPresent # TODO: Change these to appropriate values for the hardware that you're running. You can see # the resources that can be allocated on each of your Kubernetes nodes by running: diff --git a/cloud/kubernetes/client-secure.yaml b/cloud/kubernetes/client-secure.yaml index 3e3fef7600c6..f61d732f29a2 100644 --- a/cloud/kubernetes/client-secure.yaml +++ b/cloud/kubernetes/client-secure.yaml @@ -32,7 +32,7 @@ spec: mountPath: /cockroach-certs containers: - name: cockroachdb-client - image: cockroachdb/cockroach:v24.1.3 + image: cockroachdb/cockroach:v24.2.0 imagePullPolicy: IfNotPresent volumeMounts: - name: client-certs diff --git a/cloud/kubernetes/cluster-init-secure.yaml b/cloud/kubernetes/cluster-init-secure.yaml index 4af3e02b94ae..0038e270d6af 100644 --- a/cloud/kubernetes/cluster-init-secure.yaml +++ b/cloud/kubernetes/cluster-init-secure.yaml @@ -34,7 +34,7 @@ spec: mountPath: /cockroach-certs containers: - name: cluster-init - image: cockroachdb/cockroach:v24.1.3 + image: cockroachdb/cockroach:v24.2.0 imagePullPolicy: IfNotPresent volumeMounts: - name: client-certs diff --git a/cloud/kubernetes/cluster-init.yaml b/cloud/kubernetes/cluster-init.yaml index 69e9d94d40d8..53cd2a296507 100644 --- a/cloud/kubernetes/cluster-init.yaml +++ b/cloud/kubernetes/cluster-init.yaml @@ -10,7 +10,7 @@ spec: spec: containers: - name: cluster-init - image: cockroachdb/cockroach:v24.1.3 + image: cockroachdb/cockroach:v24.2.0 imagePullPolicy: IfNotPresent command: - "/cockroach/cockroach" diff --git a/cloud/kubernetes/cockroachdb-statefulset-secure.yaml b/cloud/kubernetes/cockroachdb-statefulset-secure.yaml index ab795afad796..d036db12ebcf 100644 --- a/cloud/kubernetes/cockroachdb-statefulset-secure.yaml +++ b/cloud/kubernetes/cockroachdb-statefulset-secure.yaml @@ -195,7 +195,7 @@ spec: topologyKey: kubernetes.io/hostname containers: - name: cockroachdb - image: cockroachdb/cockroach:v24.1.3 + image: cockroachdb/cockroach:v24.2.0 imagePullPolicy: IfNotPresent # TODO: Change these to appropriate values for the hardware that you're running. 
You can see # the resources that can be allocated on each of your Kubernetes nodes by running: diff --git a/cloud/kubernetes/cockroachdb-statefulset.yaml b/cloud/kubernetes/cockroachdb-statefulset.yaml index 168545d44897..8c17fda1c02f 100644 --- a/cloud/kubernetes/cockroachdb-statefulset.yaml +++ b/cloud/kubernetes/cockroachdb-statefulset.yaml @@ -98,7 +98,7 @@ spec: topologyKey: kubernetes.io/hostname containers: - name: cockroachdb - image: cockroachdb/cockroach:v24.1.3 + image: cockroachdb/cockroach:v24.2.0 imagePullPolicy: IfNotPresent # TODO: Change these to appropriate values for the hardware that you're running. You can see # the resources that can be allocated on each of your Kubernetes nodes by running: diff --git a/cloud/kubernetes/multiregion/client-secure.yaml b/cloud/kubernetes/multiregion/client-secure.yaml index 7836318a281c..47d6cf855516 100644 --- a/cloud/kubernetes/multiregion/client-secure.yaml +++ b/cloud/kubernetes/multiregion/client-secure.yaml @@ -9,7 +9,7 @@ spec: serviceAccountName: cockroachdb containers: - name: cockroachdb-client - image: cockroachdb/cockroach:v24.1.3 + image: cockroachdb/cockroach:v24.2.0 imagePullPolicy: IfNotPresent volumeMounts: - name: client-certs diff --git a/cloud/kubernetes/multiregion/cluster-init-secure.yaml b/cloud/kubernetes/multiregion/cluster-init-secure.yaml index 407dddf71d0a..c30dcaf1ba91 100644 --- a/cloud/kubernetes/multiregion/cluster-init-secure.yaml +++ b/cloud/kubernetes/multiregion/cluster-init-secure.yaml @@ -11,7 +11,7 @@ spec: serviceAccountName: cockroachdb containers: - name: cluster-init - image: cockroachdb/cockroach:v24.1.3 + image: cockroachdb/cockroach:v24.2.0 imagePullPolicy: IfNotPresent volumeMounts: - name: client-certs diff --git a/cloud/kubernetes/multiregion/cockroachdb-statefulset-secure.yaml b/cloud/kubernetes/multiregion/cockroachdb-statefulset-secure.yaml index 700e607f0c9f..244dcfaea3b9 100644 --- a/cloud/kubernetes/multiregion/cockroachdb-statefulset-secure.yaml +++ b/cloud/kubernetes/multiregion/cockroachdb-statefulset-secure.yaml @@ -167,7 +167,7 @@ spec: topologyKey: kubernetes.io/hostname containers: - name: cockroachdb - image: cockroachdb/cockroach:v24.1.3 + image: cockroachdb/cockroach:v24.2.0 imagePullPolicy: IfNotPresent ports: - containerPort: 26257 diff --git a/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml b/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml index 59924007a10f..c65b0337bbb2 100644 --- a/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml +++ b/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml @@ -185,7 +185,7 @@ spec: name: cockroach-env containers: - name: cockroachdb - image: cockroachdb/cockroach:v24.1.3 + image: cockroachdb/cockroach:v24.2.0 imagePullPolicy: IfNotPresent # TODO: Change these to appropriate values for the hardware that you're running. 
You can see # the resources that can be allocated on each of your Kubernetes nodes by running: diff --git a/cloud/kubernetes/performance/cockroachdb-daemonset-insecure.yaml b/cloud/kubernetes/performance/cockroachdb-daemonset-insecure.yaml index f8ab0daa53fb..8d5429e4a933 100644 --- a/cloud/kubernetes/performance/cockroachdb-daemonset-insecure.yaml +++ b/cloud/kubernetes/performance/cockroachdb-daemonset-insecure.yaml @@ -82,7 +82,7 @@ spec: hostNetwork: true containers: - name: cockroachdb - image: cockroachdb/cockroach:v24.1.3 + image: cockroachdb/cockroach:v24.2.0 imagePullPolicy: IfNotPresent # TODO: If you configured taints to give CockroachDB exclusive access to nodes, feel free # to remove the requests and limits sections. If you didn't, you'll need to change these to diff --git a/cloud/kubernetes/performance/cockroachdb-daemonset-secure.yaml b/cloud/kubernetes/performance/cockroachdb-daemonset-secure.yaml index 7299ae8494fb..89c8b3b86eee 100644 --- a/cloud/kubernetes/performance/cockroachdb-daemonset-secure.yaml +++ b/cloud/kubernetes/performance/cockroachdb-daemonset-secure.yaml @@ -198,7 +198,7 @@ spec: topologyKey: kubernetes.io/hostname containers: - name: cockroachdb - image: cockroachdb/cockroach:v24.1.3 + image: cockroachdb/cockroach:v24.2.0 imagePullPolicy: IfNotPresent # TODO: If you configured taints to give CockroachDB exclusive access to nodes, feel free # to remove the requests and limits sections. If you didn't, you'll need to change these to diff --git a/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml b/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml index f30379d7a4ab..cdd165498a47 100644 --- a/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml +++ b/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml @@ -141,7 +141,7 @@ spec: - name: cockroachdb # NOTE: Always use the most recent version of CockroachDB for the best # performance and reliability. - image: cockroachdb/cockroach:v24.1.3 + image: cockroachdb/cockroach:v24.2.0 imagePullPolicy: IfNotPresent # TODO: Change these to appropriate values for the hardware that you're running. You can see # the resources that can be allocated on each of your Kubernetes nodes by running: diff --git a/cloud/kubernetes/performance/cockroachdb-statefulset-secure.yaml b/cloud/kubernetes/performance/cockroachdb-statefulset-secure.yaml index 614eb366fc46..dd0606ecefb0 100644 --- a/cloud/kubernetes/performance/cockroachdb-statefulset-secure.yaml +++ b/cloud/kubernetes/performance/cockroachdb-statefulset-secure.yaml @@ -232,7 +232,7 @@ spec: - name: cockroachdb # NOTE: Always use the most recent version of CockroachDB for the best # performance and reliability. - image: cockroachdb/cockroach:v24.1.3 + image: cockroachdb/cockroach:v24.2.0 imagePullPolicy: IfNotPresent # TODO: Change these to appropriate values for the hardware that you're running. 
You can see # the resources that can be allocated on each of your Kubernetes nodes by running: diff --git a/cloud/kubernetes/v1.6/client-secure.yaml b/cloud/kubernetes/v1.6/client-secure.yaml index 74f790743852..4a945b0c0173 100644 --- a/cloud/kubernetes/v1.6/client-secure.yaml +++ b/cloud/kubernetes/v1.6/client-secure.yaml @@ -32,7 +32,7 @@ spec: mountPath: /cockroach-certs containers: - name: cockroachdb-client - image: cockroachdb/cockroach:v24.1.3 + image: cockroachdb/cockroach:v24.2.0 imagePullPolicy: IfNotPresent volumeMounts: - name: client-certs diff --git a/cloud/kubernetes/v1.6/cluster-init-secure.yaml b/cloud/kubernetes/v1.6/cluster-init-secure.yaml index 82808e9927bb..f4fa77872de6 100644 --- a/cloud/kubernetes/v1.6/cluster-init-secure.yaml +++ b/cloud/kubernetes/v1.6/cluster-init-secure.yaml @@ -34,7 +34,7 @@ spec: mountPath: /cockroach-certs containers: - name: cluster-init - image: cockroachdb/cockroach:v24.1.3 + image: cockroachdb/cockroach:v24.2.0 imagePullPolicy: IfNotPresent volumeMounts: - name: client-certs diff --git a/cloud/kubernetes/v1.6/cluster-init.yaml b/cloud/kubernetes/v1.6/cluster-init.yaml index bbccc07663d2..30e3349eee9c 100644 --- a/cloud/kubernetes/v1.6/cluster-init.yaml +++ b/cloud/kubernetes/v1.6/cluster-init.yaml @@ -10,7 +10,7 @@ spec: spec: containers: - name: cluster-init - image: cockroachdb/cockroach:v24.1.3 + image: cockroachdb/cockroach:v24.2.0 imagePullPolicy: IfNotPresent command: - "/cockroach/cockroach" diff --git a/cloud/kubernetes/v1.6/cockroachdb-statefulset-secure.yaml b/cloud/kubernetes/v1.6/cockroachdb-statefulset-secure.yaml index 645e321a7497..07cace7a1182 100644 --- a/cloud/kubernetes/v1.6/cockroachdb-statefulset-secure.yaml +++ b/cloud/kubernetes/v1.6/cockroachdb-statefulset-secure.yaml @@ -178,7 +178,7 @@ spec: topologyKey: kubernetes.io/hostname containers: - name: cockroachdb - image: cockroachdb/cockroach:v24.1.3 + image: cockroachdb/cockroach:v24.2.0 imagePullPolicy: IfNotPresent ports: - containerPort: 26257 diff --git a/cloud/kubernetes/v1.6/cockroachdb-statefulset.yaml b/cloud/kubernetes/v1.6/cockroachdb-statefulset.yaml index ff3b7a6c5ba4..876c1e50f352 100644 --- a/cloud/kubernetes/v1.6/cockroachdb-statefulset.yaml +++ b/cloud/kubernetes/v1.6/cockroachdb-statefulset.yaml @@ -81,7 +81,7 @@ spec: topologyKey: kubernetes.io/hostname containers: - name: cockroachdb - image: cockroachdb/cockroach:v24.1.3 + image: cockroachdb/cockroach:v24.2.0 imagePullPolicy: IfNotPresent ports: - containerPort: 26257 diff --git a/cloud/kubernetes/v1.7/client-secure.yaml b/cloud/kubernetes/v1.7/client-secure.yaml index 1ddc803bc3d3..99e7424be89b 100644 --- a/cloud/kubernetes/v1.7/client-secure.yaml +++ b/cloud/kubernetes/v1.7/client-secure.yaml @@ -32,7 +32,7 @@ spec: mountPath: /cockroach-certs containers: - name: cockroachdb-client - image: cockroachdb/cockroach:v24.1.3 + image: cockroachdb/cockroach:v24.2.0 imagePullPolicy: IfNotPresent volumeMounts: - name: client-certs diff --git a/cloud/kubernetes/v1.7/cluster-init-secure.yaml b/cloud/kubernetes/v1.7/cluster-init-secure.yaml index a51ddaa1ed9e..84dac4f4bcd0 100644 --- a/cloud/kubernetes/v1.7/cluster-init-secure.yaml +++ b/cloud/kubernetes/v1.7/cluster-init-secure.yaml @@ -34,7 +34,7 @@ spec: mountPath: /cockroach-certs containers: - name: cluster-init - image: cockroachdb/cockroach:v24.1.3 + image: cockroachdb/cockroach:v24.2.0 imagePullPolicy: IfNotPresent volumeMounts: - name: client-certs diff --git a/cloud/kubernetes/v1.7/cluster-init.yaml b/cloud/kubernetes/v1.7/cluster-init.yaml 
index cb2b777121d2..da47ba6ae1c6 100644 --- a/cloud/kubernetes/v1.7/cluster-init.yaml +++ b/cloud/kubernetes/v1.7/cluster-init.yaml @@ -10,7 +10,7 @@ spec: spec: containers: - name: cluster-init - image: cockroachdb/cockroach:v24.1.3 + image: cockroachdb/cockroach:v24.2.0 imagePullPolicy: IfNotPresent command: - "/cockroach/cockroach" diff --git a/cloud/kubernetes/v1.7/cockroachdb-statefulset-secure.yaml b/cloud/kubernetes/v1.7/cockroachdb-statefulset-secure.yaml index 1ffc09e9393a..be6b5e6f3fdc 100644 --- a/cloud/kubernetes/v1.7/cockroachdb-statefulset-secure.yaml +++ b/cloud/kubernetes/v1.7/cockroachdb-statefulset-secure.yaml @@ -190,7 +190,7 @@ spec: topologyKey: kubernetes.io/hostname containers: - name: cockroachdb - image: cockroachdb/cockroach:v24.1.3 + image: cockroachdb/cockroach:v24.2.0 imagePullPolicy: IfNotPresent ports: - containerPort: 26257 diff --git a/cloud/kubernetes/v1.7/cockroachdb-statefulset.yaml b/cloud/kubernetes/v1.7/cockroachdb-statefulset.yaml index 9a4f199b50d2..7b92c6e43170 100644 --- a/cloud/kubernetes/v1.7/cockroachdb-statefulset.yaml +++ b/cloud/kubernetes/v1.7/cockroachdb-statefulset.yaml @@ -93,7 +93,7 @@ spec: topologyKey: kubernetes.io/hostname containers: - name: cockroachdb - image: cockroachdb/cockroach:v24.1.3 + image: cockroachdb/cockroach:v24.2.0 imagePullPolicy: IfNotPresent ports: - containerPort: 26257 diff --git a/docs/RFCS/20240103_generic_query_plans.md b/docs/RFCS/20240103_generic_query_plans.md index 1aeb347d7eb1..d7df25ffd20a 100644 --- a/docs/RFCS/20240103_generic_query_plans.md +++ b/docs/RFCS/20240103_generic_query_plans.md @@ -313,16 +313,6 @@ some of this work if a normalized, base query plan is available. The base plan can be used as a starting point for building the generic plan, eliminating some of the overhead of `optbuilder` and normalization. -### Adding Generic Plans to the Query Cache - -A notable divergence of CockroachDB from Postgres is that CockroachDB has a -query cache which is used across sessions of the same gateway node. Storing -generic query plans in the query cache would allow sessions to reuse generic -plans across sessions, reducing planning overhead further. This will bring the -benefits of generic query plans to applications using client libraries and ORMs -that don't use proper, named prepared statements, such as -[knex.js](https://github.com/knex/knex/issues/802). - ### Replacing the placeholder fast path Generic query plans should make the placeholder fast path obsolete. 
Once we have diff --git a/docs/generated/http/BUILD.bazel b/docs/generated/http/BUILD.bazel index 5e0782b48157..22d61aeca2a3 100644 --- a/docs/generated/http/BUILD.bazel +++ b/docs/generated/http/BUILD.bazel @@ -1,8 +1,8 @@ -load(":defs.bzl", "PROTOBUF_TARGETS") +load("//pkg:protos.bzl", "SERVER_PROTOS") genrule( name = "http", - srcs = PROTOBUF_TARGETS, + srcs = SERVER_PROTOS, outs = [ "full.md", "health-other.md", diff --git a/docs/generated/http/defs.bzl b/docs/generated/http/defs.bzl deleted file mode 100644 index fa6832ddd0d7..000000000000 --- a/docs/generated/http/defs.bzl +++ /dev/null @@ -1,62 +0,0 @@ -# PROTOBUF_TARGETS lists the indirect dependencies needed to compile proto dependencies -# of //pkg/server/serverpb:serverpb_proto target -PROTOBUF_TARGETS = [ - "//pkg/build:build_proto", - "//pkg/clusterversion:clusterversion_proto", - "//pkg/config/zonepb:zonepb_proto", - "//pkg/geo/geopb:geopb_proto", - "//pkg/gossip:gossip_proto", - "//pkg/jobs/jobspb:jobspb_proto", - "//pkg/kv/kvpb:kvpb_proto", - "//pkg/kv/kvserver/concurrency/isolation:isolation_proto", - "//pkg/kv/kvserver/concurrency/lock:lock_proto", - "//pkg/kv/kvserver/kvserverpb:kvserverpb_proto", - "//pkg/kv/kvserver/liveness/livenesspb:livenesspb_proto", - "//pkg/kv/kvserver/loqrecovery/loqrecoverypb:loqrecoverypb_proto", - "//pkg/kv/kvserver/readsummary/rspb:rspb_proto", - "//pkg/kv/kvserver/kvflowcontrol/kvflowcontrolpb:kvflowcontrolpb_proto", - "//pkg/multitenant/mtinfopb:mtinfopb_proto", - "//pkg/multitenant/tenantcapabilities/tenantcapabilitiespb:tenantcapabilitiespb_proto", - "//pkg/raft/raftpb:raftpb_proto", - "//pkg/roachpb:roachpb_proto", - "//pkg/rpc/rpcpb:rpcpb_proto", - "//pkg/server/diagnostics/diagnosticspb:diagnosticspb_proto", - "//pkg/server/serverpb:serverpb_proto", - "//pkg/server/status/statuspb:statuspb_proto", - "//pkg/settings:settings_proto", - "//pkg/sql/appstatspb:appstatspb_proto", - "//pkg/sql/catalog/catenumpb:catenumpb_proto", - "//pkg/sql/catalog/catpb:catpb_proto", - "//pkg/sql/catalog/descpb:descpb_proto", - "//pkg/sql/catalog/fetchpb:fetchpb_proto", - "//pkg/sql/contentionpb:contentionpb_proto", - "//pkg/sql/execinfrapb:execinfrapb_proto", - "//pkg/sql/sem/semenumpb:semenumpb_proto", - "//pkg/sql/inverted:inverted_proto", - "//pkg/sql/lex:lex_proto", - "//pkg/sql/pgwire/pgerror:pgerror_proto", - "//pkg/sql/schemachanger/scpb:scpb_proto", - "//pkg/sql/sessiondatapb:sessiondatapb_proto", - "//pkg/sql/sqlstats/insights:insights_proto", - "//pkg/sql/types:types_proto", - "//pkg/storage/enginepb:enginepb_proto", - "//pkg/ts/catalog:catalog_proto", - "//pkg/ts/tspb:tspb_proto", - "//pkg/util/duration:duration_proto", - "//pkg/util/hlc:hlc_proto", - "//pkg/util/admission/admissionpb:admissionpb_proto", - "//pkg/util/log/logpb:logpb_proto", - "//pkg/util/metric:metric_proto", - "//pkg/util/timeutil/pgdate:pgdate_proto", - "//pkg/util/tracing/tracingpb:tracingpb_proto", - "//pkg/util:util_proto", - "@com_github_prometheus_client_model//io/prometheus/client:io_prometheus_client_proto", - "@com_github_cockroachdb_errors//errorspb:errorspb_proto", - "@com_github_gogo_protobuf//gogoproto:gogo_proto", - "@com_google_protobuf//:any_proto", - "@com_google_protobuf//:descriptor_proto", - "@com_google_protobuf//:duration_proto", - "@com_google_protobuf//:timestamp_proto", - "@go_googleapis//google/api:annotations_proto", - "@go_googleapis//google/api:http_proto", -] diff --git a/docs/generated/http/full.md b/docs/generated/http/full.md index f066f4641834..44b42036f1f5 100644 --- 
a/docs/generated/http/full.md +++ b/docs/generated/http/full.md @@ -1326,6 +1326,7 @@ RangeProblems describes issues reported by a range. For internal use only. | raft_log_too_large | [bool](#cockroach.server.serverpb.RaftDebugResponse-bool) | | When the raft log is too large, it can be a symptom of other issues. | [reserved](#support-status) | | circuit_breaker_error | [bool](#cockroach.server.serverpb.RaftDebugResponse-bool) | | | [reserved](#support-status) | | paused_followers | [bool](#cockroach.server.serverpb.RaftDebugResponse-bool) | | | [reserved](#support-status) | +| range_too_large | [bool](#cockroach.server.serverpb.RaftDebugResponse-bool) | | | [reserved](#support-status) | @@ -1575,6 +1576,7 @@ RangeProblems describes issues reported by a range. For internal use only. | raft_log_too_large | [bool](#cockroach.server.serverpb.RangesResponse-bool) | | When the raft log is too large, it can be a symptom of other issues. | [reserved](#support-status) | | circuit_breaker_error | [bool](#cockroach.server.serverpb.RangesResponse-bool) | | | [reserved](#support-status) | | paused_followers | [bool](#cockroach.server.serverpb.RangesResponse-bool) | | | [reserved](#support-status) | +| range_too_large | [bool](#cockroach.server.serverpb.RangesResponse-bool) | | | [reserved](#support-status) | @@ -3408,6 +3410,7 @@ Support status: [reserved](#support-status) | raft_log_too_large_range_ids | [int64](#cockroach.server.serverpb.ProblemRangesResponse-int64) | repeated | | [reserved](#support-status) | | circuit_breaker_error_range_ids | [int64](#cockroach.server.serverpb.ProblemRangesResponse-int64) | repeated | | [reserved](#support-status) | | paused_replica_ids | [int64](#cockroach.server.serverpb.ProblemRangesResponse-int64) | repeated | | [reserved](#support-status) | +| too_large_range_ids | [int64](#cockroach.server.serverpb.ProblemRangesResponse-int64) | repeated | | [reserved](#support-status) | @@ -3956,6 +3959,7 @@ RangeProblems describes issues reported by a range. For internal use only. | raft_log_too_large | [bool](#cockroach.server.serverpb.RangeResponse-bool) | | When the raft log is too large, it can be a symptom of other issues. | [reserved](#support-status) | | circuit_breaker_error | [bool](#cockroach.server.serverpb.RangeResponse-bool) | | | [reserved](#support-status) | | paused_followers | [bool](#cockroach.server.serverpb.RangeResponse-bool) | | | [reserved](#support-status) | +| range_too_large | [bool](#cockroach.server.serverpb.RangeResponse-bool) | | | [reserved](#support-status) | @@ -5406,6 +5410,46 @@ Support status: [reserved](#support-status) +## UpdateTableMetadataCache + + + + + +Support status: [reserved](#support-status) + +#### Request Parameters + + + + + + + +| Field | Type | Label | Description | Support status | +| ----- | ---- | ----- | ----------- | -------------- | +| local | [bool](#cockroach.server.serverpb.UpdateTableMetadataCacheRequest-bool) | | If true, the server will attempt to send a signal to the table metadata job by notifying the channel set on the status server. | [reserved](#support-status) | + + + + + + + +#### Response Parameters + + + + + + + + + + + + + ## Users `GET /_admin/v1/users` @@ -8025,3 +8069,52 @@ Support status: [reserved](#support-status) +## ReadFromTenantInfo + + + +ReadFromTenantInfo returns the tenant from which the requesting tenant +should read, if any. 
+ +Support status: [reserved](#support-status) + +#### Request Parameters + + + + +ReadFromTenantInfoRequest requests info, if any, on which tenant the caller +should read from. + + +| Field | Type | Label | Description | Support status | +| ----- | ---- | ----- | ----------- | -------------- | +| tenant_id | [cockroach.roachpb.TenantID](#cockroach.server.serverpb.ReadFromTenantInfoRequest-cockroach.roachpb.TenantID) | | TenantID should always be the ID of the tenant making the request. This duplicates the ID in the auth context that is added implicitly, and must always match that ID when that ID is present, however that ID is absent in insecure test clusters which is why we also specify it explicitly here. | [reserved](#support-status) | + + + + + + + +#### Response Parameters + + + + +ReadFromTenantInfoResponse instructs a tenant as to which tenant, if any, it +should configure itself to read from and the timestamp at which it should do +so. + + +| Field | Type | Label | Description | Support status | +| ----- | ---- | ----- | ----------- | -------------- | +| read_from | [cockroach.roachpb.TenantID](#cockroach.server.serverpb.ReadFromTenantInfoResponse-cockroach.roachpb.TenantID) | | | [reserved](#support-status) | +| read_at | [cockroach.util.hlc.Timestamp](#cockroach.server.serverpb.ReadFromTenantInfoResponse-cockroach.util.hlc.Timestamp) | | | [reserved](#support-status) | + + + + + + + diff --git a/docs/generated/metrics/metrics.html b/docs/generated/metrics/metrics.html index d331e7ebb08d..e512194c64f1 100644 --- a/docs/generated/metrics/metrics.html +++ b/docs/generated/metrics/metrics.html @@ -12,8 +12,8 @@ STORAGEadmission.admitted.elastic-cpu.bulk-normal-priNumber of requests admittedRequestsCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE STORAGEadmission.admitted.elastic-cpu.normal-priNumber of requests admittedRequestsCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE STORAGEadmission.admitted.elastic-storesNumber of requests admittedRequestsCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE +STORAGEadmission.admitted.elastic-stores.bulk-low-priNumber of requests admittedRequestsCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE STORAGEadmission.admitted.elastic-stores.bulk-normal-priNumber of requests admittedRequestsCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE -STORAGEadmission.admitted.elastic-stores.ttl-low-priNumber of requests admittedRequestsCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE STORAGEadmission.admitted.kvNumber of requests admittedRequestsCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE STORAGEadmission.admitted.kv-storesNumber of requests admittedRequestsCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE STORAGEadmission.admitted.kv-stores.high-priNumber of requests admittedRequestsCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE @@ -47,8 +47,8 @@ STORAGEadmission.errored.elastic-cpu.bulk-normal-priNumber of requests not admitted due to errorRequestsCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE STORAGEadmission.errored.elastic-cpu.normal-priNumber of requests not admitted due to errorRequestsCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE STORAGEadmission.errored.elastic-storesNumber of requests not admitted due to errorRequestsCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE +STORAGEadmission.errored.elastic-stores.bulk-low-priNumber of requests not admitted due to errorRequestsCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE STORAGEadmission.errored.elastic-stores.bulk-normal-priNumber of requests not admitted due to errorRequestsCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE -STORAGEadmission.errored.elastic-stores.ttl-low-priNumber of requests not admitted due to 
errorRequestsCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE STORAGEadmission.errored.kvNumber of requests not admitted due to errorRequestsCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE STORAGEadmission.errored.kv-storesNumber of requests not admitted due to errorRequestsCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE STORAGEadmission.errored.kv-stores.high-priNumber of requests not admitted due to errorRequestsCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE @@ -94,8 +94,8 @@ STORAGEadmission.requested.elastic-cpu.bulk-normal-priNumber of requestsRequestsCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE STORAGEadmission.requested.elastic-cpu.normal-priNumber of requestsRequestsCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE STORAGEadmission.requested.elastic-storesNumber of requestsRequestsCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE +STORAGEadmission.requested.elastic-stores.bulk-low-priNumber of requestsRequestsCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE STORAGEadmission.requested.elastic-stores.bulk-normal-priNumber of requestsRequestsCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE -STORAGEadmission.requested.elastic-stores.ttl-low-priNumber of requestsRequestsCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE STORAGEadmission.requested.kvNumber of requestsRequestsCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE STORAGEadmission.requested.kv-storesNumber of requestsRequestsCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE STORAGEadmission.requested.kv-stores.high-priNumber of requestsRequestsCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE @@ -121,8 +121,8 @@ STORAGEadmission.wait_durations.elastic-cpu.bulk-normal-priWait time durations for requests that waitedWait time DurationHISTOGRAMNANOSECONDSAVGNONE STORAGEadmission.wait_durations.elastic-cpu.normal-priWait time durations for requests that waitedWait time DurationHISTOGRAMNANOSECONDSAVGNONE STORAGEadmission.wait_durations.elastic-storesWait time durations for requests that waitedWait time DurationHISTOGRAMNANOSECONDSAVGNONE +STORAGEadmission.wait_durations.elastic-stores.bulk-low-priWait time durations for requests that waitedWait time DurationHISTOGRAMNANOSECONDSAVGNONE STORAGEadmission.wait_durations.elastic-stores.bulk-normal-priWait time durations for requests that waitedWait time DurationHISTOGRAMNANOSECONDSAVGNONE -STORAGEadmission.wait_durations.elastic-stores.ttl-low-priWait time durations for requests that waitedWait time DurationHISTOGRAMNANOSECONDSAVGNONE STORAGEadmission.wait_durations.kvWait time durations for requests that waitedWait time DurationHISTOGRAMNANOSECONDSAVGNONE STORAGEadmission.wait_durations.kv-storesWait time durations for requests that waitedWait time DurationHISTOGRAMNANOSECONDSAVGNONE STORAGEadmission.wait_durations.kv-stores.high-priWait time durations for requests that waitedWait time DurationHISTOGRAMNANOSECONDSAVGNONE @@ -147,8 +147,8 @@ STORAGEadmission.wait_queue_length.elastic-cpu.bulk-normal-priLength of wait queueRequestsGAUGECOUNTAVGNONE STORAGEadmission.wait_queue_length.elastic-cpu.normal-priLength of wait queueRequestsGAUGECOUNTAVGNONE STORAGEadmission.wait_queue_length.elastic-storesLength of wait queueRequestsGAUGECOUNTAVGNONE +STORAGEadmission.wait_queue_length.elastic-stores.bulk-low-priLength of wait queueRequestsGAUGECOUNTAVGNONE STORAGEadmission.wait_queue_length.elastic-stores.bulk-normal-priLength of wait queueRequestsGAUGECOUNTAVGNONE -STORAGEadmission.wait_queue_length.elastic-stores.ttl-low-priLength of wait queueRequestsGAUGECOUNTAVGNONE STORAGEadmission.wait_queue_length.kvLength of wait queueRequestsGAUGECOUNTAVGNONE STORAGEadmission.wait_queue_length.kv-storesLength of wait 
queueRequestsGAUGECOUNTAVGNONE STORAGEadmission.wait_queue_length.kv-stores.high-priLength of wait queueRequestsGAUGECOUNTAVGNONE @@ -308,6 +308,40 @@ STORAGEkvadmission.flow_token_dispatch.pending_regularNumber of pending regular flow token dispatchesDispatchesGAUGECOUNTAVGNONE STORAGEkvadmission.flow_token_dispatch.remote_elasticNumber of remote elastic flow token dispatchesDispatchesCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE STORAGEkvadmission.flow_token_dispatch.remote_regularNumber of remote regular flow token dispatchesDispatchesCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE +STORAGEkvflowcontrol.eval_wait.elastic.durationLatency histogram for time elastic requests spent waiting for flow tokens to evaluateNanosecondsHISTOGRAMNANOSECONDSAVGNONE +STORAGEkvflowcontrol.eval_wait.elastic.requests.admittedNumber of elastic requests admitted by the flow controllerRequestsCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE +STORAGEkvflowcontrol.eval_wait.elastic.requests.bypassedNumber of waiting elastic requests that bypassed the flow controller due the evaluating replica not being the leaderRequestsCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE +STORAGEkvflowcontrol.eval_wait.elastic.requests.erroredNumber of elastic requests that errored out while waiting for flow tokensRequestsCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE +STORAGEkvflowcontrol.eval_wait.elastic.requests.waitingNumber of elastic requests waiting for flow tokensRequestsGAUGECOUNTAVGNONE +STORAGEkvflowcontrol.eval_wait.regular.durationLatency histogram for time regular requests spent waiting for flow tokens to evaluateNanosecondsHISTOGRAMNANOSECONDSAVGNONE +STORAGEkvflowcontrol.eval_wait.regular.requests.admittedNumber of regular requests admitted by the flow controllerRequestsCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE +STORAGEkvflowcontrol.eval_wait.regular.requests.bypassedNumber of waiting regular requests that bypassed the flow controller due the evaluating replica not being the leaderRequestsCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE +STORAGEkvflowcontrol.eval_wait.regular.requests.erroredNumber of regular requests that errored out while waiting for flow tokensRequestsCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE +STORAGEkvflowcontrol.eval_wait.regular.requests.waitingNumber of regular requests waiting for flow tokensRequestsGAUGECOUNTAVGNONE +STORAGEkvflowcontrol.streams.eval.elastic.blocked_countNumber of eval replication streams with no flow tokens available for elastic requestsCountGAUGECOUNTAVGNONE +STORAGEkvflowcontrol.streams.eval.elastic.total_countTotal number of eval replication streams for elastic requestsCountGAUGECOUNTAVGNONE +STORAGEkvflowcontrol.streams.eval.regular.blocked_countNumber of eval replication streams with no flow tokens available for regular requestsCountGAUGECOUNTAVGNONE +STORAGEkvflowcontrol.streams.eval.regular.total_countTotal number of eval replication streams for regular requestsCountGAUGECOUNTAVGNONE +STORAGEkvflowcontrol.streams.send.elastic.blocked_countNumber of send replication streams with no flow tokens available for elastic requestsCountGAUGECOUNTAVGNONE +STORAGEkvflowcontrol.streams.send.elastic.total_countTotal number of send replication streams for elastic requestsCountGAUGECOUNTAVGNONE +STORAGEkvflowcontrol.streams.send.regular.blocked_countNumber of send replication streams with no flow tokens available for regular requestsCountGAUGECOUNTAVGNONE +STORAGEkvflowcontrol.streams.send.regular.total_countTotal number of send replication streams for regular requestsCountGAUGECOUNTAVGNONE 
+STORAGEkvflowcontrol.tokens.eval.elastic.availableFlow eval tokens available for elastic requests, across all replication streamsBytesGAUGEBYTESAVGNONE +STORAGEkvflowcontrol.tokens.eval.elastic.deductedFlow eval tokens deducted by elastic requests, across all replication streamsBytesCOUNTERBYTESAVGNON_NEGATIVE_DERIVATIVE +STORAGEkvflowcontrol.tokens.eval.elastic.returnedFlow eval tokens returned by elastic requests, across all replication streamsBytesCOUNTERBYTESAVGNON_NEGATIVE_DERIVATIVE +STORAGEkvflowcontrol.tokens.eval.elastic.unaccountedFlow eval tokens returned by elastic requests that were unaccounted for, across all replication streamsBytesCOUNTERBYTESAVGNON_NEGATIVE_DERIVATIVE +STORAGEkvflowcontrol.tokens.eval.regular.availableFlow eval tokens available for regular requests, across all replication streamsBytesGAUGEBYTESAVGNONE +STORAGEkvflowcontrol.tokens.eval.regular.deductedFlow eval tokens deducted by regular requests, across all replication streamsBytesCOUNTERBYTESAVGNON_NEGATIVE_DERIVATIVE +STORAGEkvflowcontrol.tokens.eval.regular.returnedFlow eval tokens returned by regular requests, across all replication streamsBytesCOUNTERBYTESAVGNON_NEGATIVE_DERIVATIVE +STORAGEkvflowcontrol.tokens.eval.regular.unaccountedFlow eval tokens returned by regular requests that were unaccounted for, across all replication streamsBytesCOUNTERBYTESAVGNON_NEGATIVE_DERIVATIVE +STORAGEkvflowcontrol.tokens.send.elastic.availableFlow send tokens available for elastic requests, across all replication streamsBytesGAUGEBYTESAVGNONE +STORAGEkvflowcontrol.tokens.send.elastic.deductedFlow send tokens deducted by elastic requests, across all replication streamsBytesCOUNTERBYTESAVGNON_NEGATIVE_DERIVATIVE +STORAGEkvflowcontrol.tokens.send.elastic.returnedFlow send tokens returned by elastic requests, across all replication streamsBytesCOUNTERBYTESAVGNON_NEGATIVE_DERIVATIVE +STORAGEkvflowcontrol.tokens.send.elastic.unaccountedFlow send tokens returned by elastic requests that were unaccounted for, across all replication streamsBytesCOUNTERBYTESAVGNON_NEGATIVE_DERIVATIVE +STORAGEkvflowcontrol.tokens.send.regular.availableFlow send tokens available for regular requests, across all replication streamsBytesGAUGEBYTESAVGNONE +STORAGEkvflowcontrol.tokens.send.regular.deductedFlow send tokens deducted by regular requests, across all replication streamsBytesCOUNTERBYTESAVGNON_NEGATIVE_DERIVATIVE +STORAGEkvflowcontrol.tokens.send.regular.returnedFlow send tokens returned by regular requests, across all replication streamsBytesCOUNTERBYTESAVGNON_NEGATIVE_DERIVATIVE +STORAGEkvflowcontrol.tokens.send.regular.unaccountedFlow send tokens returned by regular requests that were unaccounted for, across all replication streamsBytesCOUNTERBYTESAVGNON_NEGATIVE_DERIVATIVE STORAGEleases.epochNumber of replica leaseholders using epoch-based leasesReplicasGAUGECOUNTAVGNONE STORAGEleases.errorNumber of failed lease requestsLease RequestsCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE STORAGEleases.expirationNumber of replica leaseholders using expiration-based leasesReplicasGAUGECOUNTAVGNONE @@ -533,6 +567,7 @@ STORAGErangekeybytesNumber of bytes taken up by range keys (e.g. MVCC range tombstones)StorageGAUGEBYTESAVGNONE STORAGErangekeycountCount of all range keys (e.g. 
MVCC range tombstones)KeysGAUGECOUNTAVGNONE STORAGErangesNumber of rangesRangesGAUGECOUNTAVGNONE +STORAGEranges.decommissioningNumber of ranges with at least one replica on a decommissioning nodeRangesGAUGECOUNTAVGNONE STORAGEranges.overreplicatedNumber of ranges with more live replicas than the replication targetRangesGAUGECOUNTAVGNONE STORAGEranges.unavailableNumber of ranges with fewer live replicas than needed for quorumRangesGAUGECOUNTAVGNONE STORAGEranges.underreplicatedNumber of ranges with fewer live replicas than the replication targetRangesGAUGECOUNTAVGNONE @@ -1050,6 +1085,18 @@ APPLICATIONjobs.auto_config_task.resume_completedNumber of auto_config_task jobs which successfully resumed to completionjobsCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE APPLICATIONjobs.auto_config_task.resume_failedNumber of auto_config_task jobs which failed with a non-retriable errorjobsCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE APPLICATIONjobs.auto_config_task.resume_retry_errorNumber of auto_config_task jobs which failed with a retriable errorjobsCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE +APPLICATIONjobs.auto_create_partial_stats.currently_idleNumber of auto_create_partial_stats jobs currently considered Idle and can be freely shut downjobsGAUGECOUNTAVGNONE +APPLICATIONjobs.auto_create_partial_stats.currently_pausedNumber of auto_create_partial_stats jobs currently considered PausedjobsGAUGECOUNTAVGNONE +APPLICATIONjobs.auto_create_partial_stats.currently_runningNumber of auto_create_partial_stats jobs currently running in Resume or OnFailOrCancel statejobsGAUGECOUNTAVGNONE +APPLICATIONjobs.auto_create_partial_stats.expired_pts_recordsNumber of expired protected timestamp records owned by auto_create_partial_stats jobsrecordsCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE +APPLICATIONjobs.auto_create_partial_stats.fail_or_cancel_completedNumber of auto_create_partial_stats jobs which successfully completed their failure or cancelation processjobsCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE +APPLICATIONjobs.auto_create_partial_stats.fail_or_cancel_failedNumber of auto_create_partial_stats jobs which failed with a non-retriable error on their failure or cancelation processjobsCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE +APPLICATIONjobs.auto_create_partial_stats.fail_or_cancel_retry_errorNumber of auto_create_partial_stats jobs which failed with a retriable error on their failure or cancelation processjobsCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE +APPLICATIONjobs.auto_create_partial_stats.protected_age_secThe age of the oldest PTS record protected by auto_create_partial_stats jobssecondsGAUGESECONDSAVGNONE +APPLICATIONjobs.auto_create_partial_stats.protected_record_countNumber of protected timestamp records held by auto_create_partial_stats jobsrecordsGAUGECOUNTAVGNONE +APPLICATIONjobs.auto_create_partial_stats.resume_completedNumber of auto_create_partial_stats jobs which successfully resumed to completionjobsCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE +APPLICATIONjobs.auto_create_partial_stats.resume_failedNumber of auto_create_partial_stats jobs which failed with a non-retriable errorjobsCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE +APPLICATIONjobs.auto_create_partial_stats.resume_retry_errorNumber of auto_create_partial_stats jobs which failed with a retriable errorjobsCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE APPLICATIONjobs.auto_create_stats.currently_idleNumber of auto_create_stats jobs currently considered Idle and can be freely shut downjobsGAUGECOUNTAVGNONE APPLICATIONjobs.auto_create_stats.currently_pausedNumber of auto_create_stats jobs
currently considered PausedjobsGAUGECOUNTAVGNONE APPLICATIONjobs.auto_create_stats.currently_runningNumber of auto_create_stats jobs currently running in Resume or OnFailOrCancel statejobsGAUGECOUNTAVGNONE @@ -1350,6 +1397,18 @@ APPLICATIONjobs.typedesc_schema_change.resume_completedNumber of typedesc_schema_change jobs which successfully resumed to completionjobsCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE APPLICATIONjobs.typedesc_schema_change.resume_failedNumber of typedesc_schema_change jobs which failed with a non-retriable errorjobsCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE APPLICATIONjobs.typedesc_schema_change.resume_retry_errorNumber of typedesc_schema_change jobs which failed with a retriable errorjobsCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE +APPLICATIONjobs.update_table_metadata_cache.currently_idleNumber of update_table_metadata_cache jobs currently considered Idle and can be freely shut downjobsGAUGECOUNTAVGNONE +APPLICATIONjobs.update_table_metadata_cache.currently_pausedNumber of update_table_metadata_cache jobs currently considered PausedjobsGAUGECOUNTAVGNONE +APPLICATIONjobs.update_table_metadata_cache.currently_runningNumber of update_table_metadata_cache jobs currently running in Resume or OnFailOrCancel statejobsGAUGECOUNTAVGNONE +APPLICATIONjobs.update_table_metadata_cache.expired_pts_recordsNumber of expired protected timestamp records owned by update_table_metadata_cache jobsrecordsCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE +APPLICATIONjobs.update_table_metadata_cache.fail_or_cancel_completedNumber of update_table_metadata_cache jobs which successfully completed their failure or cancelation processjobsCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE +APPLICATIONjobs.update_table_metadata_cache.fail_or_cancel_failedNumber of update_table_metadata_cache jobs which failed with a non-retriable error on their failure or cancelation processjobsCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE +APPLICATIONjobs.update_table_metadata_cache.fail_or_cancel_retry_errorNumber of update_table_metadata_cache jobs which failed with a retriable error on their failure or cancelation processjobsCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE +APPLICATIONjobs.update_table_metadata_cache.protected_age_secThe age of the oldest PTS record protected by update_table_metadata_cache jobssecondsGAUGESECONDSAVGNONE +APPLICATIONjobs.update_table_metadata_cache.protected_record_countNumber of protected timestamp records held by update_table_metadata_cache jobsrecordsGAUGECOUNTAVGNONE +APPLICATIONjobs.update_table_metadata_cache.resume_completedNumber of update_table_metadata_cache jobs which successfully resumed to completionjobsCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE +APPLICATIONjobs.update_table_metadata_cache.resume_failedNumber of update_table_metadata_cache jobs which failed with a non-retriable errorjobsCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE +APPLICATIONjobs.update_table_metadata_cache.resume_retry_errorNumber of update_table_metadata_cache jobs which failed with a retriable errorjobsCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE APPLICATIONkv.protectedts.reconciliation.errorsnumber of errors encountered during reconciliation runs on this nodeCountCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE APPLICATIONkv.protectedts.reconciliation.num_runsnumber of successful reconciliation runs on this nodeCountCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE APPLICATIONkv.protectedts.reconciliation.records_processednumber of records processed without error during reconciliation on this nodeCountCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE @@ -1369,6 +1428,7 @@ 
APPLICATIONlogical_replication.flush_bytesNumber of bytes in a given flushLogical bytesHISTOGRAMBYTESAVGNONE APPLICATIONlogical_replication.flush_hist_nanosTime spent flushing messages across all replication streamsNanosecondsHISTOGRAMNANOSECONDSAVGNONE APPLICATIONlogical_replication.flush_row_countNumber of rows in a given flushRowsHISTOGRAMCOUNTAVGNONE +APPLICATIONlogical_replication.kv_write_fallback_countTotal number of times the kv write path could not handle a row update and fell back to SQL insteadEventsCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE APPLICATIONlogical_replication.logical_bytesLogical bytes (sum of keys + values) received by all replication jobsBytesCOUNTERBYTESAVGNON_NEGATIVE_DERIVATIVE APPLICATIONlogical_replication.optimistic_insert_conflict_countTotal number of times the optimistic insert encountered a conflictEventsCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE APPLICATIONlogical_replication.replan_countTotal number of dist sql replanning eventsEventsCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE @@ -1652,6 +1712,7 @@ APPLICATIONsqlliveness.sessions_deletion_runsNumber of calls to delete sessions which have been performedSessionsCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE APPLICATIONsqlliveness.write_failuresNumber of update or insert calls which have failedWritesCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE APPLICATIONsqlliveness.write_successesNumber of update or insert calls successfully performedWritesCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE +APPLICATIONtablemetadatacache.update_job.runsThe total number of runs of the update table metadata job.ExecutionsCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE APPLICATIONtenant.cost_client.blocked_requestsNumber of requests currently blocked by the rate limiterRequestsGAUGECOUNTAVGNONE APPLICATIONtenant.sql_usage.cross_region_network_ruTotal number of RUs charged for cross-region network trafficRequest UnitsCOUNTERCOUNTAVGNON_NEGATIVE_DERIVATIVE APPLICATIONtenant.sql_usage.estimated_cpu_secondsEstimated amount of CPU consumed by a virtual clusterCPU SecondsCOUNTERSECONDSAVGNON_NEGATIVE_DERIVATIVE diff --git a/docs/generated/settings/settings-for-tenants.txt b/docs/generated/settings/settings-for-tenants.txt index f99306bfc433..876c2eab9367 100644 --- a/docs/generated/settings/settings-for-tenants.txt +++ b/docs/generated/settings/settings-for-tenants.txt @@ -340,6 +340,9 @@ sql.stats.activity.persisted_rows.max integer 200000 maximum number of rows of s sql.stats.automatic_collection.enabled boolean true automatic statistics collection mode application sql.stats.automatic_collection.fraction_stale_rows float 0.2 target fraction of stale rows per table that will trigger a statistics refresh application sql.stats.automatic_collection.min_stale_rows integer 500 target minimum number of stale rows per table that will trigger a statistics refresh application +sql.stats.automatic_partial_collection.enabled boolean false automatic partial statistics collection mode application +sql.stats.automatic_partial_collection.fraction_stale_rows float 0.05 target fraction of stale rows per table that will trigger a partial statistics refresh application +sql.stats.automatic_partial_collection.min_stale_rows integer 100 target minimum number of stale rows per table that will trigger a partial statistics refresh application sql.stats.cleanup.recurrence string @hourly cron-tab recurrence for SQL Stats cleanup job application sql.stats.flush.enabled boolean true if set, SQL execution statistics are periodically flushed to disk application sql.stats.flush.interval duration 10m0s the 
interval at which SQL execution statistics are flushed to disk, this value must be less than or equal to 1 hour application @@ -348,6 +351,8 @@ sql.stats.forecasts.max_decrease float 0.3333333333333333 the most a prediction sql.stats.forecasts.min_goodness_of_fit float 0.95 the minimum R² (goodness of fit) measurement required from all predictive models to use a forecast application sql.stats.forecasts.min_observations integer 3 the minimum number of observed statistics required to produce a statistics forecast application sql.stats.histogram_buckets.count integer 200 maximum number of histogram buckets to build during table statistics collection application +sql.stats.histogram_buckets.include_most_common_values.enabled boolean true whether to include most common values as histogram buckets application +sql.stats.histogram_buckets.max_fraction_most_common_values float 0.1 maximum fraction of histogram buckets to use for most common values application sql.stats.histogram_collection.enabled boolean true histogram collection mode application sql.stats.histogram_samples.count integer 0 number of rows sampled for histogram construction during table statistics collection. Not setting this or setting a value of 0 means that a reasonable sample size will be automatically picked based on the table size. application sql.stats.multi_column_collection.enabled boolean true multi-column statistics collection mode application @@ -377,6 +382,7 @@ sql.ttl.default_select_batch_size integer 500 default amount of rows to select i sql.ttl.default_select_rate_limit integer 0 default select rate limit (rows per second) per node for each TTL job. Use 0 to signify no rate limit. application sql.ttl.job.enabled boolean true whether the TTL job is enabled application sql.txn.read_committed_isolation.enabled boolean true set to true to allow transactions to use the READ COMMITTED isolation level if specified by BEGIN/SET commands application +sql.txn.repeatable_read_isolation.enabled (alias: sql.txn.snapshot_isolation.enabled) boolean false set to true to allow transactions to use the REPEATABLE READ isolation level if specified by BEGIN/SET commands application sql.txn_fingerprint_id_cache.capacity integer 100 the maximum number of txn fingerprint IDs stored application storage.ingestion.value_blocks.enabled boolean true set to true to enable writing of value blocks in ingestion sstables application storage.max_sync_duration duration 20s maximum duration for disk operations; any operations that take longer than this setting trigger a warning log entry or process crash system-visible @@ -392,4 +398,4 @@ trace.snapshot.rate duration 0s if non-zero, interval at which background trace trace.span_registry.enabled boolean true if set, ongoing traces can be seen at https://<ui>/#/debug/tracez application trace.zipkin.collector string the address of a Zipkin instance to receive traces, as <host>:<port>. If no port is specified, 9411 will be used. application ui.display_timezone enumeration etc/utc the timezone used to format timestamps in the ui [etc/utc = 0, america/new_york = 1] application -version version 1000024.2-upgrading-to-1000024.3-step-006 set the active cluster version in the format '<major>.<minor>' application +version version 1000024.2-upgrading-to-1000024.3-step-014 set the active cluster version in the format '<major>.<minor>'
application diff --git a/docs/generated/settings/settings.html b/docs/generated/settings/settings.html index d3f4a4474cd0..7e2213ed2160 100644 --- a/docs/generated/settings/settings.html +++ b/docs/generated/settings/settings.html @@ -107,6 +107,7 @@
kv.log_range_and_node_events.enabled
booleantrueset to true to transactionally log range events (e.g., split, merge, add/remove voter/non-voter) into system.rangelog and node join and restart events into system.eventlogDedicated/Self-Hosted
kv.protectedts.reconciliation.interval
duration5m0sthe frequency for reconciling jobs with protected timestamp recordsDedicated/Self-Hosted (read-write); Serverless (read-only)
kv.raft.leader_fortification.fraction_enabled
float0controls the fraction of ranges for which the raft leader fortification protocol is enabled. Leader fortification is needed for a range to use a Leader lease. Set to 0.0 to disable leader fortification and, by extension, Leader leases. Set to 1.0 to enable leader fortification for all ranges and, by extension, use Leader leases for all ranges which do not require expiration-based leases. Set to a value between 0.0 and 1.0 to gradually roll out Leader leases across the ranges in a cluster.Dedicated/Self-Hosted +
kv.range.range_size_hard_cap
byte size8.0 GiBhard cap on the maximum size a range is allowed to grow to without splitting before writes to the range are blocked. Takes precedence over all other configurationsDedicated/Self-Hosted
kv.range_split.by_load.enabled
(alias: kv.range_split.by_load_enabled)
booleantrueallow automatic splits of ranges based on where load is concentratedDedicated/Self-Hosted
kv.range_split.load_cpu_threshold
duration500msthe CPU use per second over which the range becomes a candidate for load based splittingDedicated/Self-Hosted
kv.range_split.load_qps_threshold
integer2500the QPS over which the range becomes a candidate for load based splittingDedicated/Self-Hosted @@ -292,6 +293,9 @@
sql.stats.automatic_collection.enabled
booleantrueautomatic statistics collection modeServerless/Dedicated/Self-Hosted
sql.stats.automatic_collection.fraction_stale_rows
float0.2target fraction of stale rows per table that will trigger a statistics refreshServerless/Dedicated/Self-Hosted
sql.stats.automatic_collection.min_stale_rows
integer500target minimum number of stale rows per table that will trigger a statistics refreshServerless/Dedicated/Self-Hosted +
sql.stats.automatic_partial_collection.enabled
booleanfalseautomatic partial statistics collection modeServerless/Dedicated/Self-Hosted +
sql.stats.automatic_partial_collection.fraction_stale_rows
float0.05target fraction of stale rows per table that will trigger a partial statistics refreshServerless/Dedicated/Self-Hosted +
sql.stats.automatic_partial_collection.min_stale_rows
integer100target minimum number of stale rows per table that will trigger a partial statistics refreshServerless/Dedicated/Self-Hosted
sql.stats.cleanup.recurrence
string@hourlycron-tab recurrence for SQL Stats cleanup jobServerless/Dedicated/Self-Hosted
sql.stats.flush.enabled
booleantrueif set, SQL execution statistics are periodically flushed to diskServerless/Dedicated/Self-Hosted
sql.stats.flush.interval
duration10m0sthe interval at which SQL execution statistics are flushed to disk, this value must be less than or equal to 1 hourServerless/Dedicated/Self-Hosted @@ -300,6 +304,8 @@
sql.stats.forecasts.min_goodness_of_fit
float0.95the minimum R² (goodness of fit) measurement required from all predictive models to use a forecastServerless/Dedicated/Self-Hosted
sql.stats.forecasts.min_observations
integer3the minimum number of observed statistics required to produce a statistics forecastServerless/Dedicated/Self-Hosted
sql.stats.histogram_buckets.count
integer200maximum number of histogram buckets to build during table statistics collectionServerless/Dedicated/Self-Hosted +
sql.stats.histogram_buckets.include_most_common_values.enabled
booleantruewhether to include most common values as histogram bucketsServerless/Dedicated/Self-Hosted +
sql.stats.histogram_buckets.max_fraction_most_common_values
float0.1maximum fraction of histogram buckets to use for most common valuesServerless/Dedicated/Self-Hosted
sql.stats.histogram_collection.enabled
booleantruehistogram collection modeServerless/Dedicated/Self-Hosted
sql.stats.histogram_samples.count
integer0number of rows sampled for histogram construction during table statistics collection. Not setting this or setting a value of 0 means that a reasonable sample size will be automatically picked based on the table size.Serverless/Dedicated/Self-Hosted
sql.stats.multi_column_collection.enabled
booleantruemulti-column statistics collection modeServerless/Dedicated/Self-Hosted @@ -329,6 +335,7 @@
sql.ttl.default_select_rate_limit
integer0default select rate limit (rows per second) per node for each TTL job. Use 0 to signify no rate limit.Serverless/Dedicated/Self-Hosted
sql.ttl.job.enabled
booleantruewhether the TTL job is enabledServerless/Dedicated/Self-Hosted
sql.txn.read_committed_isolation.enabled
booleantrueset to true to allow transactions to use the READ COMMITTED isolation level if specified by BEGIN/SET commandsServerless/Dedicated/Self-Hosted +
sql.txn.repeatable_read_isolation.enabled
(alias: sql.txn.snapshot_isolation.enabled)
booleanfalseset to true to allow transactions to use the REPEATABLE READ isolation level if specified by BEGIN/SET commandsServerless/Dedicated/Self-Hosted
sql.txn_fingerprint_id_cache.capacity
integer100the maximum number of txn fingerprint IDs storedServerless/Dedicated/Self-Hosted
storage.experimental.eventually_file_only_snapshots.enabled
booleantrueset to false to disable eventually-file-only-snapshots (kv.snapshot_receiver.excise.enabled must also be false)Dedicated/Self-Hosted
storage.ingest_split.enabled
booleantrueset to false to disable ingest-time splitting that lowers write-amplificationDedicated/Self-Hosted @@ -348,6 +355,6 @@
trace.span_registry.enabled
booleantrueif set, ongoing traces can be seen at https://<ui>/#/debug/tracezServerless/Dedicated/Self-Hosted
trace.zipkin.collector
stringthe address of a Zipkin instance to receive traces, as <host>:<port>. If no port is specified, 9411 will be used.Serverless/Dedicated/Self-Hosted
ui.display_timezone
enumerationetc/utcthe timezone used to format timestamps in the ui [etc/utc = 0, america/new_york = 1]Serverless/Dedicated/Self-Hosted -
version
version1000024.2-upgrading-to-1000024.3-step-006set the active cluster version in the format '<major>.<minor>'Serverless/Dedicated/Self-Hosted +
version
version1000024.2-upgrading-to-1000024.3-step-014set the active cluster version in the format '<major>.<minor>'Serverless/Dedicated/Self-Hosted diff --git a/docs/generated/sql/bnf/alter_func_stmt.bnf b/docs/generated/sql/bnf/alter_func_stmt.bnf index 3620edcf23c6..3d2b4368831c 100644 --- a/docs/generated/sql/bnf/alter_func_stmt.bnf +++ b/docs/generated/sql/bnf/alter_func_stmt.bnf @@ -1,5 +1,5 @@ alter_func_stmt ::= - ( 'ALTER' 'FUNCTION' function_with_paramtypes ( ( ( 'CALLED' 'ON' 'NULL' 'INPUT' | 'RETURNS' 'NULL' 'ON' 'NULL' 'INPUT' | 'STRICT' | 'IMMUTABLE' | 'STABLE' | 'VOLATILE' | 'LEAKPROOF' | 'NOT' 'LEAKPROOF' ) ) ( ( ( 'CALLED' 'ON' 'NULL' 'INPUT' | 'RETURNS' 'NULL' 'ON' 'NULL' 'INPUT' | 'STRICT' | 'IMMUTABLE' | 'STABLE' | 'VOLATILE' | 'LEAKPROOF' | 'NOT' 'LEAKPROOF' ) ) )* ) ( 'RESTRICT' | ) ) + ( 'ALTER' 'FUNCTION' function_with_paramtypes ( ( ( 'CALLED' 'ON' 'NULL' 'INPUT' | 'RETURNS' 'NULL' 'ON' 'NULL' 'INPUT' | 'STRICT' | 'IMMUTABLE' | 'STABLE' | 'VOLATILE' | 'EXTERNAL' 'SECURITY' 'DEFINER' | 'EXTERNAL' 'SECURITY' 'INVOKER' | 'SECURITY' 'DEFINER' | 'SECURITY' 'INVOKER' | 'LEAKPROOF' | 'NOT' 'LEAKPROOF' ) ) ( ( ( 'CALLED' 'ON' 'NULL' 'INPUT' | 'RETURNS' 'NULL' 'ON' 'NULL' 'INPUT' | 'STRICT' | 'IMMUTABLE' | 'STABLE' | 'VOLATILE' | 'EXTERNAL' 'SECURITY' 'DEFINER' | 'EXTERNAL' 'SECURITY' 'INVOKER' | 'SECURITY' 'DEFINER' | 'SECURITY' 'INVOKER' | 'LEAKPROOF' | 'NOT' 'LEAKPROOF' ) ) )* ) ( 'RESTRICT' | ) ) | ( 'ALTER' 'FUNCTION' function_with_paramtypes 'RENAME' 'TO' function_new_name ) | ( 'ALTER' 'FUNCTION' function_with_paramtypes 'OWNER' 'TO' role_spec ) | ( 'ALTER' 'FUNCTION' function_with_paramtypes 'SET' 'SCHEMA' schema_name ) diff --git a/docs/generated/sql/bnf/create_func.bnf b/docs/generated/sql/bnf/create_func.bnf index 90ae04a97ba4..4e162a18fd5d 100644 --- a/docs/generated/sql/bnf/create_func.bnf +++ b/docs/generated/sql/bnf/create_func.bnf @@ -1,3 +1,3 @@ create_func_stmt ::= - 'CREATE' ( 'OR' 'REPLACE' | ) 'FUNCTION' routine_create_name '(' ( ( ( ( routine_param | routine_param | routine_param ) ) ( ( ',' ( routine_param | routine_param | routine_param ) ) )* ) | ) ')' 'RETURNS' ( 'SETOF' | ) routine_return_type ( ( ( ( 'AS' routine_body_str | 'LANGUAGE' ('SQL' | 'PLPGSQL') | ( 'CALLED' 'ON' 'NULL' 'INPUT' | 'RETURNS' 'NULL' 'ON' 'NULL' 'INPUT' | 'STRICT' | 'IMMUTABLE' | 'STABLE' | 'VOLATILE' | 'LEAKPROOF' | 'NOT' 'LEAKPROOF' ) ) ) ( ( ( 'AS' routine_body_str | 'LANGUAGE' ('SQL' | 'PLPGSQL') | ( 'CALLED' 'ON' 'NULL' 'INPUT' | 'RETURNS' 'NULL' 'ON' 'NULL' 'INPUT' | 'STRICT' | 'IMMUTABLE' | 'STABLE' | 'VOLATILE' | 'LEAKPROOF' | 'NOT' 'LEAKPROOF' ) ) ) )* ) | ) - | 'CREATE' ( 'OR' 'REPLACE' | ) 'FUNCTION' routine_create_name '(' ( ( ( ( routine_param | routine_param | routine_param ) ) ( ( ',' ( routine_param | routine_param | routine_param ) ) )* ) | ) ')' ( ( ( ( 'AS' routine_body_str | 'LANGUAGE' ('SQL' | 'PLPGSQL') | ( 'CALLED' 'ON' 'NULL' 'INPUT' | 'RETURNS' 'NULL' 'ON' 'NULL' 'INPUT' | 'STRICT' | 'IMMUTABLE' | 'STABLE' | 'VOLATILE' | 'LEAKPROOF' | 'NOT' 'LEAKPROOF' ) ) ) ( ( ( 'AS' routine_body_str | 'LANGUAGE' ('SQL' | 'PLPGSQL') | ( 'CALLED' 'ON' 'NULL' 'INPUT' | 'RETURNS' 'NULL' 'ON' 'NULL' 'INPUT' | 'STRICT' | 'IMMUTABLE' | 'STABLE' | 'VOLATILE' | 'LEAKPROOF' | 'NOT' 'LEAKPROOF' ) ) ) )* ) | ) + 'CREATE' ( 'OR' 'REPLACE' | ) 'FUNCTION' routine_create_name '(' ( ( ( ( routine_param | routine_param | routine_param ) ) ( ( ',' ( routine_param | routine_param | routine_param ) ) )* ) | ) ')' 'RETURNS' ( 'SETOF' | ) routine_return_type ( ( ( ( 'AS' routine_body_str | 
'LANGUAGE' ('SQL' | 'PLPGSQL') | ( 'CALLED' 'ON' 'NULL' 'INPUT' | 'RETURNS' 'NULL' 'ON' 'NULL' 'INPUT' | 'STRICT' | 'IMMUTABLE' | 'STABLE' | 'VOLATILE' | 'EXTERNAL' 'SECURITY' 'DEFINER' | 'EXTERNAL' 'SECURITY' 'INVOKER' | 'SECURITY' 'DEFINER' | 'SECURITY' 'INVOKER' | 'LEAKPROOF' | 'NOT' 'LEAKPROOF' ) ) ) ( ( ( 'AS' routine_body_str | 'LANGUAGE' ('SQL' | 'PLPGSQL') | ( 'CALLED' 'ON' 'NULL' 'INPUT' | 'RETURNS' 'NULL' 'ON' 'NULL' 'INPUT' | 'STRICT' | 'IMMUTABLE' | 'STABLE' | 'VOLATILE' | 'EXTERNAL' 'SECURITY' 'DEFINER' | 'EXTERNAL' 'SECURITY' 'INVOKER' | 'SECURITY' 'DEFINER' | 'SECURITY' 'INVOKER' | 'LEAKPROOF' | 'NOT' 'LEAKPROOF' ) ) ) )* ) | ) + | 'CREATE' ( 'OR' 'REPLACE' | ) 'FUNCTION' routine_create_name '(' ( ( ( ( routine_param | routine_param | routine_param ) ) ( ( ',' ( routine_param | routine_param | routine_param ) ) )* ) | ) ')' ( ( ( ( 'AS' routine_body_str | 'LANGUAGE' ('SQL' | 'PLPGSQL') | ( 'CALLED' 'ON' 'NULL' 'INPUT' | 'RETURNS' 'NULL' 'ON' 'NULL' 'INPUT' | 'STRICT' | 'IMMUTABLE' | 'STABLE' | 'VOLATILE' | 'EXTERNAL' 'SECURITY' 'DEFINER' | 'EXTERNAL' 'SECURITY' 'INVOKER' | 'SECURITY' 'DEFINER' | 'SECURITY' 'INVOKER' | 'LEAKPROOF' | 'NOT' 'LEAKPROOF' ) ) ) ( ( ( 'AS' routine_body_str | 'LANGUAGE' ('SQL' | 'PLPGSQL') | ( 'CALLED' 'ON' 'NULL' 'INPUT' | 'RETURNS' 'NULL' 'ON' 'NULL' 'INPUT' | 'STRICT' | 'IMMUTABLE' | 'STABLE' | 'VOLATILE' | 'EXTERNAL' 'SECURITY' 'DEFINER' | 'EXTERNAL' 'SECURITY' 'INVOKER' | 'SECURITY' 'DEFINER' | 'SECURITY' 'INVOKER' | 'LEAKPROOF' | 'NOT' 'LEAKPROOF' ) ) ) )* ) | ) diff --git a/docs/generated/sql/bnf/fingerprint_options.bnf b/docs/generated/sql/bnf/fingerprint_options.bnf index 3f4acb04db4e..78b6983806d8 100644 --- a/docs/generated/sql/bnf/fingerprint_options.bnf +++ b/docs/generated/sql/bnf/fingerprint_options.bnf @@ -1,2 +1,3 @@ fingerprint_options ::= 'START' 'TIMESTAMP' '=' d_expr + | 'EXCLUDE' 'COLUMNS' '=' string_or_placeholder_opt_list diff --git a/docs/generated/sql/bnf/stmt_block.bnf b/docs/generated/sql/bnf/stmt_block.bnf index bb9163d130b1..a3dd26e247d7 100644 --- a/docs/generated/sql/bnf/stmt_block.bnf +++ b/docs/generated/sql/bnf/stmt_block.bnf @@ -1306,6 +1306,7 @@ unreserved_keyword ::= | 'NOWAIT' | 'NULLS' | 'IGNORE_FOREIGN_KEYS' + | 'IGNORE_CDC_IGNORED_TTL_DELETES' | 'INSENSITIVE' | 'OF' | 'OFF' @@ -3552,6 +3553,10 @@ common_routine_opt_item ::= | 'IMMUTABLE' | 'STABLE' | 'VOLATILE' + | 'EXTERNAL' 'SECURITY' 'DEFINER' + | 'EXTERNAL' 'SECURITY' 'INVOKER' + | 'SECURITY' 'DEFINER' + | 'SECURITY' 'INVOKER' | 'LEAKPROOF' | 'NOT' 'LEAKPROOF' @@ -3874,6 +3879,7 @@ bare_label_keywords ::= | 'IFERROR' | 'IFNULL' | 'IGNORE_FOREIGN_KEYS' + | 'IGNORE_CDC_IGNORED_TTL_DELETES' | 'ILIKE' | 'IMMEDIATE' | 'IMMEDIATELY' diff --git a/go.mod b/go.mod index 4e792fda5413..0b3e9a0b573f 100644 --- a/go.mod +++ b/go.mod @@ -10,17 +10,17 @@ go 1.22.5 // merge of any upgrades we should communicate to all teams to be on the lookout // for behavior changes, just like we would after a go upgrade. 
require ( - golang.org/x/crypto v0.23.0 + golang.org/x/crypto v0.26.0 golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df golang.org/x/exp/typeparams v0.0.0-20221208152030-732eee02a75a // indirect - golang.org/x/mod v0.14.0 // indirect - golang.org/x/net v0.24.0 + golang.org/x/mod v0.20.0 // indirect + golang.org/x/net v0.28.0 golang.org/x/oauth2 v0.7.0 - golang.org/x/sync v0.7.0 - golang.org/x/sys v0.20.0 - golang.org/x/text v0.15.0 + golang.org/x/sync v0.8.0 + golang.org/x/sys v0.23.0 + golang.org/x/text v0.17.0 golang.org/x/time v0.3.0 - golang.org/x/tools v0.17.0 + golang.org/x/tools v0.24.0 ) // The following dependencies are key infrastructure dependencies and @@ -56,20 +56,20 @@ require ( github.com/Azure/go-autorest/autorest/azure/cli v0.4.3 // indirect github.com/Azure/go-autorest/autorest/to v0.4.0 github.com/aws/aws-sdk-go v1.40.37 - github.com/aws/aws-sdk-go-v2 v1.19.0 - github.com/aws/aws-sdk-go-v2/config v1.18.28 - github.com/aws/aws-sdk-go-v2/credentials v1.13.27 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.5 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.35 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.29 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.3.36 // indirect + github.com/aws/aws-sdk-go-v2 v1.30.4 + github.com/aws/aws-sdk-go-v2/config v1.27.31 + github.com/aws/aws-sdk-go-v2/credentials v1.17.30 + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.12 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.16 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect github.com/aws/aws-sdk-go-v2/service/databasemigrationservice v1.18.3 github.com/aws/aws-sdk-go-v2/service/ec2 v1.34.0 github.com/aws/aws-sdk-go-v2/service/iam v1.18.3 - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.29 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.18 // indirect github.com/aws/aws-sdk-go-v2/service/rds v1.18.4 - github.com/aws/aws-sdk-go-v2/service/sso v1.12.13 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.19.3 + github.com/aws/aws-sdk-go-v2/service/sso v1.22.5 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.30.5 ) // If any of the following dependencies get updated as a side-effect @@ -107,7 +107,11 @@ require ( github.com/apache/arrow/go/arrow v0.0.0-20200923215132-ac86123a3f01 github.com/apache/arrow/go/v11 v11.0.0 github.com/apache/pulsar-client-go v0.12.0 + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.16 + github.com/aws/aws-sdk-go-v2/service/kms v1.35.5 + github.com/aws/aws-sdk-go-v2/service/s3 v1.61.0 github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.18.2 + github.com/aws/smithy-go v1.20.4 github.com/axiomhq/hyperloglog v0.0.0-20181223111420-4b99d0c2c99e github.com/bazelbuild/rules_go v0.26.0 github.com/biogo/store v0.0.0-20160505134755-913427a1d5e8 @@ -126,7 +130,7 @@ require ( github.com/cockroachdb/go-test-teamcity v0.0.0-20191211140407-cff980ad0a55 github.com/cockroachdb/gostdlib v1.19.0 github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b -
github.com/cockroachdb/pebble v0.0.0-20240816131806-ac08db2964cd + github.com/cockroachdb/pebble v0.0.0-20240904144802-32cf8823809b github.com/cockroachdb/redact v1.1.5 github.com/cockroachdb/returncheck v0.0.0-20200612231554-92cdbca611dd github.com/cockroachdb/stress v0.0.0-20220803192808-1806698b1b7b @@ -166,13 +170,14 @@ require ( github.com/grafana/grafana-openapi-client-go v0.0.0-20240215164046-eb0e60d27cb7 github.com/grpc-ecosystem/grpc-gateway v1.16.0 github.com/guptarohit/asciigraph v0.5.5 + github.com/influxdata/influxdb-client-go/v2 v2.3.1-0.20210518120617-5d1fff431040 github.com/irfansharif/recorder v0.0.0-20211218081646-a21b46510fd6 github.com/jackc/pgx/v5 v5.4.2 github.com/jaegertracing/jaeger v1.18.1 github.com/jordan-wright/email v4.0.1-0.20210109023952-943e75fe5223+incompatible github.com/jordanlewis/gcassert v0.0.0-20240401195008-3141cbd028c0 github.com/kevinburke/go-bindata v3.13.0+incompatible - github.com/kisielk/errcheck v1.6.1-0.20210625163953-8ddee489636a + github.com/kisielk/errcheck v1.7.0 github.com/kisielk/gotool v1.0.0 github.com/klauspost/compress v1.17.8 github.com/klauspost/pgzip v1.2.5 @@ -182,7 +187,7 @@ require ( github.com/kr/text v0.2.0 github.com/kylelemons/godebug v1.1.0 github.com/leanovate/gopter v0.2.5-0.20190402064358-634a59d12406 - github.com/lestrrat-go/jwx v1.2.25 + github.com/lestrrat-go/jwx/v2 v2.1.1 github.com/lib/pq v1.10.7 github.com/linkedin/goavro/v2 v2.12.0 github.com/lufia/iostat v1.2.1 @@ -240,7 +245,7 @@ require ( go.opentelemetry.io/otel/trace v1.17.0 go.opentelemetry.io/proto/otlp v0.11.0 golang.org/x/perf v0.0.0-20230113213139-801c7ef9e5c5 - golang.org/x/term v0.20.0 + golang.org/x/term v0.23.0 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 honnef.co/go/tools v0.4.5 @@ -274,14 +279,19 @@ require ( github.com/Microsoft/go-winio v0.5.2 // indirect github.com/abbot/go-http-auth v0.4.1-0.20181019201920-860ed7f246ff // indirect github.com/aclements/go-moremath v0.0.0-20210112150236-f10218a38794 // indirect + github.com/ajstarks/svgo v0.0.0-20210923152817-c3b6e2f0c527 // indirect github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74 // indirect github.com/andybalholm/brotli v1.0.5 // indirect github.com/apache/thrift v0.16.0 // indirect github.com/ardielle/ardielle-go v1.5.2 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/atotto/clipboard v0.1.4 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.13 // indirect - github.com/aws/smithy-go v1.13.5 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.16 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.18 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.16 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.5 // indirect github.com/aymanbagabas/go-osc52 v1.0.3 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect @@ -292,7 
+302,8 @@ require ( github.com/charmbracelet/lipgloss v0.6.0 // indirect github.com/cockroachdb/swiss v0.0.0-20240612210725-f4de07ae6964 // indirect github.com/danieljoos/wincred v1.1.2 // indirect - github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect + github.com/deepmap/oapi-codegen v1.6.0 // indirect github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect github.com/dimchansky/utfbom v1.1.1 // indirect github.com/djherbis/atime v1.1.0 // indirect @@ -302,9 +313,12 @@ require ( github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 // indirect github.com/eapache/queue v1.1.0 // indirect github.com/fatih/structs v1.1.0 // indirect + github.com/fogleman/gg v1.3.0 // indirect github.com/ghodss/yaml v1.0.0 // indirect github.com/go-asn1-ber/asn1-ber v1.5.5 // indirect + github.com/go-fonts/liberation v0.2.0 // indirect github.com/go-kit/log v0.2.1 // indirect + github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81 // indirect github.com/go-logfmt/logfmt v0.5.1 // indirect github.com/go-logr/logr v1.3.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect @@ -318,12 +332,14 @@ require ( github.com/go-openapi/spec v0.20.14 // indirect github.com/go-openapi/swag v0.22.9 // indirect github.com/go-openapi/validate v0.23.0 // indirect - github.com/goccy/go-json v0.10.2 // indirect + github.com/go-pdf/fpdf v0.5.0 // indirect + github.com/goccy/go-json v0.10.3 // indirect github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect github.com/gofrs/flock v0.8.1 // indirect github.com/gofrs/uuid v4.0.0+incompatible // indirect github.com/golang-jwt/jwt v3.2.2+incompatible // indirect github.com/golang-jwt/jwt/v4 v4.2.0 // indirect + github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect @@ -337,6 +353,7 @@ require ( github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639 // indirect github.com/imdario/mergo v0.3.13 // indirect github.com/inconshreveable/mousetrap v1.0.1 // indirect + github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 // indirect github.com/jackc/puddle/v2 v2.2.0 // indirect github.com/jcmturner/aescts/v2 v2.0.0 // indirect github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect @@ -350,11 +367,11 @@ require ( github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/asmfmt v1.3.2 // indirect github.com/klauspost/cpuid/v2 v2.0.9 // indirect - github.com/lestrrat-go/backoff/v2 v2.0.8 // indirect - github.com/lestrrat-go/blackmagic v1.0.1 // indirect + github.com/lestrrat-go/blackmagic v1.0.2 // indirect github.com/lestrrat-go/httpcc v1.0.1 // indirect + github.com/lestrrat-go/httprc v1.0.6 // indirect github.com/lestrrat-go/iter v1.0.2 // indirect - github.com/lestrrat-go/option v1.0.0 // indirect + github.com/lestrrat-go/option v1.0.1 // indirect github.com/lufia/plan9stats 
v0.0.0-20211012122336-39d0f177ccd0 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect @@ -393,6 +410,7 @@ require ( github.com/rs/xid v1.3.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/sahilm/fuzzy v0.1.0 // indirect + github.com/segmentio/asm v1.2.0 // indirect github.com/sirupsen/logrus v1.9.0 // indirect github.com/slok/go-http-metrics v0.10.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect @@ -416,7 +434,9 @@ require ( go.uber.org/atomic v1.10.0 // indirect go.uber.org/multierr v1.7.0 // indirect go.uber.org/zap v1.19.0 // indirect + golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect + gonum.org/v1/plot v0.10.0 // indirect google.golang.org/appengine v1.6.7 // indirect gopkg.in/square/go-jose.v2 v2.5.1 // indirect ) diff --git a/go.sum b/go.sum index 17a60f79b68d..c994e6713629 100644 --- a/go.sum +++ b/go.sum @@ -269,6 +269,7 @@ github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= +github.com/ajstarks/svgo v0.0.0-20210923152817-c3b6e2f0c527 h1:NImof/JkF93OVWZY+PINgl6fPtQyF6f+hNUtZ0QZA1c= github.com/ajstarks/svgo v0.0.0-20210923152817-c3b6e2f0c527/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= github.com/akavel/rsrc v0.8.0/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -347,46 +348,63 @@ github.com/aws/aws-sdk-go v1.40.37/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/aws/aws-sdk-go-v2 v1.16.2/go.mod h1:ytwTPBG6fXTZLxxeeCCWj2/EMYp/xDUgX+OET6TLNNU= github.com/aws/aws-sdk-go-v2 v1.17.3/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= -github.com/aws/aws-sdk-go-v2 v1.19.0 h1:klAT+y3pGFBU/qVf1uzwttpBbiuozJYWzNLHioyDJ+k= -github.com/aws/aws-sdk-go-v2 v1.19.0/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= -github.com/aws/aws-sdk-go-v2/config v1.18.28 h1:TINEaKyh1Td64tqFvn09iYpKiWjmHYrG1fa91q2gnqw= -github.com/aws/aws-sdk-go-v2/config v1.18.28/go.mod h1:nIL+4/8JdAuNHEjn/gPEXqtnS02Q3NXB/9Z7o5xE4+A= -github.com/aws/aws-sdk-go-v2/credentials v1.13.27 h1:dz0yr/yR1jweAnsCx+BmjerUILVPQ6FS5AwF/OyG1kA= -github.com/aws/aws-sdk-go-v2/credentials v1.13.27/go.mod h1:syOqAek45ZXZp29HlnRS/BNgMIW6uiRmeuQsz4Qh2UE= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.5 h1:kP3Me6Fy3vdi+9uHd7YLr6ewPxRL+PU6y15urfTaamU= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.5/go.mod h1:Gj7tm95r+QsDoN2Fhuz/3npQvcZbkEf5mL70n3Xfluc= +github.com/aws/aws-sdk-go-v2 v1.30.4 h1:frhcagrVNrzmT95RJImMHgabt99vkXGslubDaDagTk8= +github.com/aws/aws-sdk-go-v2 v1.30.4/go.mod h1:CT+ZPWXbYrci8chcARI3OmI/qgd+f6WtuLOoaIA8PR0= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4 h1:70PVAiL15/aBMh5LThwgXdSQorVr91L127ttckI9QQU= 
+github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4/go.mod h1:/MQxMqci8tlqDH+pjmoLu1i0tbWCUP1hhyMRuFxpQCw= +github.com/aws/aws-sdk-go-v2/config v1.27.31 h1:kxBoRsjhT3pq0cKthgj6RU6bXTm/2SgdoUMyrVw0rAI= +github.com/aws/aws-sdk-go-v2/config v1.27.31/go.mod h1:z04nZdSWFPaDwK3DdJOG2r+scLQzMYuJeW0CujEm9FM= +github.com/aws/aws-sdk-go-v2/credentials v1.17.30 h1:aau/oYFtibVovr2rDt8FHlU17BTicFEMAi29V1U+L5Q= +github.com/aws/aws-sdk-go-v2/credentials v1.17.30/go.mod h1:BPJ/yXV92ZVq6G8uYvbU0gSl8q94UB63nMT5ctNO38g= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.12 h1:yjwoSyDZF8Jth+mUk5lSPJCkMC0lMy6FaCD51jm6ayE= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.12/go.mod h1:fuR57fAgMk7ot3WcNQfb6rSEn+SUffl7ri+aa8uKysI= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.16 h1:1FWqcOnvnO0lRsv0kLACwwQquoZIoS5tD0MtfoNdnkk= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.16/go.mod h1:+E8OuB446P/5Swajo40TqenLMzm6aYDEEz6FZDn/u1E= github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.9/go.mod h1:AnVH5pvai0pAF4lXRq0bmhbes1u9R8wTE+g+183bZNM= github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.27/go.mod h1:a1/UpzeyBBerajpnP5nGZa9mGzsBn5cOKxm6NWQsvoI= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.35 h1:hMUCiE3Zi5AHrRNGf5j985u0WyqI6r2NULhUfo0N/No= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.35/go.mod h1:ipR5PvpSPqIqL5Mi82BxLnfMkHVbmco8kUwO2xrCi0M= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16 h1:TNyt/+X43KJ9IJJMjKfa3bNTiZbUP7DeCxfbTROESwY= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16/go.mod h1:2DwJF39FlNAUiX5pAc0UNeiz16lK2t7IaFcm0LFHEgc= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.3/go.mod h1:ssOhaLpRlh88H3UmEcsBoVKq309quMvm3Ds8e9d4eJM= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.21/go.mod h1:+Gxn8jYn5k9ebfHEqlhrMirFjSW0v0C9fI+KN5vk2kE= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.29 h1:yOpYx+FTBdpk/g+sBU6Cb1H0U/TLEcYYp66mYqsPpcc= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.29/go.mod h1:M/eUABlDbw2uVrdAn+UsI6M727qp2fxkp8K0ejcBDUY= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.36 h1:8r5m1BoAWkn0TDC34lUculryf7nUF25EgIMdjvGCkgo= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.36/go.mod h1:Rmw2M1hMVTwiUhjwMoIBFWFJMhvJbct06sSidxInkhY= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.16 h1:jYfy8UPmd+6kJW5YhY0L1/KftReOGxI/4NtVSTh9O/I= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.16/go.mod h1:7ZfEPZxkW42Afq4uQB8H2E2e6ebh6mXTueEpYzjCzcs= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.16 h1:mimdLQkIX1zr8GIPY1ZtALdBQGxcASiBd2MOp8m/dMc= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.16/go.mod h1:YHk6owoSwrIsok+cAH9PENCOGoH5PU2EllX4vLtSrsY= github.com/aws/aws-sdk-go-v2/service/databasemigrationservice v1.18.3 h1:1OIa4zpY43n6Q5IX9b5mZWeq/KTV5243tXq5ABWlufI= github.com/aws/aws-sdk-go-v2/service/databasemigrationservice v1.18.3/go.mod h1:/n2C9Hm6TK02P0xW9GFxPGKHhcdzg5D2g0qgIL1NuAo= github.com/aws/aws-sdk-go-v2/service/ec2 v1.34.0 h1:dfWleW7/a3+TR6qJynYZsaovCEStQOep5x+BxkiBDhc= github.com/aws/aws-sdk-go-v2/service/ec2 
v1.34.0/go.mod h1:37MWOQMGyj8lcranOwo716OHvJgeFJUOaWu6vk1pWNE= github.com/aws/aws-sdk-go-v2/service/iam v1.18.3 h1:wllKL2fLtvfaNAVbXKMRmM/mD1oDNw0hXmDn8mE/6Us= github.com/aws/aws-sdk-go-v2/service/iam v1.18.3/go.mod h1:51xGfEjd1HXnTzw2mAp++qkRo+NyGYblZkuGTsb49yw= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4 h1:KypMCbLPPHEmf9DgMGw51jMj77VfGPAN2Kv4cfhlfgI= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4/go.mod h1:Vz1JQXliGcQktFTN/LN6uGppAIRoLBR2bMvIMP0gOjc= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.18 h1:GckUnpm4EJOAio1c8o25a+b3lVfwVzC9gnSBqiiNmZM= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.18/go.mod h1:Br6+bxfG33Dk3ynmkhsW2Z/t9D4+lRqdLDNCKi85w0U= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.3/go.mod h1:wlY6SVjuwvh3TVRpTqdy4I1JpBFLX4UGeKZdWntaocw= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.29 h1:IiDolu/eLmuB18DRZibj77n1hHQT7z12jnGO7Ze3pLc= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.29/go.mod h1:fDbkK4o7fpPXWn8YAPmTieAMuB9mk/VgvW64uaUqxd4= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.18 h1:tJ5RnkHCiSH0jyd6gROjlJtNwov0eGYNz8s8nFcR0jQ= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.18/go.mod h1:++NHzT+nAF7ZPrHPsA+ENvsXkOO8wEu+C6RXltAG4/c= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.16 h1:jg16PhLPUiHIj8zYIW6bqzeQSuHVEiWnGA0Brz5Xv2I= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.16/go.mod h1:Uyk1zE1VVdsHSU7096h/rwnXDzOzYQVl+FNPhPw7ShY= +github.com/aws/aws-sdk-go-v2/service/kms v1.35.5 h1:XUomV7SiclZl1QuXORdGcfFqHxEHET7rmNGtxTfNB+M= +github.com/aws/aws-sdk-go-v2/service/kms v1.35.5/go.mod h1:A5CS0VRmxxj2YKYLCY08l/Zzbd01m6JZn0WzxgT1OCA= github.com/aws/aws-sdk-go-v2/service/rds v1.18.4 h1:o5uszX76RaDFxNvf6b8qZ3GF55EdFoPH8GnrROjtL4k= github.com/aws/aws-sdk-go-v2/service/rds v1.18.4/go.mod h1:u33weNg1XPt3iTVX2wVFIf7oAD7XmgkF640mnM8wQ5Q= +github.com/aws/aws-sdk-go-v2/service/s3 v1.61.0 h1:Wb544Wh+xfSXqJ/j3R4aX9wrKUoZsJNmilBYZb3mKQ4= +github.com/aws/aws-sdk-go-v2/service/s3 v1.61.0/go.mod h1:BSPI0EfnYUuNHPS0uqIo5VrRwzie+Fp+YhQOUs16sKI= github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.18.2 h1:QDVKb2VpuwzIslzshumxksayV5GkpqT+rkVvdPVrA9E= github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.18.2/go.mod h1:jAeo/PdIJZuDSwsvxJS94G4d6h8tStj7WXVuKwLHWU8= -github.com/aws/aws-sdk-go-v2/service/sso v1.12.13 h1:sWDv7cMITPcZ21QdreULwxOOAmE05JjEsT6fCDtDA9k= -github.com/aws/aws-sdk-go-v2/service/sso v1.12.13/go.mod h1:DfX0sWuT46KpcqbMhJ9QWtxAIP1VozkDWf8VAkByjYY= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.13 h1:BFubHS/xN5bjl818QaroN6mQdjneYQ+AOx44KNXlyH4= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.13/go.mod h1:BzqsVVFduubEmzrVtUFQQIQdFqvUItF8XUq2EnS8Wog= -github.com/aws/aws-sdk-go-v2/service/sts v1.19.3 h1:e5mnydVdCVWxP+5rPAGi2PYxC7u2OZgH1ypC114H04U= -github.com/aws/aws-sdk-go-v2/service/sts v1.19.3/go.mod h1:yVGZA1CPkmUhBdA039jXNJJG7/6t+G+EBWmFq23xqnY= +github.com/aws/aws-sdk-go-v2/service/sso v1.22.5 h1:zCsFCKvbj25i7p1u94imVoO447I/sFv8qq+lGJhRN0c= +github.com/aws/aws-sdk-go-v2/service/sso v1.22.5/go.mod h1:ZeDX1SnKsVlejeuz41GiajjZpRSWR7/42q/EyA/QEiM= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.5 
h1:SKvPgvdvmiTWoi0GAJ7AsJfOz3ngVkD/ERbs5pUnHNI= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.5/go.mod h1:20sz31hv/WsPa3HhU3hfrIet2kxM4Pe0r20eBZ20Tac= +github.com/aws/aws-sdk-go-v2/service/sts v1.30.5 h1:OMsEmCyz2i89XwRwPouAJvhj81wINh+4UK+k/0Yo/q8= +github.com/aws/aws-sdk-go-v2/service/sts v1.30.5/go.mod h1:vmSqFK+BVIwVpDAGZB3CoCXHzurt4qBE8lf+I/kRTh0= github.com/aws/smithy-go v1.11.2/go.mod h1:3xHYmszWVx2c0kIwQeEVf9uSm4fYZt67FBJnwub1bgM= -github.com/aws/smithy-go v1.13.5 h1:hgz0X/DX0dGqTYpGALqXJoRKRj5oQ7150i5FdTePzO8= github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= +github.com/aws/smithy-go v1.20.4 h1:2HK1zBdPgRbjFOHlfeQZfpC4r72MOb9bZkiFwggKO+4= +github.com/aws/smithy-go v1.20.4/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/axiomhq/hyperloglog v0.0.0-20181223111420-4b99d0c2c99e h1:190ugM9MsyFauTkR/UqcHG/mn5nmFe6SvHJqEHIrtrA= github.com/axiomhq/hyperloglog v0.0.0-20181223111420-4b99d0c2c99e/go.mod h1:IOXAcuKIFq/mDyuQ4wyJuJ79XLMsmLM+5RdQ+vWrL7o= github.com/aymanbagabas/go-osc52 v1.0.3 h1:DTwqENW7X9arYimJrPeGZcV0ln14sGMt3pHZspWD+Mg= @@ -502,8 +520,8 @@ github.com/cockroachdb/cockroach-go/v2 v2.3.7 h1:nq5GYDuA2zIR/kdLkVLTg7oHTw0UbGU github.com/cockroachdb/cockroach-go/v2 v2.3.7/go.mod h1:1wNJ45eSXW9AnOc3skntW9ZUZz6gxrQK3cOj3rK+BC8= github.com/cockroachdb/crlfmt v0.0.0-20221214225007-b2fc5c302548 h1:i0bnjanlWAvM50wHMT7EFyxlt5HQusznWrkwl+HBIsU= github.com/cockroachdb/crlfmt v0.0.0-20221214225007-b2fc5c302548/go.mod h1:qtkxNlt5i3rrdirfJE/bQeW/IeLajKexErv7jEIV+Uc= -github.com/cockroachdb/crlib v0.0.0-20240729155931-991150b7e290 h1:oJGWhlrgtcPSzJEgFHo6VIQW2TCFR/OiByQr6yzjDkU= -github.com/cockroachdb/crlib v0.0.0-20240729155931-991150b7e290/go.mod h1:Gq51ZeKaFCXk6QwuGM0w1dnaOqc/F5zKT2zA9D6Xeac= +github.com/cockroachdb/crlib v0.0.0-20240816115810-1c502cdb7c1d h1:IgQRpDBWe+tThngFMwhWpRJ/oEe5ZUJHq5nBNqeiIeI= +github.com/cockroachdb/crlib v0.0.0-20240816115810-1c502cdb7c1d/go.mod h1:Gq51ZeKaFCXk6QwuGM0w1dnaOqc/F5zKT2zA9D6Xeac= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= github.com/cockroachdb/datadriven v1.0.3-0.20240530155848-7682d40af056 h1:slXychO2uDM6hYRu4c0pD0udNI8uObfeKN6UInWViS8= @@ -522,8 +540,8 @@ github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZe github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= github.com/cockroachdb/metamorphic v0.0.0-20231108215700-4ba948b56895 h1:XANOgPYtvELQ/h4IrmPAohXqe2pWA8Bwhejr3VQoZsA= github.com/cockroachdb/metamorphic v0.0.0-20231108215700-4ba948b56895/go.mod h1:aPd7gM9ov9M8v32Yy5NJrDyOcD8z642dqs+F0CeNXfA= -github.com/cockroachdb/pebble v0.0.0-20240816131806-ac08db2964cd h1:RNp4IiAIiYl11SYv07M2913mcnr9CHCKBmOkU4kcojo= -github.com/cockroachdb/pebble v0.0.0-20240816131806-ac08db2964cd/go.mod h1:ZPY1ov+eiufAtndK/VXuayN8CpxUXRMM7pprFLc7zxI= +github.com/cockroachdb/pebble v0.0.0-20240904144802-32cf8823809b h1:ZF9xYuVST8W758JvhvGiWN0NLQVLq5pqHhd/unF8Bds= +github.com/cockroachdb/pebble v0.0.0-20240904144802-32cf8823809b/go.mod h1:SU2rHRPC7x12KOAFdytOIffRn+8R5x7BAwdr22rWSPQ= 
github.com/cockroachdb/redact v1.1.3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30= github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= @@ -693,10 +711,9 @@ github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.0-20210816181553-5444fa50b93d/go.mod h1:tmAIfUFEirG/Y8jhZ9M+h36obRZAk/1fcSpXwAVlfqE= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 h1:HbphB4TFFXpv7MNrT52FGrrgVXF1owhMVTHFZIlnvd4= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2UO1+lbSKsdiOoYi9Zzey7Fc= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnNEcHYvcCuK6dPZSg= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= +github.com/deepmap/oapi-codegen v1.6.0 h1:w/d1ntwh91XI0b/8ja7+u5SvA4IFfM0UNNLmiDR1gg0= github.com/deepmap/oapi-codegen v1.6.0/go.mod h1:ryDa9AgbELGeB+YEXE1dR53yAjHwFvE9iAUlWl9Al3M= github.com/denisenkom/go-mssqldb v0.10.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= github.com/denisenkom/go-mssqldb v0.12.0/go.mod h1:iiK0YP1ZeepvmBQk/QpLEhhTNJgfzrpArPY/aFvc9yU= @@ -807,6 +824,7 @@ github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/fogleman/gg v1.3.0 h1:/7zJX8F6AaYQc57WQCyN9cAIz+4bCJGO9B+dyW29am8= github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= @@ -852,9 +870,12 @@ github.com/go-chi/chi/v5 v5.0.0/go.mod h1:BBug9lr0cqtdAhsu6R4AAdvufI0/XBzAQSsUqJ github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-fonts/dejavu v0.1.0 h1:JSajPXURYqpr+Cu8U9bt8K+XcACIHWqWrvWCKyeFmVQ= github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g= +github.com/go-fonts/latin-modern v0.2.0 h1:5/Tv1Ek/QCr20C6ZOz15vw3g7GELYL98KWr8Hgo+3vk= github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks= github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= +github.com/go-fonts/liberation v0.2.0 
h1:jAkAWJP4S+OsrPLZM4/eC9iW7CtHy+HBXrEwZXWo5VM= github.com/go-fonts/liberation v0.2.0/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= @@ -868,6 +889,7 @@ github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vb github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U= +github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81 h1:6zl3BbBhdnMkpSj2YY30qV3gDcVBGtFgVsV3+/i+mKQ= github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81/go.mod h1:SX0U8uGpxhq9o2S/CELCSUxEWWAuoCUcVCQWv7G2OCk= github.com/go-ldap/ldap/v3 v3.4.6 h1:ert95MdbiG7aWo/oPYp9btL3KJlMPKnP58r09rI8T+A= github.com/go-ldap/ldap/v3 v3.4.6/go.mod h1:IGMQANNtxpsOzj7uUAMjpGBaOVTC4DYyIy8VsTdxmtc= @@ -1006,6 +1028,7 @@ github.com/go-openapi/validate v0.20.1/go.mod h1:b60iJT+xNNLfaQJUqLI7946tYiFEOuE github.com/go-openapi/validate v0.20.2/go.mod h1:e7OJoKNgd0twXZwIn0A43tHbvIcr/rZIVCbJBpTUoY0= github.com/go-openapi/validate v0.23.0 h1:2l7PJLzCis4YUGEoW6eoQw3WhyM65WSIcjX6SQnlfDw= github.com/go-openapi/validate v0.23.0/go.mod h1:EeiAZ5bmpSIOJV1WLfyYF9qp/B1ZgSaEpHTJHtN5cbE= +github.com/go-pdf/fpdf v0.5.0 h1:GHpcYsiDV2hdo77VTOuTF9k1sN8F8IY7NjnCo9x+NPY= github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM= @@ -1050,9 +1073,8 @@ github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY9 github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= -github.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= -github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= -github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= +github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/gocql/gocql v0.0.0-20200228163523-cd4b606dd2fb/go.mod h1:DL0ekTmBSTdlNF25Orwt/JMzqIq3EJ4MVa/J/uK64OY= github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= @@ -1093,6 +1115,7 @@ github.com/golang-jwt/jwt/v4 v4.2.0 h1:besgBTC8w8HjP6NzQdxwKH9Z5oQMZ24ThTrHp3cZ8 github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod 
h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang-sql/sqlexp v0.0.0-20170517235910-f1bb20e5a188/go.mod h1:vXjM/+wXQnTPR4KqTKDgJukSZ6amVRtWMPEjE6sQoK8= +github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 h1:DACJavvAHhabrF08vX0COfcOBJRhZ8lUbR+ZWIs0Y5g= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= github.com/golang/geo v0.0.0-20200319012246-673a6f80352d h1:C/hKUcHT483btRbeGkrRjJz+Zbcj8audldIi9tRJDCc= @@ -1383,11 +1406,13 @@ github.com/influxdata/flux v0.120.1/go.mod h1:pGSAvyAA5d3et7SSzajaYShWYXmnRnJJq2 github.com/influxdata/httprouter v1.3.1-0.20191122104820-ee83e2772f69/go.mod h1:pwymjR6SrP3gD3pRj9RJwdl1j5s3doEEV8gS4X9qSzA= github.com/influxdata/influxdb v1.8.0/go.mod h1:SIzcnsjaHRFpmlxpJ4S3NT64qtEKYweNTUMb/vh0OMQ= github.com/influxdata/influxdb v1.9.3/go.mod h1:xD4ZjAgEJQO9/bX3NhFrssKtdNPi+ki1kjrttJRDhGc= +github.com/influxdata/influxdb-client-go/v2 v2.3.1-0.20210518120617-5d1fff431040 h1:MBLCfcSsUyFPDJp6T7EoHp/Ph3Jkrm4EuUKLD2rUWHg= github.com/influxdata/influxdb-client-go/v2 v2.3.1-0.20210518120617-5d1fff431040/go.mod h1:vLNHdxTJkIf2mSLvGrpj8TCcISApPoXkaxP8g9uRlW8= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= github.com/influxdata/influxql v1.1.0/go.mod h1:KpVI7okXjK6PRi3Z5B+mtKZli+R1DnZgb3N+tzevNgo= github.com/influxdata/influxql v1.1.1-0.20210223160523-b6ab99450c93/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk= github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE= +github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 h1:W9WBk7wlPfJLvMCdtV4zPulc4uCPrlywQOmbFOhgQNU= github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo= github.com/influxdata/pkg-config v0.2.7/go.mod h1:EMS7Ll0S4qkzDk53XS3Z72/egBsPInt+BeRxb0WeSwk= github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19ybifQhZoQNF5D8= @@ -1534,8 +1559,8 @@ github.com/kevinburke/go-bindata v3.13.0+incompatible/go.mod h1:/pEEZ72flUW2p0yi github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/errcheck v1.6.1-0.20210625163953-8ddee489636a h1:o2feyvFn8r1nJu/iVn89h0kT+CH6pROXZZHQO60NbrI= -github.com/kisielk/errcheck v1.6.1-0.20210625163953-8ddee489636a/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/errcheck v1.7.0 h1:+SbscKmWJ5mOK/bO1zS60F5I9WwZDWOfRsC4RwfwRV0= +github.com/kisielk/errcheck v1.7.0/go.mod h1:1kLL+jV4e+CFfueBmI1dSK2ADDyQnlrnrY/FqKluHJQ= github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/asmfmt v1.3.2 h1:4Ri7ox3EwapiOjCki+hw14RyKk201CN4rzyCJRFLpK4= @@ -1603,20 +1628,18 @@ github.com/leanovate/gopter 
v0.2.5-0.20190402064358-634a59d12406 h1:+OUpk+IVvmKU github.com/leanovate/gopter v0.2.5-0.20190402064358-634a59d12406/go.mod h1:gNcbPWNEWRe4lm+bycKqxUYoH5uoVje5SkOJ3uoLer8= github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw= github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= -github.com/lestrrat-go/backoff/v2 v2.0.8 h1:oNb5E5isby2kiro9AgdHLv5N5tint1AnDVVf2E2un5A= -github.com/lestrrat-go/backoff/v2 v2.0.8/go.mod h1:rHP/q/r9aT27n24JQLa7JhSQZCKBBOiM/uP402WwN8Y= -github.com/lestrrat-go/blackmagic v1.0.0/go.mod h1:TNgH//0vYSs8VXDCfkZLgIrVTTXQELZffUV0tz3MtdQ= -github.com/lestrrat-go/blackmagic v1.0.1 h1:lS5Zts+5HIC/8og6cGHb0uCcNCa3OUt1ygh3Qz2Fe80= -github.com/lestrrat-go/blackmagic v1.0.1/go.mod h1:UrEqBzIR2U6CnzVyUtfM6oZNMt/7O7Vohk2J0OGSAtU= +github.com/lestrrat-go/blackmagic v1.0.2 h1:Cg2gVSc9h7sz9NOByczrbUvLopQmXrfFx//N+AkAr5k= +github.com/lestrrat-go/blackmagic v1.0.2/go.mod h1:UrEqBzIR2U6CnzVyUtfM6oZNMt/7O7Vohk2J0OGSAtU= github.com/lestrrat-go/httpcc v1.0.1 h1:ydWCStUeJLkpYyjLDHihupbn2tYmZ7m22BGkcvZZrIE= github.com/lestrrat-go/httpcc v1.0.1/go.mod h1:qiltp3Mt56+55GPVCbTdM9MlqhvzyuL6W/NMDA8vA5E= -github.com/lestrrat-go/iter v1.0.1/go.mod h1:zIdgO1mRKhn8l9vrZJZz9TUMMFbQbLeTsbqPDrJ/OJc= +github.com/lestrrat-go/httprc v1.0.6 h1:qgmgIRhpvBqexMJjA/PmwSvhNk679oqD1RbovdCGW8k= +github.com/lestrrat-go/httprc v1.0.6/go.mod h1:mwwz3JMTPBjHUkkDv/IGJ39aALInZLrhBp0X7KGUZlo= github.com/lestrrat-go/iter v1.0.2 h1:gMXo1q4c2pHmC3dn8LzRhJfP1ceCbgSiT9lUydIzltI= github.com/lestrrat-go/iter v1.0.2/go.mod h1:Momfcq3AnRlRjI5b5O8/G5/BvpzrhoFTZcn06fEOPt4= -github.com/lestrrat-go/jwx v1.2.25 h1:tAx93jN2SdPvFn08fHNAhqFJazn5mBBOB8Zli0g0otA= -github.com/lestrrat-go/jwx v1.2.25/go.mod h1:zoNuZymNl5lgdcu6P7K6ie2QRll5HVfF4xwxBBK1NxY= -github.com/lestrrat-go/option v1.0.0 h1:WqAWL8kh8VcSoD6xjSH34/1m8yxluXQbDeKNfvFeEO4= -github.com/lestrrat-go/option v1.0.0/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I= +github.com/lestrrat-go/jwx/v2 v2.1.1 h1:Y2ltVl8J6izLYFs54BVcpXLv5msSW4o8eXwnzZLI32E= +github.com/lestrrat-go/jwx/v2 v2.1.1/go.mod h1:4LvZg7oxu6Q5VJwn7Mk/UwooNRnTHUpXBj2C4j3HNx0= +github.com/lestrrat-go/option v1.0.1 h1:oAzP2fvZGQKWkvHa1/SAcFolBEca1oN+mQ7eooNBEYU= +github.com/lestrrat-go/option v1.0.1/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I= github.com/lib/pq v0.0.0-20180327071824-d34b9ff171c2/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= @@ -2135,6 +2158,8 @@ github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= github.com/sectioneight/md-to-godoc v0.0.0-20161108233149-55e43be6c335/go.mod h1:lPZq22klO8la1kyImIDhrGytugMV0TsrsZB55a+xxI0= github.com/securego/gosec v0.0.0-20200203094520-d13bb6d2420c/go.mod h1:gp0gaHj0WlmPh9BdsTmo1aq6C27yIPWdxCKGFGdVKBE= +github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys= +github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= 
github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ= @@ -2517,13 +2542,12 @@ golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220210151621-f4118a5b28e2/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220427172511-eb4f295cb31f/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= -golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= -golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= +golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= +golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -2553,6 +2577,7 @@ golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+o golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/image v0.0.0-20210607152325-775e3b0c77b9/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d h1:RNPAfi2nHY7C2srAV8A49jpsYr0ADedCk1wq6fTMTvs= golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= @@ -2581,8 +2606,9 @@ golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= +golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -2663,8 +2689,8 @@ golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= -golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= -golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= +golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= +golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= golang.org/x/oauth2 v0.0.0-20170207211851-4464e7848382/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -2702,8 +2728,8 @@ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -2851,8 +2877,8 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= -golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.23.0 h1:YfKFowiIMvtgl1UERQoTPPToxltDeZfbj4H7dVUCwmM= +golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -2862,8 +2888,8 @@ golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= -golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw= -golang.org/x/term v0.20.0/go.mod 
h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= +golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= +golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2879,8 +2905,8 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= -golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= +golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -2982,8 +3008,9 @@ golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= -golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc= golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= +golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= +golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -3005,6 +3032,7 @@ gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6d gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ= gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY= +gonum.org/v1/plot v0.10.0 h1:ymLukg4XJlQnYUJCp+coQq5M7BsUJFk6XQE4HPflwdw= gonum.org/v1/plot v0.10.0/go.mod h1:JWIHJ7U20drSQb/aDpTetJzfC1KlAPldJLpkSy88dvQ= google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.0.0-20170206182103-3d017632ea10/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= @@ -3437,6 +3465,7 @@ modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= modernc.org/z v1.3.0/go.mod h1:+mvgLH814oDjtATDdT3rs84JnUIpkvAF5B8AVkNlE2g= rsc.io/binaryregexp v0.2.0/go.mod 
h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/pdf v0.1.1 h1:k1MczvYDUvJBe93bYd7wrZLLUEcLZAuF824/I4e5Xr4= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/pkg/BUILD.bazel b/pkg/BUILD.bazel index d538cc74b6b3..f63502198eea 100644 --- a/pkg/BUILD.bazel +++ b/pkg/BUILD.bazel @@ -64,6 +64,7 @@ ALL_TESTS = [ "//pkg/ccl/logictestccl/tests/local-mixed-24.1:local-mixed-24_1_test", "//pkg/ccl/logictestccl/tests/local-mixed-24.2:local-mixed-24_2_test", "//pkg/ccl/logictestccl/tests/local-read-committed:local-read-committed_test", + "//pkg/ccl/logictestccl/tests/local-repeatable-read:local-repeatable-read_test", "//pkg/ccl/logictestccl/tests/local-vec-off:local-vec-off_test", "//pkg/ccl/logictestccl/tests/local:local_test", "//pkg/ccl/logictestccl/tests/multiregion-15node-5region-3azs:multiregion-15node-5region-3azs_test", @@ -335,6 +336,7 @@ ALL_TESTS = [ "//pkg/server/diagnostics:diagnostics_test", "//pkg/server/dumpstore:dumpstore_test", "//pkg/server/goroutinedumper:goroutinedumper_test", + "//pkg/server/license:license_test", "//pkg/server/pgurl:pgurl_test", "//pkg/server/privchecker:privchecker_test", "//pkg/server/profiler:profiler_test", @@ -608,6 +610,7 @@ ALL_TESTS = [ "//pkg/sql/sqlitelogictest/tests/local-mixed-24.1:local-mixed-24_1_test", "//pkg/sql/sqlitelogictest/tests/local-mixed-24.2:local-mixed-24_2_test", "//pkg/sql/sqlitelogictest/tests/local-read-committed:local-read-committed_test", + "//pkg/sql/sqlitelogictest/tests/local-repeatable-read:local-repeatable-read_test", "//pkg/sql/sqlitelogictest/tests/local-vec-off:local-vec-off_test", "//pkg/sql/sqlitelogictest/tests/local:local_test", "//pkg/sql/sqlliveness/slinstance:slinstance_test", @@ -623,6 +626,7 @@ ALL_TESTS = [ "//pkg/sql/stats:stats_test", "//pkg/sql/stmtdiagnostics:stmtdiagnostics_test", "//pkg/sql/syntheticprivilege:syntheticprivilege_test", + "//pkg/sql/tablemetadatacache:tablemetadatacache_test", "//pkg/sql/tests:tests_test", "//pkg/sql/ttl/ttlbase:ttlbase_test", "//pkg/sql/ttl/ttljob:ttljob_test", @@ -927,6 +931,7 @@ GO_TARGETS = [ "//pkg/ccl/logictestccl/tests/local-mixed-24.1:local-mixed-24_1_test", "//pkg/ccl/logictestccl/tests/local-mixed-24.2:local-mixed-24_2_test", "//pkg/ccl/logictestccl/tests/local-read-committed:local-read-committed_test", + "//pkg/ccl/logictestccl/tests/local-repeatable-read:local-repeatable-read_test", "//pkg/ccl/logictestccl/tests/local-vec-off:local-vec-off_test", "//pkg/ccl/logictestccl/tests/local:local_test", "//pkg/ccl/logictestccl/tests/multiregion-15node-5region-3azs:multiregion-15node-5region-3azs_test", @@ -1684,6 +1689,8 @@ GO_TARGETS = [ "//pkg/server/dumpstore:dumpstore_test", "//pkg/server/goroutinedumper:goroutinedumper", "//pkg/server/goroutinedumper:goroutinedumper_test", + "//pkg/server/license:license", + "//pkg/server/license:license_test", "//pkg/server/pgurl:pgurl", "//pkg/server/pgurl:pgurl_test", "//pkg/server/privchecker:privchecker", @@ -2237,6 +2244,7 @@ GO_TARGETS = [ "//pkg/sql/sqlitelogictest/tests/local-mixed-24.1:local-mixed-24_1_test", "//pkg/sql/sqlitelogictest/tests/local-mixed-24.2:local-mixed-24_2_test", "//pkg/sql/sqlitelogictest/tests/local-read-committed:local-read-committed_test", + "//pkg/sql/sqlitelogictest/tests/local-repeatable-read:local-repeatable-read_test", "//pkg/sql/sqlitelogictest/tests/local-vec-off:local-vec-off_test", 
"//pkg/sql/sqlitelogictest/tests/local:local_test", "//pkg/sql/sqlitelogictest:sqlitelogictest", @@ -2276,6 +2284,8 @@ GO_TARGETS = [ "//pkg/sql/syntheticprivilege:syntheticprivilege", "//pkg/sql/syntheticprivilege:syntheticprivilege_test", "//pkg/sql/syntheticprivilegecache:syntheticprivilegecache", + "//pkg/sql/tablemetadatacache:tablemetadatacache", + "//pkg/sql/tablemetadatacache:tablemetadatacache_test", "//pkg/sql/tests:tests", "//pkg/sql/tests:tests_test", "//pkg/sql/ttl/ttlbase:ttlbase", diff --git a/pkg/acceptance/cluster/certs.go b/pkg/acceptance/cluster/certs.go index 5b4d8b20191b..5359d5b99049 100644 --- a/pkg/acceptance/cluster/certs.go +++ b/pkg/acceptance/cluster/certs.go @@ -63,14 +63,16 @@ func GenerateCerts(ctx context.Context) func() { userScopes := []roachpb.TenantID{roachpb.SystemTenantID, roachpb.MustMakeTenantID(5)} maybePanic(security.CreateClientPair( certsDir, filepath.Join(certsDir, certnames.EmbeddedCAKey), - keyLen, 48*time.Hour, false, username.RootUserName(), userScopes, true /* generate pk8 key */)) + keyLen, 48*time.Hour, false, username.RootUserName(), userScopes, + nil /* tenantNames */, true /* generate pk8 key */)) // Test user. // Scope test user to system tenant and tenant ID 5 which is what we use by default for acceptance // tests. maybePanic(security.CreateClientPair( certsDir, filepath.Join(certsDir, certnames.EmbeddedCAKey), - keyLen, 48*time.Hour, false, username.TestUserName(), userScopes, true /* generate pk8 key */)) + keyLen, 48*time.Hour, false, username.TestUserName(), userScopes, + nil /* tenantNames */, true /* generate pk8 key */)) // Certs for starting a cockroach server. Key size is from cli/cert.go:defaultKeySize. maybePanic(security.CreateNodePair( diff --git a/pkg/acceptance/compose_test.go b/pkg/acceptance/compose_test.go index b7e4eff87ef8..d3ec8b65067c 100644 --- a/pkg/acceptance/compose_test.go +++ b/pkg/acceptance/compose_test.go @@ -44,21 +44,21 @@ func testCompose(t *testing.T, path string, exitCodeFrom string) { tmpComposeDir := t.TempDir() err := copyRunfiles(composeDir, tmpComposeDir) if err != nil { - t.Fatalf(err.Error()) + t.Fatal(err) } path = filepath.Join(tmpComposeDir, path) // If running under Bazel, export 2 environment variables that will be interpolated in docker-compose.yml files. cockroachBinary, err := filepath.Abs(*cluster.CockroachBinary) if err != nil { - t.Fatalf(err.Error()) + t.Fatal(err) } err = os.Setenv("COCKROACH_BINARY", cockroachBinary) if err != nil { - t.Fatalf(err.Error()) + t.Fatal(err) } err = os.Setenv("CERTS_DIR", cluster.AbsCertsDir()) if err != nil { - t.Fatalf(err.Error()) + t.Fatal(err) } } else { path = filepath.Join(composeDir, path) @@ -66,12 +66,12 @@ func testCompose(t *testing.T, path string, exitCodeFrom string) { uid := os.Getuid() err := os.Setenv("UID", strconv.Itoa(uid)) if err != nil { - t.Fatalf(err.Error()) + t.Fatal(err) } gid := os.Getgid() err = os.Setenv("GID", strconv.Itoa(gid)) if err != nil { - t.Fatalf(err.Error()) + t.Fatal(err) } cmd := exec.Command( "docker", diff --git a/pkg/base/config.go b/pkg/base/config.go index af09b9d80c79..0f45c0f36b8e 100644 --- a/pkg/base/config.go +++ b/pkg/base/config.go @@ -748,6 +748,14 @@ func (cfg RaftConfig) NodeLivenessDurations() (livenessActive, livenessRenewal t return } +// StoreLivenessDurations computes durations for store liveness heartbeat +// interval and liveness interval. 
+func (cfg RaftConfig) StoreLivenessDurations() (livenessInterval, heartbeatInterval time.Duration) { + livenessInterval = cfg.RangeLeaseDuration + heartbeatInterval = time.Duration(float64(livenessInterval) * livenessRenewalFraction) + return +} + // SentinelGossipTTL is time-to-live for the gossip sentinel. The sentinel // informs a node whether or not it's connected to the primary gossip network // and not just a partition. As such it must expire fairly quickly and be diff --git a/pkg/base/license.go b/pkg/base/license.go index 763573c9e0a6..7f2f7e0b1c4b 100644 --- a/pkg/base/license.go +++ b/pkg/base/license.go @@ -16,7 +16,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/util/metric" - "github.com/cockroachdb/cockroach/pkg/util/stop" "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/errors" ) @@ -39,31 +38,32 @@ var CCLDistributionAndEnterpriseEnabled = func(st *cluster.Settings) bool { return CheckEnterpriseEnabled(st, "" /* feature */) == nil } -var licenseTTLMetadata = metric.Metadata{ +var LicenseTTLMetadata = metric.Metadata{ // This metric name isn't namespaced for backwards // compatibility. The prior version of this metric was manually // inserted into the prometheus output Name: "seconds_until_enterprise_license_expiry", - Help: "Seconds until enterprise license expiry (0 if no license present or running without enterprise features)", + Help: "Seconds until license expiry (0 if no license present)", Measurement: "Seconds", Unit: metric.Unit_SECONDS, } -// LicenseTTL is a metric gauge that measures the number of seconds -// until the current enterprise license (if any) expires. -var LicenseTTL = metric.NewGauge(licenseTTLMetadata) +var AdditionalLicenseTTLMetadata = metric.Metadata{ + Name: "seconds_until_license_expiry", + Help: "Seconds until license expiry (0 if no license present)", + Measurement: "Seconds", + Unit: metric.Unit_SECONDS, +} -// UpdateMetricOnLicenseChange is a function that's called on startup -// in order to connect the enterprise license setting update to the -// prometheus metric provided as an argument. -var UpdateMetricOnLicenseChange = func( +// GetLicenseTTL is a function which returns the TTL for the active cluster. +// The implementation here returns 0, but if utilccl is started this function is +// overridden with an appropriate getter. 
+var GetLicenseTTL = func( ctx context.Context, st *cluster.Settings, - metric *metric.Gauge, ts timeutil.TimeSource, - stopper *stop.Stopper, -) error { - return nil +) int64 { + return 0 } // LicenseType returns what type of license the cluster is running with, or diff --git a/pkg/base/testing_knobs.go b/pkg/base/testing_knobs.go index 54b3f29d7734..f145195e9cc7 100644 --- a/pkg/base/testing_knobs.go +++ b/pkg/base/testing_knobs.go @@ -59,4 +59,5 @@ type TestingKnobs struct { KeyVisualizer ModuleTestingKnobs TenantCapabilitiesTestingKnobs ModuleTestingKnobs TableStatsKnobs ModuleTestingKnobs + Insights ModuleTestingKnobs } diff --git a/pkg/bench/rttanalysis/testdata/benchmark_expectations b/pkg/bench/rttanalysis/testdata/benchmark_expectations index ec0e2e1f4dae..bacc00a7bceb 100644 --- a/pkg/bench/rttanalysis/testdata/benchmark_expectations +++ b/pkg/bench/rttanalysis/testdata/benchmark_expectations @@ -67,6 +67,6 @@ exp,benchmark 3,Jobs/jobs_page_default 3,Jobs/jobs_page_latest_50 3,Jobs/jobs_page_type_filtered -3,Jobs/jobs_page_type_filtered_no_matches +1,Jobs/jobs_page_type_filtered_no_matches 8,Jobs/pause_job 6,Jobs/resume_job diff --git a/pkg/ccl/BUILD.bazel b/pkg/ccl/BUILD.bazel index 455181f4a067..4bc1da49f6b6 100644 --- a/pkg/ccl/BUILD.bazel +++ b/pkg/ccl/BUILD.bazel @@ -33,5 +33,6 @@ go_library( "//pkg/ccl/utilccl", "//pkg/ccl/workloadccl", "//pkg/server", + "//pkg/server/license", ], ) diff --git a/pkg/ccl/backupccl/BUILD.bazel b/pkg/ccl/backupccl/BUILD.bazel index 5b29fc7c51f2..49941eb2a069 100644 --- a/pkg/ccl/backupccl/BUILD.bazel +++ b/pkg/ccl/backupccl/BUILD.bazel @@ -329,7 +329,8 @@ go_test( "//pkg/workload/bank", "//pkg/workload/histogram", "//pkg/workload/workloadsql", - "@com_github_aws_aws_sdk_go//aws/credentials", + "@com_github_aws_aws_sdk_go_v2//aws", + "@com_github_aws_aws_sdk_go_v2_config//:config", "@com_github_cockroachdb_cockroach_go_v2//crdb", "@com_github_cockroachdb_datadriven//:datadriven", "@com_github_cockroachdb_errors//:errors", diff --git a/pkg/ccl/backupccl/backup_cloud_test.go b/pkg/ccl/backupccl/backup_cloud_test.go index 40deccd1d49c..38e241c35d5c 100644 --- a/pkg/ccl/backupccl/backup_cloud_test.go +++ b/pkg/ccl/backupccl/backup_cloud_test.go @@ -15,7 +15,8 @@ import ( "os" "testing" - "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/config" "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/cloud" "github.com/cockroachdb/cockroach/pkg/cloud/amazon" @@ -82,21 +83,23 @@ func TestCloudBackupRestoreS3WithLegacyPut(t *testing.T) { backupAndRestore(ctx, t, tc, []string{uri.String()}, []string{uri.String()}, numAccounts, nil) } -func requiredS3CredsAndBucket(t *testing.T) (credentials.Value, string) { +func requiredS3CredsAndBucket(t *testing.T) (aws.Credentials, string) { t.Helper() - creds, err := credentials.NewEnvCredentials().Get() - if err != nil { + envConfig, err := config.NewEnvConfig() + require.NoError(t, err) + if !envConfig.Credentials.HasKeys() { - skip.IgnoreLintf(t, "No AWS env keys (%v)", err) + skip.IgnoreLint(t, "No AWS env keys") } + bucket := os.Getenv("AWS_S3_BUCKET") if bucket == "" { skip.IgnoreLint(t, "AWS_S3_BUCKET env var must be set") } - return creds, bucket + return envConfig.Credentials, bucket } func setupS3URI( - t *testing.T, db *sqlutils.SQLRunner, bucket string, prefix string, creds credentials.Value, + t *testing.T, db *sqlutils.SQLRunner, bucket string, prefix string, creds aws.Credentials, ) url.URL { t.Helper()
endpoint := os.Getenv(amazon.NightlyEnvVarS3Params[amazon.AWSEndpointParam]) diff --git a/pkg/ccl/backupccl/backup_planning.go b/pkg/ccl/backupccl/backup_planning.go index 53ed3bc204e3..74797d2d9dda 100644 --- a/pkg/ccl/backupccl/backup_planning.go +++ b/pkg/ccl/backupccl/backup_planning.go @@ -638,6 +638,15 @@ func backupPlanHook( return errors.AssertionFailedf("unexpected descriptor coverage %v", backupStmt.Coverage()) } + for _, t := range targetDescs { + if tbl, ok := t.(catalog.TableDescriptor); ok && tbl.ExternalRowData() != nil { + if tbl.ExternalRowData().TenantID.IsSet() { + return errors.UnimplementedError(errors.IssueLink{}, "backing up from a replication target is not supported") + } + return errors.UnimplementedError(errors.IssueLink{}, "backing up from external tables is not supported") + } + } + // Check BACKUP privileges. err = checkPrivilegesForBackup(ctx, backupStmt, p, targetDescs, to) if err != nil { diff --git a/pkg/ccl/backupccl/backup_test.go b/pkg/ccl/backupccl/backup_test.go index c35c766e18f1..fc71ca7b3029 100644 --- a/pkg/ccl/backupccl/backup_test.go +++ b/pkg/ccl/backupccl/backup_test.go @@ -32,7 +32,7 @@ import ( "testing" "time" - "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go-v2/config" "github.com/cockroachdb/cockroach-go/v2/crdb" "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/blobs" @@ -4273,9 +4273,10 @@ func getAWSKMSURI(t *testing.T, regionEnvVariable, keyIDEnvVariable string) (str // If environment credentials are not present, we want to // skip all AWS KMS tests, including auth-implicit, even though // it is not used in auth-implicit. - _, err := credentials.NewEnvCredentials().Get() - if err != nil { - skip.IgnoreLint(t, "Test only works with AWS credentials") + envConfig, err := config.NewEnvConfig() + require.NoError(t, err) + if !envConfig.Credentials.HasKeys() { + skip.IgnoreLint(t, "No AWS credentials") } q := make(url.Values) @@ -6450,7 +6451,7 @@ func TestProtectedTimestampsFailDueToLimits(t *testing.T) { // Creating the protected timestamp record should fail because there are too // many spans. Ensure that we get the appropriate error. _, err := db.Exec(`BACKUP TABLE foo, bar TO 'nodelocal://1/foo/byte-limit'`) - require.EqualError(t, err, "pq: protectedts: limit exceeded: 0+30 > 1 bytes") + require.ErrorContains(t, err, "pq: protectedts: limit exceeded") // TODO(adityamaru): Remove in 22.2 once no records protect spans. t.Run("deprecated-spans-limit", func(t *testing.T) { diff --git a/pkg/ccl/backupccl/backuprand/backup_rand_test.go b/pkg/ccl/backupccl/backuprand/backup_rand_test.go index 68d01d4d98b0..bc58d4f2278b 100644 --- a/pkg/ccl/backupccl/backuprand/backup_rand_test.go +++ b/pkg/ccl/backupccl/backuprand/backup_rand_test.go @@ -166,9 +166,7 @@ database_name = 'rand' AND schema_name = 'public'`) withOnlineRestore := func() string { onlineRestoreExtension := "" if rng.Intn(2) != 0 { - // TODO(msbutler): once this test is deflaked, add back the online restore - // variant of this test. 
- onlineRestoreExtension = "" + onlineRestoreExtension = ", experimental deferred copy" } return onlineRestoreExtension } diff --git a/pkg/ccl/backupccl/generative_split_and_scatter_processor.go b/pkg/ccl/backupccl/generative_split_and_scatter_processor.go index 3331e0c34d91..c4d7ffd25a29 100644 --- a/pkg/ccl/backupccl/generative_split_and_scatter_processor.go +++ b/pkg/ccl/backupccl/generative_split_and_scatter_processor.go @@ -543,100 +543,99 @@ func runGenerativeSplitAndScatter( }) importSpanChunksCh := make(chan scatteredChunk, chunkSplitAndScatterWorkers*2) - - // This group of goroutines processes the chunks from restoreEntryChunksCh. - // For each chunk, a split is created at the start key of the next chunk. The - // current chunk is then scattered, and the chunk with its destination is - // passed to importSpanChunksCh. - g2 := ctxgroup.WithContext(ctx) - for worker := 0; worker < chunkSplitAndScatterWorkers; worker++ { - worker := worker - g2.GoCtx(func(ctx context.Context) error { - hash := fnv.New32a() - - // Chunks' leaseholders should be randomly placed throughout the - // cluster. - for importSpanChunk := range restoreEntryChunksCh { - scatterKey := importSpanChunk.entries[0].Span.Key - if !importSpanChunk.splitKey.Equal(roachpb.Key{}) { - // Split at the start of the next chunk, to partition off a - // prefix of the space to scatter. - if err := chunkSplitAndScatterers[worker].split(ctx, flowCtx.Codec(), importSpanChunk.splitKey); err != nil { + g.GoCtx(func(ctx context.Context) error { + defer close(importSpanChunksCh) + // This group of goroutines processes the chunks from restoreEntryChunksCh. + // For each chunk, a split is created at the start key of the next chunk. The + // current chunk is then scattered, and the chunk with its destination is + // passed to importSpanChunksCh. + g2 := ctxgroup.WithContext(ctx) + for worker := 0; worker < chunkSplitAndScatterWorkers; worker++ { + worker := worker + g2.GoCtx(func(ctx context.Context) error { + hash := fnv.New32a() + + // Chunks' leaseholders should be randomly placed throughout the + // cluster. + for importSpanChunk := range restoreEntryChunksCh { + scatterKey := importSpanChunk.entries[0].Span.Key + if !importSpanChunk.splitKey.Equal(roachpb.Key{}) { + // Split at the start of the next chunk, to partition off a + // prefix of the space to scatter. + if err := chunkSplitAndScatterers[worker].split(ctx, flowCtx.Codec(), importSpanChunk.splitKey); err != nil { + return err + } + } + chunkDestination, err := chunkSplitAndScatterers[worker].scatter(ctx, flowCtx.Codec(), scatterKey) + if err != nil { return err } - } - chunkDestination, err := chunkSplitAndScatterers[worker].scatter(ctx, flowCtx.Codec(), scatterKey) - if err != nil { - return err - } - if chunkDestination == 0 { - // If scatter failed to find a node for range ingestion, route the - // range to a random node that has already been scattered to so far. - // The random node is chosen by hashing the scatter key. - if nodeID, ok := flowCtx.NodeID.OptionalNodeID(); ok { - cachedNodeIDs := cache.cachedNodeIDs() - if len(cachedNodeIDs) > 0 { - hash.Reset() - if _, err := hash.Write(scatterKey); err != nil { - log.Warningf(ctx, "scatter returned node 0. Route span starting at %s to current node %v because of hash error: %v", - scatterKey, nodeID, err) + if chunkDestination == 0 { + // If scatter failed to find a node for range ingestion, route the + // range to a random node that has already been scattered to so far. 
+ // The random node is chosen by hashing the scatter key. + if nodeID, ok := flowCtx.NodeID.OptionalNodeID(); ok { + cachedNodeIDs := cache.cachedNodeIDs() + if len(cachedNodeIDs) > 0 { + hash.Reset() + if _, err := hash.Write(scatterKey); err != nil { + log.Warningf(ctx, "scatter returned node 0. Route span starting at %s to current node %v because of hash error: %v", + scatterKey, nodeID, err) + } else { + hashedKey := int(hash.Sum32()) + nodeID = cachedNodeIDs[hashedKey%len(cachedNodeIDs)] + } + + log.Warningf(ctx, "scatter returned node 0. "+ + "Random route span starting at %s node %v", scatterKey, nodeID) } else { - hashedKey := int(hash.Sum32()) - nodeID = cachedNodeIDs[hashedKey%len(cachedNodeIDs)] + log.Warningf(ctx, "scatter returned node 0. "+ + "Route span starting at %s to current node %v", scatterKey, nodeID) } - - log.Warningf(ctx, "scatter returned node 0. "+ - "Random route span starting at %s node %v", scatterKey, nodeID) + chunkDestination = nodeID } else { + // TODO(rui): OptionalNodeID only returns a node if the sql server runs + // in the same process as the kv server (e.g., not serverless). Figure + // out how to handle this error in serverless restore. log.Warningf(ctx, "scatter returned node 0. "+ - "Route span starting at %s to current node %v", scatterKey, nodeID) + "Route span starting at %s to default stream", scatterKey) } - chunkDestination = nodeID - } else { - // TODO(rui): OptionalNodeID only returns a node if the sql server runs - // in the same process as the kv server (e.g., not serverless). Figure - // out how to handle this error in serverless restore. - log.Warningf(ctx, "scatter returned node 0. "+ - "Route span starting at %s to default stream", scatterKey) } - } - if _, ok := flowCtx.NodeID.OptionalNodeID(); !ok { - // If a seperate process tenant is running restore, the nodeID - // returned by scatter does not map identically to a sqlInstanceID; - // thus, the processor must randomly choose a sqlInstanceID to route - // the chunk to. - // - // TODO(msbutler): it is unfortunate that all logic after scatter - // operates on a NodeID object. The logic should use SQLInstanceID's - // as these chunks are routed to DistSQL processors running on sql - // servers. - if len(spec.SQLInstanceIDs) > 0 { - chunkDestination = roachpb.NodeID(spec.SQLInstanceIDs[rand.Intn(len(spec.SQLInstanceIDs))]) - } else { - chunkDestination = roachpb.NodeID(0) + if _, ok := flowCtx.NodeID.OptionalNodeID(); !ok { + // If a seperate process tenant is running restore, the nodeID + // returned by scatter does not map identically to a sqlInstanceID; + // thus, the processor must randomly choose a sqlInstanceID to route + // the chunk to. + // + // TODO(msbutler): it is unfortunate that all logic after scatter + // operates on a NodeID object. The logic should use SQLInstanceID's + // as these chunks are routed to DistSQL processors running on sql + // servers. 
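+ // For example, if spec.SQLInstanceIDs is [2, 5, 9], the chunk is routed + // to one of those three SQL instances chosen uniformly at random; an empty + // list falls back to node ID 0, i.e. the default stream.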
+ if len(spec.SQLInstanceIDs) > 0 { + chunkDestination = roachpb.NodeID(spec.SQLInstanceIDs[rand.Intn(len(spec.SQLInstanceIDs))]) + } else { + chunkDestination = roachpb.NodeID(0) + } } - } - sc := scatteredChunk{ - destination: chunkDestination, - entries: importSpanChunk.entries, - } + sc := scatteredChunk{ + destination: chunkDestination, + entries: importSpanChunk.entries, + } - select { - case <-ctx.Done(): - return errors.Wrap(ctx.Err(), "sending scattered chunk") - case importSpanChunksCh <- sc: + select { + case <-ctx.Done(): + return errors.Wrap(ctx.Err(), "sending scattered chunk") + case importSpanChunksCh <- sc: + } } - } - return nil - }) - } + return nil + }) + } - // This goroutine waits for the chunkSplitAndScatter workers to finish so that - // it can close importSpanChunksCh. - g.GoCtx(func(ctx context.Context) error { - defer close(importSpanChunksCh) + // This goroutine waits for the chunkSplitAndScatter workers to finish so that + // it can close importSpanChunksCh. return errors.Wrap(g2.Wait(), "waiting for chunkSplitAndScatter workers") }) diff --git a/pkg/ccl/backupccl/generative_split_and_scatter_processor_test.go b/pkg/ccl/backupccl/generative_split_and_scatter_processor_test.go index be21e4c4b482..456a2bc158f1 100644 --- a/pkg/ccl/backupccl/generative_split_and_scatter_processor_test.go +++ b/pkg/ccl/backupccl/generative_split_and_scatter_processor_test.go @@ -29,6 +29,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/protoutil" + "github.com/cockroachdb/errors" "github.com/stretchr/testify/require" ) @@ -229,23 +230,34 @@ func TestRunGenerativeSplitAndScatterRandomizedDestOnFailScatter(t *testing.T) { // and can break if it changes. require.GreaterOrEqual(t, cnt, 2) } + + // Also test that errors from split mid-chunk are returned (this deadlocked at + // one point). + spec.ChunkSize = 2 + require.Error(t, runGenerativeSplitAndScatter(ctx, &flowCtx, &spec, + []splitAndScatterer{&scatterAlwaysFailsSplitScatterer{}}, + []splitAndScatterer{&scatterAlwaysFailsSplitScatterer{err: errors.New("injected")}}, + make(chan entryNode, 1000), + &cache, + )) } // scatterAlwaysFailsSplitScatterer always fails the scatter and returns 0 as // the chunk destination. type scatterAlwaysFailsSplitScatterer struct { + err error } func (t *scatterAlwaysFailsSplitScatterer) split( ctx context.Context, codec keys.SQLCodec, splitKey roachpb.Key, ) error { - return nil + return t.err } func (t *scatterAlwaysFailsSplitScatterer) scatter( ctx context.Context, codec keys.SQLCodec, scatterKey roachpb.Key, ) (roachpb.NodeID, error) { - return 0, nil + return 0, t.err } func makeTestingGenerativeSplitAndScatterSpec( diff --git a/pkg/ccl/backupccl/restore_job.go b/pkg/ccl/backupccl/restore_job.go index 71ff6fa2d765..6a55711ac67f 100644 --- a/pkg/ccl/backupccl/restore_job.go +++ b/pkg/ccl/backupccl/restore_job.go @@ -882,21 +882,6 @@ func spansForAllRestoreTableIndexes( }) return false }) - - if forOnlineRestore { - spans, _ = roachpb.MergeSpans(&spans) - tableIDMap := make(map[uint32]struct{}, len(spans)) - for _, sp := range spans { - _, tableID, err := codec.DecodeTablePrefix(sp.Key) - if err != nil { - return nil, err - } - if _, exists := tableIDMap[tableID]; exists { - return nil, errors.Newf("restore target contains two distinct spans with table id %d. 
Online restore cannot handle this as it may make an empty file span", tableID) - } - tableIDMap[tableID] = struct{}{} - } - } return spans, nil } diff --git a/pkg/ccl/backupccl/restore_online.go b/pkg/ccl/backupccl/restore_online.go index 98d64ba73b63..8e8e0ba50aac 100644 --- a/pkg/ccl/backupccl/restore_online.go +++ b/pkg/ccl/backupccl/restore_online.go @@ -302,11 +302,6 @@ func sendAddRemoteSSTWorker( ) } - // TODO(dt): remove when pebble supports empty (virtual) files. - if !file.BackupFileEntrySpan.Equal(restoringSubspan) { - return errors.AssertionFailedf("file span %s at path %s is not contained in restore span %s", file.BackupFileEntrySpan, file.Path, entry.Span) - } - restoringSubspan, err := rewriteSpan(&kr, restoringSubspan.Clone(), entry.ElidedPrefix) if err != nil { return err diff --git a/pkg/ccl/backupccl/system_schema.go b/pkg/ccl/backupccl/system_schema.go index 9d5cbb877392..1099c526f8fb 100644 --- a/pkg/ccl/backupccl/system_schema.go +++ b/pkg/ccl/backupccl/system_schema.go @@ -847,6 +847,9 @@ var systemTableBackupConfiguration = map[string]systemBackupConfiguration{ systemschema.TransactionExecInsightsTable.GetName(): { shouldIncludeInClusterBackup: optOutOfClusterBackup, }, + systemschema.TableMetadata.GetName(): { + shouldIncludeInClusterBackup: optOutOfClusterBackup, + }, } func rekeySystemTable( diff --git a/pkg/ccl/backupccl/testdata/backup-restore/plpgsql_procedures b/pkg/ccl/backupccl/testdata/backup-restore/plpgsql_procedures index 15108a5b2e45..b93d4305cb27 100644 --- a/pkg/ccl/backupccl/testdata/backup-restore/plpgsql_procedures +++ b/pkg/ccl/backupccl/testdata/backup-restore/plpgsql_procedures @@ -94,6 +94,7 @@ SELECT create_statement FROM [SHOW CREATE PROCEDURE sc1.p1] ---- CREATE PROCEDURE sc1.p1(a sc1.enum1) LANGUAGE plpgsql + SECURITY INVOKER AS $$ DECLARE foobar sc1.enum1; @@ -134,6 +135,7 @@ SELECT create_statement FROM [SHOW CREATE PROCEDURE sc1.p1] ---- CREATE PROCEDURE sc1.p1(a sc1.enum1) LANGUAGE plpgsql + SECURITY INVOKER AS $$ DECLARE foobar sc1.enum1; @@ -300,6 +302,7 @@ SELECT create_statement FROM [SHOW CREATE PROCEDURE sc1.p1] ---- CREATE PROCEDURE sc1.p1(a sc1.enum1) LANGUAGE plpgsql + SECURITY INVOKER AS $$ DECLARE foobar sc1.enum1; @@ -341,6 +344,7 @@ SELECT create_statement FROM [SHOW CREATE PROCEDURE sc1.p1] ---- CREATE PROCEDURE sc1.p1(a sc1.enum1) LANGUAGE plpgsql + SECURITY INVOKER AS $$ DECLARE foobar sc1.enum1; diff --git a/pkg/ccl/backupccl/testdata/backup-restore/plpgsql_user_defined_functions b/pkg/ccl/backupccl/testdata/backup-restore/plpgsql_user_defined_functions index d720c6355d98..76f689693562 100644 --- a/pkg/ccl/backupccl/testdata/backup-restore/plpgsql_user_defined_functions +++ b/pkg/ccl/backupccl/testdata/backup-restore/plpgsql_user_defined_functions @@ -115,6 +115,7 @@ CREATE FUNCTION sc1.f1(a sc1.enum1) NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE plpgsql + SECURITY INVOKER AS $$ DECLARE x INT8 := 0; @@ -140,6 +141,7 @@ CREATE FUNCTION sc2.f2() NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE plpgsql + SECURITY INVOKER AS $$ DECLARE x INT8; @@ -176,6 +178,7 @@ CREATE FUNCTION sc1.f1(a sc1.enum1) NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE plpgsql + SECURITY INVOKER AS $$ DECLARE x INT8 := 0; @@ -196,6 +199,7 @@ CREATE FUNCTION sc2.f2() NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE plpgsql + SECURITY INVOKER AS $$ DECLARE x INT8; @@ -366,6 +370,7 @@ CREATE FUNCTION sc1.f1(a sc1.enum1) NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE plpgsql + SECURITY INVOKER AS $$ DECLARE x INT8; @@ -409,6 +414,7 @@ CREATE FUNCTION sc1.f1(a 
sc1.enum1) NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE plpgsql + SECURITY INVOKER AS $$ DECLARE x INT8; diff --git a/pkg/ccl/backupccl/testdata/backup-restore/procedures b/pkg/ccl/backupccl/testdata/backup-restore/procedures index 43110e061a71..efc0ac268267 100644 --- a/pkg/ccl/backupccl/testdata/backup-restore/procedures +++ b/pkg/ccl/backupccl/testdata/backup-restore/procedures @@ -88,6 +88,7 @@ SELECT create_statement FROM [SHOW CREATE PROCEDURE sc1.p1] ---- CREATE PROCEDURE sc1.p1(a sc1.enum1) LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT a FROM db1.sc1.tbl1; SELECT 'Good':::sc1.enum1; @@ -124,6 +125,7 @@ SELECT create_statement FROM [SHOW CREATE PROCEDURE sc1.p1] ---- CREATE PROCEDURE sc1.p1(a sc1.enum1) LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT a FROM db1_new.sc1.tbl1; SELECT 'Good':::sc1.enum1; @@ -280,6 +282,7 @@ SELECT create_statement FROM [SHOW CREATE PROCEDURE sc1.p1] ---- CREATE PROCEDURE sc1.p1(a sc1.enum1) LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT a FROM db1.sc1.tbl1; SELECT 'Good':::sc1.enum1; @@ -317,6 +320,7 @@ SELECT create_statement FROM [SHOW CREATE PROCEDURE sc1.p1] ---- CREATE PROCEDURE sc1.p1(a sc1.enum1) LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT a FROM db1.sc1.tbl1; SELECT 'Good':::sc1.enum1; diff --git a/pkg/ccl/backupccl/testdata/backup-restore/user-defined-functions b/pkg/ccl/backupccl/testdata/backup-restore/user-defined-functions index 330252f3b41d..4adaeb06527b 100644 --- a/pkg/ccl/backupccl/testdata/backup-restore/user-defined-functions +++ b/pkg/ccl/backupccl/testdata/backup-restore/user-defined-functions @@ -91,6 +91,7 @@ CREATE FUNCTION sc1.f1(a sc1.enum1) NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT a FROM db1.sc1.tbl1; SELECT 'Good':::sc1.enum1; @@ -127,6 +128,7 @@ CREATE FUNCTION sc1.f1(a sc1.enum1) NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT a FROM db1_new.sc1.tbl1; SELECT 'Good':::sc1.enum1; @@ -275,6 +277,7 @@ CREATE FUNCTION sc1.f1(a sc1.enum1) NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT a FROM db1.sc1.tbl1; SELECT 'Good':::sc1.enum1; @@ -312,6 +315,7 @@ CREATE FUNCTION sc1.f1(a sc1.enum1) NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT a FROM db1.sc1.tbl1; SELECT 'Good':::sc1.enum1; diff --git a/pkg/ccl/ccl_init.go b/pkg/ccl/ccl_init.go index aa0f7115a9c8..c4201d3924c0 100644 --- a/pkg/ccl/ccl_init.go +++ b/pkg/ccl/ccl_init.go @@ -39,6 +39,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/ccl/utilccl" _ "github.com/cockroachdb/cockroach/pkg/ccl/workloadccl" "github.com/cockroachdb/cockroach/pkg/server" + "github.com/cockroachdb/cockroach/pkg/server/license" ) func init() { @@ -51,8 +52,9 @@ func init() { // this `ccl` pkg. base.CheckEnterpriseEnabled = utilccl.CheckEnterpriseEnabled base.LicenseType = utilccl.GetLicenseType - base.UpdateMetricOnLicenseChange = utilccl.UpdateMetricOnLicenseChange + base.GetLicenseTTL = utilccl.GetLicenseTTL server.ApplyTenantLicense = utilccl.ApplyTenantLicense + license.RegisterCallbackOnLicenseChange = utilccl.RegisterCallbackOnLicenseChange } // TestingEnableEnterprise allows overriding the license check in tests. 
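The ccl_init.go hunk above relies on a link-time hook pattern: the open-source base and server packages declare package-level function variables with no-op defaults, and the CCL package's init() swaps in the real implementations when it is compiled into the binary. A minimal, runnable sketch of the idea in Go (the lowercase names are illustrative stand-ins, not the actual CockroachDB symbols):

    package main

    import "fmt"

    // getLicenseTTL plays the role of an overridable hook such as
    // base.GetLicenseTTL: the default returns 0, meaning no license
    // information is available in an OSS-only build.
    var getLicenseTTL = func() int64 { return 0 }

    // initCCL plays the role of the ccl package's init(): linking the CCL
    // code into the binary installs the real implementation.
    func initCCL() {
    	getLicenseTTL = func() int64 { return 86400 } // pretend one day remains
    }

    func main() {
    	fmt.Println("before:", getLicenseTTL()) // prints 0
    	initCCL()
    	fmt.Println("after:", getLicenseTTL()) // prints 86400
    }

Using function variables rather than interfaces keeps the OSS packages free of any CCL import while still letting a single binary switch behavior at link time.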
diff --git a/pkg/ccl/changefeedccl/BUILD.bazel b/pkg/ccl/changefeedccl/BUILD.bazel index c7cda9ae2166..bc73ee3d1540 100644 --- a/pkg/ccl/changefeedccl/BUILD.bazel +++ b/pkg/ccl/changefeedccl/BUILD.bazel @@ -271,6 +271,7 @@ go_test( "//pkg/kv/kvserver/kvserverbase", "//pkg/kv/kvserver/protectedts", "//pkg/kv/kvserver/protectedts/ptpb", + "//pkg/kv/kvserver/rangefeed", "//pkg/roachpb", "//pkg/scheduledjobs", "//pkg/scheduledjobs/schedulebase", @@ -335,6 +336,7 @@ go_test( "//pkg/util/randident", "//pkg/util/randutil", "//pkg/util/retry", + "//pkg/util/shuffle", "//pkg/util/span", "//pkg/util/syncutil", "//pkg/util/timeutil", diff --git a/pkg/ccl/changefeedccl/alter_changefeed_test.go b/pkg/ccl/changefeedccl/alter_changefeed_test.go index fc33d753f787..3d4ee2b8f0bc 100644 --- a/pkg/ccl/changefeedccl/alter_changefeed_test.go +++ b/pkg/ccl/changefeedccl/alter_changefeed_test.go @@ -240,6 +240,8 @@ func TestAlterChangefeedAddTargetFamily(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) + require.NoError(t, log.SetVModule("helpers_test=1")) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING, FAMILY onlya (a), FAMILY onlyb (b))`) diff --git a/pkg/ccl/changefeedccl/avro.go b/pkg/ccl/changefeedccl/avro.go index e8d466d18af3..1195111a1ad8 100644 --- a/pkg/ccl/changefeedccl/avro.go +++ b/pkg/ccl/changefeedccl/avro.go @@ -169,6 +169,7 @@ type avroMetadata map[string]interface{} type avroEnvelopeOpts struct { beforeField, afterField, recordField bool updatedField, resolvedField bool + mvccTimestampField bool } // avroEnvelopeRecord is an `avroRecord` that wraps a changed SQL row and some @@ -954,6 +955,14 @@ func envelopeToAvroSchema( } schema.Fields = append(schema.Fields, updatedField) } + if opts.mvccTimestampField { + mvccTimestampField := &avroSchemaField{ + SchemaType: []avroSchemaType{avroSchemaNull, avroSchemaString}, + Name: `mvcc_timestamp`, + Default: nil, + } + schema.Fields = append(schema.Fields, mvccTimestampField) + } if opts.resolvedField { resolvedField := &avroSchemaField{ SchemaType: []avroSchemaType{avroSchemaNull, avroSchemaString}, @@ -1037,6 +1046,20 @@ func (r *avroEnvelopeRecord) BinaryFromRow( native[`updated`] = goavro.Union(avroUnionKey(avroSchemaString), ts.AsOfSystemTime()) } } + + if r.opts.mvccTimestampField { + native[`mvcc_timestamp`] = nil + if u, ok := meta[`mvcc_timestamp`]; ok { + delete(meta, `mvcc_timestamp`) + ts, ok := u.(hlc.Timestamp) + if !ok { + return nil, changefeedbase.WithTerminalError( + errors.Errorf(`unknown metadata timestamp type: %T`, u)) + } + native[`mvcc_timestamp`] = goavro.Union(avroUnionKey(avroSchemaString), ts.AsOfSystemTime()) + } + } + if r.opts.resolvedField { native[`resolved`] = nil if u, ok := meta[`resolved`]; ok { diff --git a/pkg/ccl/changefeedccl/cdctest/testfeed.go b/pkg/ccl/changefeedccl/cdctest/testfeed.go index 8d6687a5ca73..fe0b3a61dfa9 100644 --- a/pkg/ccl/changefeedccl/cdctest/testfeed.go +++ b/pkg/ccl/changefeedccl/cdctest/testfeed.go @@ -84,6 +84,8 @@ type EnterpriseTestFeed interface { FetchRunningStatus() (string, error) // Details returns changefeed details for this feed. Details() (*jobspb.ChangefeedDetails, error) + // Progress returns the changefeed progress for this feed. + Progress() (*jobspb.ChangefeedProgress, error) // HighWaterMark returns feed highwatermark. 
HighWaterMark() (hlc.Timestamp, error) // TickHighWaterMark waits until job highwatermark progresses beyond specified threshold. diff --git a/pkg/ccl/changefeedccl/changefeed_dist_test.go b/pkg/ccl/changefeedccl/changefeed_dist_test.go index dbb1112d9422..6708e9968d86 100644 --- a/pkg/ccl/changefeedccl/changefeed_dist_test.go +++ b/pkg/ccl/changefeedccl/changefeed_dist_test.go @@ -405,6 +405,12 @@ func newRangeDistributionTester( systemDB.Exec(t, "SET CLUSTER SETTING kv.rangefeed.enabled = true") if tc.StartedDefaultTestTenant() { systemDB.Exec(t, `ALTER TENANT [$1] GRANT CAPABILITY can_admin_relocate_range=true`, serverutils.TestTenantID().ToUint64()) + // Give 1,000,000 upfront tokens to the tenant, and keep the tokens per + // second rate at the default value of 10,000. This helps avoid throttling + // in the tests. + systemDB.Exec(t, "SELECT crdb_internal.update_tenant_resource_limits($1::INT, 1000000, 10000, 0, now(), 0)", serverutils.TestTenantID().ToUint64()) } t.Logf("%s: creating and inserting rows into table", timeutil.Now().Format(time.DateTime)) diff --git a/pkg/ccl/changefeedccl/changefeed_processors.go b/pkg/ccl/changefeedccl/changefeed_processors.go index 9141b90f8180..2b5d435f6552 100644 --- a/pkg/ccl/changefeedccl/changefeed_processors.go +++ b/pkg/ccl/changefeedccl/changefeed_processors.go @@ -1714,10 +1714,10 @@ func (cf *changeFrontier) manageProtectedTimestamps( ctx context.Context, txn isql.Txn, progress *jobspb.ChangefeedProgress, ) error { ptsUpdateInterval := changefeedbase.ProtectTimestampInterval.Get(&cf.FlowCtx.Cfg.Settings.SV) + ptsUpdateLag := changefeedbase.ProtectTimestampLag.Get(&cf.FlowCtx.Cfg.Settings.SV) if timeutil.Since(cf.lastProtectedTimestampUpdate) < ptsUpdateInterval { return nil } - cf.lastProtectedTimestampUpdate = timeutil.Now() pts := cf.FlowCtx.Cfg.ProtectedTimestampProvider.WithTxn(txn) @@ -1732,17 +1732,26 @@ func (cf *changeFrontier) manageProtectedTimestamps( ctx, cf.FlowCtx.Codec(), cf.spec.JobID, AllTargets(cf.spec.Feed), highWater, ) progress.ProtectedTimestampRecord = ptr.ID.GetUUID() + cf.lastProtectedTimestampUpdate = timeutil.Now() return pts.Protect(ctx, ptr) } - log.VEventf(ctx, 2, "updating protected timestamp %v at %v", progress.ProtectedTimestampRecord, highWater) - rec, err := pts.GetRecord(ctx, progress.ProtectedTimestampRecord) if err != nil { return err } if rec.Target != nil { + // Only update the PTS timestamp if it is lagging behind the high + // watermark. This is to prevent a rush of updates to the PTS if the + // changefeed restarts, which can cause contention and second-order effects + // on system tables.
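A standalone sketch of the gate implemented just below, with time.Time standing in for hlc.Timestamp (an assumption for illustration): the record is advanced only once it trails the high watermark by more than the configured lag.

package main

import (
	"fmt"
	"time"
)

// shouldAdvancePTS mirrors the !rec.Timestamp.AddDuration(lag).Less(highWater)
// check: skip the update unless the record lags the high watermark by more
// than lag.
func shouldAdvancePTS(recTS, highWater time.Time, lag time.Duration) bool {
	return recTS.Add(lag).Before(highWater)
}

func main() {
	now := time.Now()
	rec := now.Add(-5 * time.Minute)
	fmt.Println(shouldAdvancePTS(rec, now, 10*time.Minute)) // false: within the allowed lag
	fmt.Println(shouldAdvancePTS(rec, now, time.Minute))    // true: lagging too far, update
}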
+ if !rec.Timestamp.AddDuration(ptsUpdateLag).Less(highWater) { + return nil + } + + log.VEventf(ctx, 2, "updating protected timestamp %v at %v", progress.ProtectedTimestampRecord, highWater) + cf.lastProtectedTimestampUpdate = timeutil.Now() return pts.UpdateTimestamp(ctx, progress.ProtectedTimestampRecord, highWater) } @@ -1766,6 +1775,7 @@ func (cf *changeFrontier) manageProtectedTimestamps( progress.ProtectedTimestampRecord, prevRecordId, highWater) } + cf.lastProtectedTimestampUpdate = timeutil.Now() return nil } diff --git a/pkg/ccl/changefeedccl/changefeed_processors_test.go b/pkg/ccl/changefeedccl/changefeed_processors_test.go index 804717917ea3..06bc91443e6e 100644 --- a/pkg/ccl/changefeedccl/changefeed_processors_test.go +++ b/pkg/ccl/changefeedccl/changefeed_processors_test.go @@ -9,13 +9,20 @@ package changefeedccl import ( + "sort" "testing" + "time" + "github.com/cockroachdb/cockroach/pkg/ccl/changefeedccl/changefeedbase" + "github.com/cockroachdb/cockroach/pkg/kv/kvserver/rangefeed" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" + "github.com/cockroachdb/cockroach/pkg/util/randutil" + "github.com/cockroachdb/cockroach/pkg/util/shuffle" + "github.com/cockroachdb/cockroach/pkg/util/span" "github.com/stretchr/testify/require" ) @@ -133,3 +140,95 @@ func TestSetupSpansAndFrontier(t *testing.T) { }) } } + +type rspans []roachpb.Span + +func (rs rspans) Len() int { + return len(rs) +} + +func (rs rspans) Less(i int, j int) bool { + return rs[i].Key.Compare(rs[j].Key) < 0 +} + +func (rs rspans) Swap(i int, j int) { + rs[i], rs[j] = rs[j], rs[i] +} + +type checkpointSpan struct { + span roachpb.Span + ts hlc.Timestamp +} + +type checkpointSpans []checkpointSpan + +func (rs checkpointSpans) Len() int { + return len(rs) +} + +func (rs checkpointSpans) Less(i int, j int) bool { + return rs[i].span.Key.Compare(rs[j].span.Key) < 0 +} + +func (rs checkpointSpans) Swap(i int, j int) { + rs[i], rs[j] = rs[j], rs[i] +} + +// TestGetCheckpointSpans generates 100 random non-overlapping spans with random +// timestamps within a minute of each other and turns them into checkpoint +// spans. It then does some sanity checks. It also compares the total +// catchup time between the checkpoint timestamp and the high watermark. +// Although the test relies on internal implementation details, it is a +// good base to explore other fine-grained checkpointing algorithms. +func TestGetCheckpointSpans(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + const numSpans = 100 + maxBytes := changefeedbase.FrontierCheckpointMaxBytes.Default() + hwm := hlc.Timestamp{} + rng, _ := randutil.NewTestRand() + + spans := make(checkpointSpans, numSpans) + + // Generate spans. They should not be overlapping. + // Randomize the order in which spans are processed. + for i, s := range rangefeed.GenerateRandomizedSpans(rng, numSpans) { + ts := rangefeed.GenerateRandomizedTs(rng, time.Minute.Nanoseconds()) + if hwm.IsEmpty() || ts.Less(hwm) { + hwm = ts + } + spans[i] = checkpointSpan{s.AsRawSpanWithNoLocals(), ts} + } + shuffle.Shuffle(spans) + + forEachSpan := func(fn span.Operation) { + for _, s := range spans { + fn(s.span, s.ts) + } + } + + // Compute the checkpoint. 
+ cpSpans, cpTS := getCheckpointSpans(hwm, forEachSpan, maxBytes) + require.Less(t, len(cpSpans), numSpans) + require.True(t, hwm.Less(cpTS)) + + // Calculate the total amount of time these spans would have to "catch up" + // using the checkpoint spans compared to starting at the frontier. + catchup := cpTS.GoTime().Sub(hwm.GoTime()) + sort.Sort(rspans(cpSpans)) + sort.Sort(spans) + var catchupFromCheckpoint, catchupFromHWM time.Duration + j := 0 + for _, s := range spans { + catchupFromHWM += s.ts.GoTime().Sub(hwm.GoTime()) + if j < len(cpSpans) && cpSpans[j].Equal(s.span) { + catchupFromCheckpoint += s.ts.GoTime().Sub(cpTS.GoTime()) + j++ + } + } + t.Logf("Checkpoint time improved by %v for %d/%d spans\ntotal catchup from checkpoint: %v\ntotal catchup from high watermark: %v\nPercent improvement %f", + catchup, len(cpSpans), numSpans, catchupFromCheckpoint, catchupFromHWM, + 100*(1-float64(catchupFromCheckpoint.Nanoseconds())/float64(catchupFromHWM.Nanoseconds()))) + require.Less(t, catchupFromCheckpoint, catchupFromHWM) +} diff --git a/pkg/ccl/changefeedccl/changefeed_stmt.go b/pkg/ccl/changefeedccl/changefeed_stmt.go index da060bf76568..8a2ce5ba35cb 100644 --- a/pkg/ccl/changefeedccl/changefeed_stmt.go +++ b/pkg/ccl/changefeedccl/changefeed_stmt.go @@ -341,13 +341,20 @@ func coreChangefeed( p.ExtendedEvalContext().ChangefeedState = localState knobs, _ := p.ExecCfg().DistSQLSrv.TestingKnobs.Changefeed.(*TestingKnobs) - for r := getRetry(ctx); r.Next(); { + for r := getRetry(ctx); ; { + if !r.Next() { + // Retry loop exits when context is canceled. + log.Infof(ctx, "core changefeed retry loop exiting: %s", ctx.Err()) + return ctx.Err() + } + if knobs != nil && knobs.BeforeDistChangefeed != nil { knobs.BeforeDistChangefeed() } err := distChangefeedFlow(ctx, p, 0 /* jobID */, details, localState, resultsCh) if err == nil { + log.Infof(ctx, "core changefeed completed with no error") return nil } @@ -364,7 +371,6 @@ func coreChangefeed( // information which is saved in the localState. log.Infof(ctx, "core changefeed retrying due to transient error: %s", err) } - return ctx.Err() // retry loop exits when context cancels. 
} func createChangefeedJobRecord( @@ -460,6 +466,15 @@ func createChangefeedJobRecord( return nil, err } + for _, t := range targetDescs { + if tbl, ok := t.(catalog.TableDescriptor); ok && tbl.ExternalRowData() != nil { + if tbl.ExternalRowData().TenantID.IsSet() { + return nil, errors.UnimplementedError(errors.IssueLink{}, "changefeeds on a replication target are not supported") + } + return nil, errors.UnimplementedError(errors.IssueLink{}, "changefeeds on external tables are not supported") + } + } + targets, tables, err := getTargetsAndTables(ctx, p, targetDescs, changefeedStmt.Targets, changefeedStmt.originalSpecs, opts.ShouldUseFullStatementTimeName(), sinkURI) diff --git a/pkg/ccl/changefeedccl/changefeed_test.go b/pkg/ccl/changefeedccl/changefeed_test.go index 88f5f1e2132f..775947dc19dd 100644 --- a/pkg/ccl/changefeedccl/changefeed_test.go +++ b/pkg/ccl/changefeedccl/changefeed_test.go @@ -998,6 +998,34 @@ func TestChangefeedMVCCTimestamps(t *testing.T) { cdcTest(t, testFn) } +func TestChangefeedMVCCTimestampsAvro(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) + sqlDB.Exec(t, `CREATE TABLE mvcc_timestamp_test_table (id UUID PRIMARY KEY DEFAULT gen_random_uuid())`) + + const rowCount = 5 + expectedPayloads := make([]string, rowCount) + for i := 0; i < rowCount; i++ { + row := sqlDB.QueryRow(t, `INSERT INTO mvcc_timestamp_test_table VALUES (DEFAULT) RETURNING id, cluster_logical_timestamp()`) + + var id string + var mvccTimestamp string + row.Scan(&id, &mvccTimestamp) + expectedPayloads[i] = fmt.Sprintf(`mvcc_timestamp_test_table: {"id":{"string":"%[1]s"}}->{"after":{"mvcc_timestamp_test_table":{"id":{"string":"%[1]s"}}},"mvcc_timestamp":{"string":"%[2]s"}}`, + id, mvccTimestamp) + } + + changeFeed := feed(t, f, `CREATE CHANGEFEED FOR mvcc_timestamp_test_table WITH mvcc_timestamp, format='avro'`) + defer closeFeed(t, changeFeed) + assertPayloads(t, changeFeed, expectedPayloads) + } + + cdcTest(t, testFn, feedTestForceSink(`kafka`)) +} + func TestChangefeedResolvedFrequency(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) @@ -9426,3 +9454,74 @@ func TestChangefeedAvroDecimalColumnWithDiff(t *testing.T) { cdcTest(t, testFn, feedTestForceSink("kafka")) } + +func TestChangefeedProtectedTimestampUpdate(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) + // Checkpoint and trigger potential protected timestamp updates frequently. + // Make the protected timestamp lag long enough that it shouldn't be + // immediately updated after a restart. + changefeedbase.FrontierCheckpointFrequency.Override( + context.Background(), &s.Server.ClusterSettings().SV, 10*time.Millisecond) + changefeedbase.ProtectTimestampInterval.Override( + context.Background(), &s.Server.ClusterSettings().SV, 10*time.Millisecond) + changefeedbase.ProtectTimestampLag.Override( + context.Background(), &s.Server.ClusterSettings().SV, 10*time.Hour) + + sqlDB.Exec(t, `CREATE TABLE foo (id INT)`) + + createStmt := `CREATE CHANGEFEED FOR foo WITH resolved='10ms', no_initial_scan` + testFeed := feed(t, f, createStmt) + defer closeFeed(t, testFeed) + + eFeed, ok := testFeed.(cdctest.EnterpriseTestFeed) + require.True(t, ok) + + // Wait for the changefeed to checkpoint. 
+ var lastHWM hlc.Timestamp + checkHWM := func() error { + hwm, err := eFeed.HighWaterMark() + if err == nil && !hwm.IsEmpty() && lastHWM.Less(hwm) { + lastHWM = hwm + return nil + } + return errors.New("waiting for high watermark to advance") + } + testutils.SucceedsSoon(t, checkHWM) + + // Get the PTS of this feed. + p, err := eFeed.Progress() + require.NoError(t, err) + + ptsQry := fmt.Sprintf(`SELECT ts FROM system.protected_ts_records WHERE id = '%s'`, p.ProtectedTimestampRecord) + var ts, ts2 string + sqlDB.QueryRow(t, ptsQry).Scan(&ts) + require.NoError(t, err) + + // Force the changefeed to restart. + require.NoError(t, eFeed.Pause()) + require.NoError(t, eFeed.Resume()) + + // Wait for a new checkpoint. + testutils.SucceedsSoon(t, checkHWM) + + // Check that the PTS was not updated after the resume. + sqlDB.QueryRow(t, ptsQry).Scan(&ts2) + require.NoError(t, err) + require.Equal(t, ts, ts2) + + // Lower the PTS lag and check that it has been updated. + changefeedbase.ProtectTimestampLag.Override( + context.Background(), &s.Server.ClusterSettings().SV, 10*time.Millisecond) + + testutils.SucceedsSoon(t, checkHWM) + + sqlDB.QueryRow(t, ptsQry).Scan(&ts2) + require.NoError(t, err) + require.Less(t, ts, ts2) + } + + cdcTest(t, testFn, feedTestForceSink("kafka")) +} diff --git a/pkg/ccl/changefeedccl/changefeedbase/settings.go b/pkg/ccl/changefeedccl/changefeedbase/settings.go index 8c43456f454e..cb9edc3a0cfd 100644 --- a/pkg/ccl/changefeedccl/changefeedbase/settings.go +++ b/pkg/ccl/changefeedccl/changefeedbase/settings.go @@ -203,6 +203,14 @@ var ProtectTimestampInterval = settings.RegisterDurationSetting( settings.PositiveDuration, settings.WithPublic) +// ProtectTimestampLag controls how much the protected timestamp record should lag behind the high watermark +var ProtectTimestampLag = settings.RegisterDurationSetting( + settings.ApplicationLevel, + "changefeed.protect_timestamp.lag", + "controls how far behind the checkpoint the changefeed's protected timestamp is", + 10*time.Minute, + settings.PositiveDuration) + // MaxProtectedTimestampAge controls the frequency of protected timestamp record updates var MaxProtectedTimestampAge = settings.RegisterDurationSetting( settings.ApplicationLevel, diff --git a/pkg/ccl/changefeedccl/encoder_avro.go b/pkg/ccl/changefeedccl/encoder_avro.go index d6cb74f3f424..0346e2311cad 100644 --- a/pkg/ccl/changefeedccl/encoder_avro.go +++ b/pkg/ccl/changefeedccl/encoder_avro.go @@ -34,6 +34,7 @@ type confluentAvroEncoder struct { schemaRegistry schemaRegistry schemaPrefix string updatedField, beforeField bool + mvccTimestampField bool virtualColumnVisibility changefeedbase.VirtualColumnVisibility targets changefeedbase.Targets envelopeType changefeedbase.EnvelopeType @@ -90,6 +91,7 @@ func newConfluentAvroEncoder( e.updatedField = opts.UpdatedTimestamps e.beforeField = opts.Diff e.customKeyColumn = opts.CustomKeyColumn + e.mvccTimestampField = opts.MVCCTimestamps // TODO: Implement this. if opts.KeyInValue { @@ -257,10 +259,10 @@ func (e *confluentAvroEncoder) EncodeValue( // This means metadata can safely go at the top level as there are never arbitrary column names // for it to conflict with. 
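The `{"string": ...}` wrappers asserted in TestChangefeedMVCCTimestampsAvro above come from Avro's textual encoding of nullable unions, which goavro.Union produces. A self-contained sketch with a local stand-in for goavro.Union (so it runs without the dependency; the helper mimics, rather than is, the goavro API):

package main

import (
	"encoding/json"
	"fmt"
)

// union mimics goavro.Union: a non-null member of an Avro ["null","string"]
// union is rendered as a single-entry map keyed by the branch name.
func union(branch string, datum interface{}) interface{} {
	return map[string]interface{}{branch: datum}
}

func main() {
	envelope := map[string]interface{}{
		"after":          map[string]interface{}{"id": union("string", "x")},
		"mvcc_timestamp": union("string", "1718000000.0000000001"),
	}
	b, _ := json.Marshal(envelope)
	fmt.Println(string(b))
	// {"after":{"id":{"string":"x"}},"mvcc_timestamp":{"string":"1718000000.0000000001"}}
}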
if e.envelopeType == changefeedbase.OptEnvelopeWrapped { - opts = avroEnvelopeOpts{afterField: true, beforeField: e.beforeField, updatedField: e.updatedField} + opts = avroEnvelopeOpts{afterField: true, beforeField: e.beforeField, updatedField: e.updatedField, mvccTimestampField: e.mvccTimestampField} afterDataSchema = currentSchema } else { - opts = avroEnvelopeOpts{recordField: true, updatedField: e.updatedField} + opts = avroEnvelopeOpts{recordField: true, updatedField: e.updatedField, mvccTimestampField: e.mvccTimestampField} recordDataSchema = currentSchema } @@ -284,11 +286,12 @@ func (e *confluentAvroEncoder) EncodeValue( e.valueCache.Add(cacheKey, registered) } - var meta avroMetadata + meta := avroMetadata{} if registered.schema.opts.updatedField { - meta = map[string]interface{}{ - `updated`: evCtx.updated, - } + meta[`updated`] = evCtx.updated + } + if registered.schema.opts.mvccTimestampField { + meta[`mvcc_timestamp`] = evCtx.mvcc } // https://docs.confluent.io/current/schema-registry/docs/serializer-formatter.html#wire-format diff --git a/pkg/ccl/changefeedccl/helpers_test.go b/pkg/ccl/changefeedccl/helpers_test.go index 15add28b85cd..8a154ca5e933 100644 --- a/pkg/ccl/changefeedccl/helpers_test.go +++ b/pkg/ccl/changefeedccl/helpers_test.go @@ -101,7 +101,7 @@ func readNextMessages( return nil, ctx.Err() } if log.V(1) { - log.Infof(context.Background(), "About to read a message (%d out of %d)", len(actual), numMessages) + log.Infof(context.Background(), "about to read a message (%d out of %d)", len(actual), numMessages) } m, err := f.Next() if log.V(1) { diff --git a/pkg/ccl/changefeedccl/parquet_test.go b/pkg/ccl/changefeedccl/parquet_test.go index fe6903ec8397..c7ccdc28f8b0 100644 --- a/pkg/ccl/changefeedccl/parquet_test.go +++ b/pkg/ccl/changefeedccl/parquet_test.go @@ -185,7 +185,7 @@ func TestParquetRows(t *testing.T) { writer, err = newParquetWriterFromRow(updatedRow, f, encodingOpts, parquet.WithMaxRowGroupLength(maxRowGroupSize), parquet.WithCompressionCodec(parquet.CompressionGZIP)) if err != nil { - t.Fatalf(err.Error()) + t.Fatal(err) } numCols = len(updatedRow.ResultColumns()) + 1 } diff --git a/pkg/ccl/changefeedccl/protected_timestamps_test.go b/pkg/ccl/changefeedccl/protected_timestamps_test.go index afeadd9332ff..e1348e8f26e0 100644 --- a/pkg/ccl/changefeedccl/protected_timestamps_test.go +++ b/pkg/ccl/changefeedccl/protected_timestamps_test.go @@ -61,6 +61,8 @@ func TestChangefeedUpdateProtectedTimestamp(t *testing.T) { ptsInterval := 50 * time.Millisecond changefeedbase.ProtectTimestampInterval.Override( context.Background(), &s.Server.ClusterSettings().SV, ptsInterval) + changefeedbase.ProtectTimestampLag.Override( + context.Background(), &s.Server.ClusterSettings().SV, ptsInterval) sqlDB := sqlutils.MakeSQLRunner(s.DB) sysDB := sqlutils.MakeSQLRunner(s.SystemServer.SQLConn(t)) @@ -255,6 +257,8 @@ func TestChangefeedProtectedTimestamps(t *testing.T) { changefeedbase.ProtectTimestampInterval.Override( context.Background(), &s.Server.ClusterSettings().SV, 100*time.Millisecond) + changefeedbase.ProtectTimestampLag.Override( + context.Background(), &s.Server.ClusterSettings().SV, 100*time.Millisecond) ptp := s.Server.DistSQLServer().(*distsql.ServerImpl).ServerConfig.ProtectedTimestampProvider store, err := s.SystemServer.GetStores().(*kvserver.Stores).GetStore(s.SystemServer.GetFirstStoreID()) @@ -502,6 +506,8 @@ func TestChangefeedMigratesProtectedTimestamps(t *testing.T) { ptsInterval := 50 * time.Millisecond changefeedbase.ProtectTimestampInterval.Override( 
context.Background(), &s.Server.ClusterSettings().SV, ptsInterval) + changefeedbase.ProtectTimestampLag.Override( + context.Background(), &s.Server.ClusterSettings().SV, ptsInterval) sqlDB := sqlutils.MakeSQLRunner(db) sysDB := sqlutils.MakeSQLRunner(s.SystemServer.SQLConn(t)) diff --git a/pkg/ccl/changefeedccl/scheduled_changefeed_test.go b/pkg/ccl/changefeedccl/scheduled_changefeed_test.go index 84d9dab30fa5..a8132bc0b87c 100644 --- a/pkg/ccl/changefeedccl/scheduled_changefeed_test.go +++ b/pkg/ccl/changefeedccl/scheduled_changefeed_test.go @@ -261,7 +261,7 @@ func TestSerializesScheduledChangefeedExecutionArgs(t *testing.T) { } for _, tc := range testCases { - t.Run(fmt.Sprintf(tc.name), func(t *testing.T) { + t.Run(tc.name, func(t *testing.T) { defer th.clearSchedules(t) sj, err := th.createChangefeedSchedule(t, tc.query, tc.queryArgs...) diff --git a/pkg/ccl/changefeedccl/testfeed_test.go b/pkg/ccl/changefeedccl/testfeed_test.go index 12aafe01c090..6abd0494429a 100644 --- a/pkg/ccl/changefeedccl/testfeed_test.go +++ b/pkg/ccl/changefeedccl/testfeed_test.go @@ -493,6 +493,19 @@ func (f *jobFeed) Details() (*jobspb.ChangefeedDetails, error) { return payload.GetChangefeed(), nil } +// Progress implements FeedJob interface. +func (f *jobFeed) Progress() (*jobspb.ChangefeedProgress, error) { + var details []byte + if err := f.db.QueryRow(jobutils.JobProgressByIDQuery, f.jobID).Scan(&details); err != nil { + return nil, errors.Wrapf(err, "Progress for job %d", f.jobID) + } + var progress jobspb.Progress + if err := protoutil.Unmarshal(details, &progress); err != nil { + return nil, err + } + return progress.GetChangefeed(), nil +} + // HighWaterMark implements FeedJob interface. func (f *jobFeed) HighWaterMark() (hlc.Timestamp, error) { var details []byte diff --git a/pkg/ccl/cliccl/BUILD.bazel b/pkg/ccl/cliccl/BUILD.bazel index b4dc6604f2bf..6038883750c6 100644 --- a/pkg/ccl/cliccl/BUILD.bazel +++ b/pkg/ccl/cliccl/BUILD.bazel @@ -44,7 +44,7 @@ go_library( "@com_github_cockroachdb_errors//:errors", "@com_github_cockroachdb_errors//oserror", "@com_github_cockroachdb_redact//:redact", - "@com_github_lestrrat_go_jwx//jwk", + "@com_github_lestrrat_go_jwx_v2//jwk", "@com_github_olekukonko_tablewriter//:tablewriter", "@com_github_spf13_cobra//:cobra", "@com_github_spf13_pflag//:pflag", diff --git a/pkg/ccl/cliccl/gen.go b/pkg/ccl/cliccl/gen.go index 41cab793b42d..f44500314e19 100644 --- a/pkg/ccl/cliccl/gen.go +++ b/pkg/ccl/cliccl/gen.go @@ -18,7 +18,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/ccl/storageccl/engineccl/enginepbccl" "github.com/cockroachdb/cockroach/pkg/cli" "github.com/cockroachdb/errors" - "github.com/lestrrat-go/jwx/jwk" + "github.com/lestrrat-go/jwx/v2/jwk" "github.com/spf13/cobra" ) @@ -62,8 +62,10 @@ func genEncryptionKey( return fmt.Errorf("store key size should be 128, 192, or 256 bits, got %d", aesSize) } - symKey := jwk.NewSymmetricKey() - if err := symKey.FromRaw(key); err != nil { + // FromRaw uses the input type to instantiate a particular type of key. + // A []byte input is used to generate a symmetric key here.
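A self-contained sketch of the jwx v1-to-v2 migration this hunk performs, assuming the github.com/lestrrat-go/jwx/v2 module is on the module path: in v2, jwk.FromRaw dispatches on the Go type of its argument, and set.AddKey (unlike v1's Add) returns an error.

package main

import (
	"crypto/rand"
	"encoding/hex"
	"encoding/json"
	"fmt"

	"github.com/lestrrat-go/jwx/v2/jwk"
)

func main() {
	raw := make([]byte, 32) // 256-bit symmetric key material
	if _, err := rand.Read(raw); err != nil {
		panic(err)
	}
	key, err := jwk.FromRaw(raw) // []byte input yields a symmetric ("oct") key
	if err != nil {
		panic(err)
	}
	if err := key.Set(jwk.KeyIDKey, hex.EncodeToString([]byte{0xde, 0xad})); err != nil {
		panic(err)
	}
	set := jwk.NewSet()
	if err := set.AddKey(key); err != nil { // v2 reports failures (e.g. duplicates) as errors
		panic(err)
	}
	b, err := json.Marshal(set)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"keys":[{"k":"...","kid":"dead","kty":"oct"}]}
}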
+ symKey, err := jwk.FromRaw(key) + if err != nil { return errors.Wrap(err, "error setting key bytes") } if err := symKey.Set(jwk.KeyIDKey, hex.EncodeToString(keyID)); err != nil { @@ -78,7 +80,9 @@ func genEncryptionKey( } keySet := jwk.NewSet() - keySet.Add(symKey) + if err = keySet.AddKey(symKey); err != nil { + return err + } b, err = json.Marshal(keySet) if err != nil { diff --git a/pkg/ccl/cloudccl/amazon/BUILD.bazel b/pkg/ccl/cloudccl/amazon/BUILD.bazel index 1417d9e2e980..9ff51683aab2 100644 --- a/pkg/ccl/cloudccl/amazon/BUILD.bazel +++ b/pkg/ccl/cloudccl/amazon/BUILD.bazel @@ -24,6 +24,7 @@ go_test( "//pkg/util/leaktest", "//pkg/util/log", "//pkg/util/randutil", - "@com_github_aws_aws_sdk_go//aws/credentials", + "@com_github_aws_aws_sdk_go_v2_config//:config", + "@com_github_stretchr_testify//require", ], ) diff --git a/pkg/ccl/cloudccl/amazon/s3_connection_test.go b/pkg/ccl/cloudccl/amazon/s3_connection_test.go index a6585d903e9a..990411930bbd 100644 --- a/pkg/ccl/cloudccl/amazon/s3_connection_test.go +++ b/pkg/ccl/cloudccl/amazon/s3_connection_test.go @@ -16,7 +16,7 @@ import ( "strings" "testing" - "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go-v2/config" "github.com/cockroachdb/cockroach/pkg/base" _ "github.com/cockroachdb/cockroach/pkg/ccl" "github.com/cockroachdb/cockroach/pkg/cloud" @@ -30,6 +30,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" + "github.com/stretchr/testify/require" ) func TestS3ExternalConnection(t *testing.T) { @@ -67,10 +68,13 @@ func TestS3ExternalConnection(t *testing.T) { // If environment credentials are not present, we want to // skip all S3 tests, including auth-implicit, even though // it is not used in auth-implicit. - creds, err := credentials.NewEnvCredentials().Get() - if err != nil { + envConfig, err := config.NewEnvConfig() + require.NoError(t, err) + if !envConfig.Credentials.HasKeys() { skip.IgnoreLint(t, "No AWS credentials") } + + creds := envConfig.Credentials bucket := os.Getenv("AWS_S3_BUCKET") if bucket == "" { skip.IgnoreLint(t, "AWS_S3_BUCKET env var must be set") @@ -83,8 +87,11 @@ func TestS3ExternalConnection(t *testing.T) { // in the AWS console, then set it up locally. // https://docs.aws.com/cli/latest/userguide/cli-configure-role.html // We only run this test if default role exists. - credentialsProvider := credentials.SharedCredentialsProvider{} - _, err := credentialsProvider.Retrieve() + ctx := context.Background() + cfg, err := config.LoadDefaultConfig(ctx, + config.WithSharedConfigProfile(config.DefaultSharedConfigProfile)) + require.NoError(t, err) + _, err = cfg.Credentials.Retrieve(ctx) if err != nil { skip.IgnoreLintf(t, "we only run this test if a default role exists, "+ "refer to https://docs.aws.com/cli/latest/userguide/cli-configure-role.html: %s", err) @@ -120,8 +127,11 @@ func TestS3ExternalConnection(t *testing.T) { // in the AWS console, then set it up locally. // https://docs.aws.com/cli/latest/userguide/cli-configure-role.html // We only run this test if default role exists. 
- credentialsProvider := credentials.SharedCredentialsProvider{} - _, err := credentialsProvider.Retrieve() + ctx := context.Background() + cfg, err := config.LoadDefaultConfig(ctx, + config.WithSharedConfigProfile(config.DefaultSharedConfigProfile)) + require.NoError(t, err) + _, err = cfg.Credentials.Retrieve(ctx) if err != nil { skip.IgnoreLintf(t, "we only run this test if a default role exists, "+ "refer to https://docs.aws.com/cli/latest/userguide/cli-configure-role.html: %s", err) @@ -156,8 +166,11 @@ func TestS3ExternalConnection(t *testing.T) { // in the AWS console, then set it up locally. // https://docs.aws.com/cli/latest/userguide/cli-configure-role.html // We only run this test if default role exists. - credentialsProvider := credentials.SharedCredentialsProvider{} - _, err := credentialsProvider.Retrieve() + ctx := context.Background() + cfg, err := config.LoadDefaultConfig(ctx, + config.WithSharedConfigProfile(config.DefaultSharedConfigProfile)) + require.NoError(t, err) + _, err = cfg.Credentials.Retrieve(ctx) if err != nil { skip.IgnoreLintf(t, "we only run this test if a default role exists, "+ "refer to https://docs.aws.com/cli/latest/userguide/cli-configure-role.html: %s", err) @@ -220,9 +233,10 @@ func TestAWSKMSExternalConnection(t *testing.T) { // If environment credentials are not present, we want to // skip all AWS KMS tests, including auth-implicit, even though // it is not used in auth-implicit. - _, err := credentials.NewEnvCredentials().Get() - if err != nil { - skip.IgnoreLint(t, "Test only works with AWS credentials") + envConfig, err := config.NewEnvConfig() + require.NoError(t, err) + if !envConfig.Credentials.HasKeys() { + skip.IgnoreLint(t, "No AWS credentials") } q := make(url.Values) @@ -331,9 +345,10 @@ func TestAWSKMSExternalConnectionAssumeRole(t *testing.T) { // If environment credentials are not present, we want to // skip all AWS KMS tests, including auth-implicit, even though // it is not used in auth-implicit. - _, err := credentials.NewEnvCredentials().Get() - if err != nil { - skip.IgnoreLint(t, "Test only works with AWS credentials") + envConfig, err := config.NewEnvConfig() + require.NoError(t, err) + if !envConfig.Credentials.HasKeys() { + skip.IgnoreLint(t, "No AWS credentials") } q := make(url.Values) @@ -380,8 +395,11 @@ func TestAWSKMSExternalConnectionAssumeRole(t *testing.T) { // in the AWS console, then set it up locally. // https://docs.aws.com/cli/latest/userguide/cli-configure-role.html // We only run this test if default role exists. 
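The same aws-sdk-go v1-to-v2 substitution recurs in each of these skip checks. A condensed, self-contained sketch of both credential probes, assuming github.com/aws/aws-sdk-go-v2/config is available:

package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/config"
)

func main() {
	// v2 replacement for v1's credentials.NewEnvCredentials().Get():
	// read only the environment, then ask whether static keys are present.
	envCfg, err := config.NewEnvConfig()
	if err != nil {
		panic(err)
	}
	fmt.Println("env credentials present:", envCfg.Credentials.HasKeys())

	// v2 replacement for v1's credentials.SharedCredentialsProvider{}.Retrieve():
	// load the default shared-config profile and try to resolve credentials.
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx,
		config.WithSharedConfigProfile(config.DefaultSharedConfigProfile))
	if err == nil {
		_, err = cfg.Credentials.Retrieve(ctx)
	}
	fmt.Println("default profile resolves:", err == nil)
}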
- credentialsProvider := credentials.SharedCredentialsProvider{} - _, err := credentialsProvider.Retrieve() + ctx := context.Background() + cfg, err := config.LoadDefaultConfig(ctx, + config.WithSharedConfigProfile(config.DefaultSharedConfigProfile)) + require.NoError(t, err) + _, err = cfg.Credentials.Retrieve(ctx) if err != nil { skip.IgnoreLint(t, err) } diff --git a/pkg/ccl/cloudccl/externalconn/testdata/create_drop_external_connection b/pkg/ccl/cloudccl/externalconn/testdata/create_drop_external_connection index 0727c2efbc05..a41b680b1516 100644 --- a/pkg/ccl/cloudccl/externalconn/testdata/create_drop_external_connection +++ b/pkg/ccl/cloudccl/externalconn/testdata/create_drop_external_connection @@ -111,7 +111,7 @@ disable-check-external-storage ---- exec-sql -CREATE EXTERNAL CONNECTION "foo-s3" AS 's3://foo/bar?AUTH=implicit&AWS_ACCESS_KEY_ID=123&AWS_SECRET_ACCESS_KEY=456&ASSUME_ROLE=ronaldo,rashford,bruno'; +CREATE EXTERNAL CONNECTION "foo-s3" AS 's3://foo/bar?AUTH=specified&AWS_ACCESS_KEY_ID=123&AWS_SECRET_ACCESS_KEY=456&AWS_REGION=us-east-1&ASSUME_ROLE=ronaldo,rashford,bruno'; ---- # Reject invalid S3 URIs. @@ -127,7 +127,7 @@ pq: failed to construct External Connection details: failed to create s3 externa inspect-system-table ---- -foo-s3 STORAGE {"provider": "s3", "simpleUri": {"uri": "s3://foo/bar?AUTH=implicit&AWS_ACCESS_KEY_ID=123&AWS_SECRET_ACCESS_KEY=456&ASSUME_ROLE=ronaldo,rashford,bruno"}} root 1 +foo-s3 STORAGE {"provider": "s3", "simpleUri": {"uri": "s3://foo/bar?AUTH=specified&AWS_ACCESS_KEY_ID=123&AWS_SECRET_ACCESS_KEY=456&AWS_REGION=us-east-1&ASSUME_ROLE=ronaldo,rashford,bruno"}} root 1 exec-sql DROP EXTERNAL CONNECTION "foo-s3"; diff --git a/pkg/ccl/cloudccl/externalconn/testdata/multi-tenant/create_drop_external_connection b/pkg/ccl/cloudccl/externalconn/testdata/multi-tenant/create_drop_external_connection index ed3b8ebff40c..6c2b6aa7c30e 100644 --- a/pkg/ccl/cloudccl/externalconn/testdata/multi-tenant/create_drop_external_connection +++ b/pkg/ccl/cloudccl/externalconn/testdata/multi-tenant/create_drop_external_connection @@ -91,7 +91,7 @@ disable-check-external-storage ---- exec-sql -CREATE EXTERNAL CONNECTION "foo-s3" AS 's3://foo/bar?AUTH=implicit&AWS_ACCESS_KEY_ID=123&AWS_SECRET_ACCESS_KEY=456&ASSUME_ROLE=ronaldo,rashford,bruno'; +CREATE EXTERNAL CONNECTION "foo-s3" AS 's3://foo/bar?AUTH=specified&AWS_ACCESS_KEY_ID=123&AWS_SECRET_ACCESS_KEY=456&AWS_REGION=us-east-1&ASSUME_ROLE=ronaldo,rashford,bruno'; ---- # Reject invalid S3 URIs.
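Hand-assembled URI strings like these make it easy to drop a separator between query parameters. Building the query with net/url sidesteps that class of typo entirely; a sketch using the same parameter names:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	q := make(url.Values)
	q.Set("AUTH", "specified")
	q.Set("AWS_ACCESS_KEY_ID", "123")
	q.Set("AWS_SECRET_ACCESS_KEY", "456")
	q.Set("AWS_REGION", "us-east-1")
	q.Set("ASSUME_ROLE", "ronaldo,rashford,bruno")
	u := url.URL{Scheme: "s3", Host: "foo", Path: "/bar", RawQuery: q.Encode()}
	// Encode sorts the keys and escapes reserved characters (commas become %2C),
	// and every parameter is guaranteed its own '&' separator.
	fmt.Println(u.String())
}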
@@ -107,7 +107,7 @@ pq: failed to construct External Connection details: failed to create s3 externa inspect-system-table ---- -foo-s3 STORAGE {"provider": "s3", "simpleUri": {"uri": "s3://foo/bar?AUTH=implicit&AWS_ACCESS_KEY_ID=123&AWS_SECRET_ACCESS_KEY=456&ASSUME_ROLE=ronaldo,rashford,bruno"}} root 1 +foo-s3 STORAGE {"provider": "s3", "simpleUri": {"uri": "s3://foo/bar?AUTH=specified&AWS_ACCESS_KEY_ID=123&AWS_SECRET_ACCESS_KEY=456&AWS_REGION=us-east-1&ASSUME_ROLE=ronaldo,rashford,bruno"}} root 1 exec-sql DROP EXTERNAL CONNECTION "foo-s3"; diff --git a/pkg/ccl/cmdccl/clusterrepl/main.go b/pkg/ccl/cmdccl/clusterrepl/main.go index 7ad34694583c..9f51292cd787 100644 --- a/pkg/ccl/cmdccl/clusterrepl/main.go +++ b/pkg/ccl/cmdccl/clusterrepl/main.go @@ -85,7 +85,7 @@ func main() { if errors.Is(err, context.Canceled) { exit.WithCode(exit.Interrupted()) } else { - fatalf(err.Error()) + fatalf("%s", err) } } } diff --git a/pkg/ccl/crosscluster/logical/BUILD.bazel b/pkg/ccl/crosscluster/logical/BUILD.bazel index c8c81c143222..c0a217e215eb 100644 --- a/pkg/ccl/crosscluster/logical/BUILD.bazel +++ b/pkg/ccl/crosscluster/logical/BUILD.bazel @@ -24,6 +24,7 @@ go_library( "//pkg/ccl/crosscluster/physical", "//pkg/ccl/crosscluster/streamclient", "//pkg/ccl/utilccl", + "//pkg/cloud", "//pkg/clusterversion", "//pkg/jobs", "//pkg/jobs/jobspb", @@ -68,6 +69,7 @@ go_library( "//pkg/sql/sessiondatapb", "//pkg/sql/syntheticprivilege", "//pkg/sql/types", + "//pkg/util/admission/admissionpb", "//pkg/util/ctxgroup", "//pkg/util/hlc", "//pkg/util/log", @@ -113,6 +115,7 @@ go_test( "//pkg/ccl/storageccl", "//pkg/jobs", "//pkg/jobs/jobspb", + "//pkg/kv", "//pkg/kv/kvpb", "//pkg/kv/kvserver", "//pkg/repstream/streampb", @@ -124,11 +127,13 @@ go_test( "//pkg/sql", "//pkg/sql/catalog", "//pkg/sql/catalog/descpb", + "//pkg/sql/catalog/descs", "//pkg/sql/catalog/desctestutils", "//pkg/sql/execinfra", "//pkg/sql/isql", "//pkg/sql/randgen", "//pkg/sql/rowenc", + "//pkg/sql/sem/eval", "//pkg/sql/sem/tree", "//pkg/testutils", "//pkg/testutils/jobutils", @@ -149,6 +154,7 @@ go_test( "//pkg/util/timeutil", "//pkg/util/uuid", "@com_github_cockroachdb_errors//:errors", + "@com_github_lib_pq//:pq", "@com_github_stretchr_testify//require", ], ) diff --git a/pkg/ccl/crosscluster/logical/create_logical_replication_stmt.go b/pkg/ccl/crosscluster/logical/create_logical_replication_stmt.go index 745950717ccf..a3320126adf5 100644 --- a/pkg/ccl/crosscluster/logical/create_logical_replication_stmt.go +++ b/pkg/ccl/crosscluster/logical/create_logical_replication_stmt.go @@ -16,6 +16,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/ccl/crosscluster" "github.com/cockroachdb/cockroach/pkg/ccl/crosscluster/streamclient" "github.com/cockroachdb/cockroach/pkg/ccl/utilccl" + "github.com/cockroachdb/cockroach/pkg/cloud" "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" @@ -99,6 +100,30 @@ func createLogicalReplicationStreamPlanHook( return pgerror.New(pgcode.InvalidParameterValue, "the same number of source and destination tables must be specified") } + options, err := evalLogicalReplicationOptions(ctx, stmt.Options, exprEval, p) + if err != nil { + return err + } + + hasUDF := len(options.userFunctions) > 0 || options.defaultFunction != nil && options.defaultFunction.FunctionId != 0 + + mode := jobspb.LogicalReplicationDetails_Immediate + if m, ok := options.GetMode(); ok { + switch m { + case 
"immediate": + if hasUDF { + return pgerror.Newf(pgcode.InvalidParameterValue, "MODE = 'immediate' cannot be used with user-defined functions") + } + case "validated": + mode = jobspb.LogicalReplicationDetails_Validated + default: + return pgerror.Newf(pgcode.InvalidParameterValue, "unknown mode %q", m) + } + } else if hasUDF { + // UDFs imply applying changes via SQL, which implies validation. + mode = jobspb.LogicalReplicationDetails_Validated + } + var ( targetsDescription string srcTableNames = make([]string, len(stmt.From.Tables)) @@ -152,6 +177,16 @@ func createLogicalReplicationStreamPlanHook( } else { targetsDescription += ", " + tbNameWithSchema.FQString() } + + if mode != jobspb.LogicalReplicationDetails_Validated { + fks := td.OutboundForeignKeys() + for _, fk := range append(fks[:len(fks):len(fks)], td.InboundForeignKeys()...) { + // TODO(dt): move the constraint to un-validated for them. + if fk.IsConstraintValidated() { + return pgerror.Newf(pgcode.InvalidParameterValue, "only 'NOT VALID' foreign keys are only supported with MODE = 'validated'") + } + } + } } streamAddress := crosscluster.StreamAddress(from) @@ -161,6 +196,11 @@ func createLogicalReplicationStreamPlanHook( } streamAddress = crosscluster.StreamAddress(streamURL.String()) + cleanedURI, err := cloud.SanitizeExternalStorageURI(from, nil) + if err != nil { + return err + } + client, err := streamclient.NewStreamClient(ctx, streamAddress, p.ExecCfg().InternalDB, streamclient.WithLogical()) if err != nil { return err @@ -184,10 +224,6 @@ func createLogicalReplicationStreamPlanHook( repPairs[i].SrcDescriptorID = int32(spec.TableDescriptors[name].ID) } - options, err := evalLogicalReplicationOptions(ctx, stmt.Options, exprEval, p) - if err != nil { - return err - } replicationStartTime := spec.ReplicationStartTime progress := jobspb.LogicalReplicationProgress{} if cursor, ok := options.GetCursor(); ok { @@ -210,16 +246,18 @@ func createLogicalReplicationStreamPlanHook( jr := jobs.Record{ JobID: p.ExecCfg().JobRegistry.MakeJobID(), - Description: fmt.Sprintf("LOGICAL REPLICATION STREAM into %s from %s", targetsDescription, streamAddress), + Description: fmt.Sprintf("LOGICAL REPLICATION STREAM into %s from %s", targetsDescription, cleanedURI), Username: p.User(), Details: jobspb.LogicalReplicationDetails{ - StreamID: uint64(spec.StreamID), - SourceClusterID: spec.SourceClusterID, - ReplicationStartTime: replicationStartTime, - SourceClusterConnStr: string(streamAddress), - ReplicationPairs: repPairs, - TableNames: srcTableNames, - DefaultConflictResolution: defaultConflictResolution, + StreamID: uint64(spec.StreamID), + SourceClusterID: spec.SourceClusterID, + ReplicationStartTime: replicationStartTime, + SourceClusterConnStr: string(streamAddress), + ReplicationPairs: repPairs, + TableNames: srcTableNames, + DefaultConflictResolution: defaultConflictResolution, + IgnoreCDCIgnoredTTLDeletes: options.IgnoreCDCIgnoredTTLDeletes(), + Mode: mode, }, Progress: progress, } @@ -248,6 +286,9 @@ func createLogicalReplicationStreamTypeCheck( stmt.Options.DefaultFunction, stmt.Options.Mode, }, + exprutil.Bools{ + stmt.Options.IgnoreCDCIgnoredTTLDeletes, + }, } if err := exprutil.TypeCheck(ctx, "LOGICAL REPLICATION STREAM", p.SemaCtx(), toTypeCheck..., @@ -259,11 +300,12 @@ func createLogicalReplicationStreamTypeCheck( } type resolvedLogicalReplicationOptions struct { - cursor *hlc.Timestamp - mode *string + cursor hlc.Timestamp + mode string defaultFunction *jobspb.LogicalReplicationDetails_DefaultConflictResolution // Mapping of 
table name to function descriptor - userFunctions map[string]int32 + userFunctions map[string]int32 + ignoreCDCIgnoredTTLDeletes bool } func evalLogicalReplicationOptions( @@ -278,7 +320,7 @@ func evalLogicalReplicationOptions( if err != nil { return nil, err } - r.mode = &mode + r.mode = mode } if options.Cursor != nil { cursor, err := eval.String(ctx, options.Cursor) @@ -290,7 +332,7 @@ func evalLogicalReplicationOptions( if err != nil { return nil, err } - r.cursor = &asOf.Timestamp + r.cursor = asOf.Timestamp } if options.DefaultFunction != nil { defaultResolution := &jobspb.LogicalReplicationDetails_DefaultConflictResolution{} @@ -337,6 +379,10 @@ func evalLogicalReplicationOptions( r.userFunctions[objName.String()] = descID } } + + if options.IgnoreCDCIgnoredTTLDeletes == tree.DBoolTrue { + r.ignoreCDCIgnoredTTLDeletes = true + } return r, nil } @@ -359,17 +405,17 @@ func lookupFunctionID( } func (r *resolvedLogicalReplicationOptions) GetCursor() (hlc.Timestamp, bool) { - if r == nil || r.cursor == nil { + if r == nil || r.cursor.IsEmpty() { return hlc.Timestamp{}, false } - return *r.cursor, true + return r.cursor, true } func (r *resolvedLogicalReplicationOptions) GetMode() (string, bool) { - if r == nil || r.mode == nil { + if r == nil || r.mode == "" { return "", false } - return *r.mode, true + return r.mode, true } func (r *resolvedLogicalReplicationOptions) GetDefaultFunction() ( @@ -388,3 +434,10 @@ func (r *resolvedLogicalReplicationOptions) GetUserFunctions() (map[string]int32 } return r.userFunctions, true } + +func (r *resolvedLogicalReplicationOptions) IgnoreCDCIgnoredTTLDeletes() bool { + if r == nil { + return false + } + return r.ignoreCDCIgnoredTTLDeletes +} diff --git a/pkg/ccl/crosscluster/logical/dead_letter_queue.go b/pkg/ccl/crosscluster/logical/dead_letter_queue.go index 62843faa0414..f745e376c414 100644 --- a/pkg/ccl/crosscluster/logical/dead_letter_queue.go +++ b/pkg/ccl/crosscluster/logical/dead_letter_queue.go @@ -14,6 +14,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/ccl/changefeedccl/cdcevent" "github.com/cockroachdb/cockroach/pkg/repstream/streampb" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/lexbase" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -36,7 +37,7 @@ const ( dlq_timestamp TIMESTAMPTZ NOT NULL DEFAULT now():::TIMESTAMPTZ, dlq_reason STRING NOT NULL, mutation_type %s.%s.mutation_type, - key_value_bytes BYTES NOT NULL, + key_value_bytes BYTES NOT NULL NOT VISIBLE, incoming_row JSONB, -- PK should be unique based on the ID, job ID and timestamp at which the -- row was written to the table. 
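The toDLQTableName helper in the next hunk flattens schema and table into a single identifier, so the destination table ID is what keeps near-collisions like bar_.foo versus bar._foo distinct. A simplified sketch, with a toy quoting helper standing in for lexbase.EscapeSQLIdent and the crdb_replication schema name hardcoded:

package main

import (
	"fmt"
	"strings"
)

// escapeIdent is a stand-in for lexbase.EscapeSQLIdent: quote the identifier
// and double any embedded double quotes.
func escapeIdent(s string) string {
	return `"` + strings.ReplaceAll(s, `"`, `""`) + `"`
}

func dlqTableName(db string, tableID int32, schema, table string) string {
	return fmt.Sprintf("%s.crdb_replication.%s", escapeIdent(db),
		escapeIdent(fmt.Sprintf("dlq_%d_%s_%s", tableID, schema, table)))
}

func main() {
	// Both names flatten to ..._bar__foo; only the table ID disambiguates.
	fmt.Println(dlqTableName("defaultdb", 1, "bar", "_foo")) // "defaultdb".crdb_replication."dlq_1_bar__foo"
	fmt.Println(dlqTableName("defaultdb", 2, "bar_", "foo")) // "defaultdb".crdb_replication."dlq_2_bar__foo"
}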
@@ -62,21 +63,22 @@ const ( ) VALUES ($1, $2, $3, $4, $5)` ) -type fullyQualifiedTableName struct { +type dstTableMetadata struct { database string schema string table string + tableID descpb.ID } -func (f fullyQualifiedTableName) getDatabase() string { +func (f dstTableMetadata) getDatabaseName() string { return lexbase.EscapeSQLIdent(f.database) } -func (f fullyQualifiedTableName) toDLQTableName(tableID int32) string { +func (f dstTableMetadata) toDLQTableName() string { return fmt.Sprintf(dlqBaseTableName, - f.getDatabase(), + f.getDatabaseName(), dlqSchemaName, - lexbase.EscapeSQLIdent(fmt.Sprintf("dlq_%d_%s_%s", tableID, f.schema, f.table))) + lexbase.EscapeSQLIdent(fmt.Sprintf("dlq_%d_%s_%s", f.tableID, f.schema, f.table))) } type DeadLetterQueueClient interface { @@ -92,14 +94,14 @@ type DeadLetterQueueClient interface { ) error } -type loggingDeadLetterQueueClient struct { +type noopDeadLetterQueueClient struct { } -func (dlq *loggingDeadLetterQueueClient) Create(ctx context.Context) error { +func (dlq *noopDeadLetterQueueClient) Create(_ context.Context) error { return nil } -func (dlq *loggingDeadLetterQueueClient) Log( +func (dlq *noopDeadLetterQueueClient) Log( ctx context.Context, ingestionJobID int64, kv streampb.StreamEvent_KV, @@ -135,27 +137,27 @@ func (dlq *loggingDeadLetterQueueClient) Log( } type deadLetterQueueClient struct { - ie isql.Executor - tableIDToName map[int32]fullyQualifiedTableName + ie isql.Executor + srcTableIDToDestMeta map[descpb.ID]dstTableMetadata } func (dlq *deadLetterQueueClient) Create(ctx context.Context) error { // Create a dlq table for each table to be replicated. - for tableID, name := range dlq.tableIDToName { - dlqTableName := name.toDLQTableName(tableID) - createSchemaStmt := fmt.Sprintf(createSchemaBaseStmt, name.getDatabase(), dlqSchemaName) + for _, dstTableMeta := range dlq.srcTableIDToDestMeta { + dlqTableName := dstTableMeta.toDLQTableName() + createSchemaStmt := fmt.Sprintf(createSchemaBaseStmt, dstTableMeta.getDatabaseName(), dlqSchemaName) if _, err := dlq.ie.Exec(ctx, "create-dlq-schema", nil, createSchemaStmt); err != nil { - return errors.Wrapf(err, "failed to create crdb_replication schema in database %s", name.getDatabase()) + return errors.Wrapf(err, "failed to create crdb_replication schema in database %s", dstTableMeta.getDatabaseName()) } - createEnumStmt := fmt.Sprintf(createEnumBaseStmt, name.getDatabase(), dlqSchemaName) + createEnumStmt := fmt.Sprintf(createEnumBaseStmt, dstTableMeta.getDatabaseName(), dlqSchemaName) if _, err := dlq.ie.Exec(ctx, "create-dlq-enum", nil, createEnumStmt); err != nil { - return errors.Wrapf(err, "failed to create mutation_type enum in database %s", name.getDatabase()) + return errors.Wrapf(err, "failed to create mutation_type enum in database %s", dstTableMeta.getDatabaseName()) } - createTableStmt := fmt.Sprintf(createTableBaseStmt, dlqTableName, name.getDatabase(), dlqSchemaName) + createTableStmt := fmt.Sprintf(createTableBaseStmt, dlqTableName, dstTableMeta.getDatabaseName(), dlqSchemaName) if _, err := dlq.ie.Exec(ctx, "create-dlq-table", nil, createTableStmt); err != nil { - return errors.Wrapf(err, "failed to create dlq for table %d", tableID) + return errors.Wrapf(err, "failed to create dlq for table %d", dstTableMeta.tableID) } } return nil @@ -167,18 +169,19 @@ func (dlq *deadLetterQueueClient) Log( kv streampb.StreamEvent_KV, cdcEventRow cdcevent.Row, reason error, - stoppedRetyingReason retryEligibility, + stoppedRetryingReason retryEligibility, ) error { if 
!cdcEventRow.IsInitialized() { return errors.New("cdc event row not initialized") } - tableID := int32(cdcEventRow.TableID) - qualifiedName, ok := dlq.tableIDToName[tableID] + // TableID in cdcEventRow is the source table ID. + srcTableID := cdcEventRow.TableID + dstTableMeta, ok := dlq.srcTableIDToDestMeta[srcTableID] if !ok { - return errors.Newf("failed to look up fully qualified name for table %d", tableID) + return errors.Newf("failed to look up fully qualified name for table %d", srcTableID) } - dlqTableName := qualifiedName.toDLQTableName(tableID) + dlqTableName := dstTableMeta.toDLQTableName() bytes, err := protoutil.Marshal(&kv) if err != nil { @@ -202,8 +205,8 @@ func (dlq *deadLetterQueueClient) Log( nil, /* txn */ fmt.Sprintf(insertRowStmtFallBack, dlqTableName), ingestionJobID, - tableID, - fmt.Sprintf("%s (%s)", reason, stoppedRetyingReason), + dstTableMeta.tableID, + fmt.Sprintf("%s (%s)", reason, stoppedRetryingReason), mutationType.String(), bytes, ); err != nil { @@ -218,8 +221,8 @@ nil, /* txn */ fmt.Sprintf(insertBaseStmt, dlqTableName), ingestionJobID, - tableID, - fmt.Sprintf("%s (%s)", reason, stoppedRetyingReason), + dstTableMeta.tableID, + fmt.Sprintf("%s (%s)", reason, stoppedRetryingReason), mutationType.String(), bytes, jsonRow, @@ -230,14 +233,14 @@ } func InitDeadLetterQueueClient( - ie isql.Executor, tableIDToName map[int32]fullyQualifiedTableName, + ie isql.Executor, srcTableIDToDestMeta map[descpb.ID]dstTableMetadata, ) DeadLetterQueueClient { if testingDLQ != nil { return testingDLQ } return &deadLetterQueueClient{ - ie: ie, - tableIDToName: tableIDToName, + ie: ie, + srcTableIDToDestMeta: srcTableIDToDestMeta, } } @@ -251,6 +254,6 @@ func TestingSetDLQ(d DeadLetterQueueClient) func() { return func() { testingDLQ = v } } -func InitLoggingDeadLetterQueueClient() DeadLetterQueueClient { - return &loggingDeadLetterQueueClient{} +func InitNoopDeadLetterQueueClient() DeadLetterQueueClient { + return &noopDeadLetterQueueClient{} } diff --git a/pkg/ccl/crosscluster/logical/dead_letter_queue_test.go b/pkg/ccl/crosscluster/logical/dead_letter_queue_test.go index e1a860faf007..3d6600055ac0 100644 --- a/pkg/ccl/crosscluster/logical/dead_letter_queue_test.go +++ b/pkg/ccl/crosscluster/logical/dead_letter_queue_test.go @@ -18,7 +18,9 @@ import ( "github.com/cockroachdb/cockroach/pkg/ccl/changefeedccl/cdcevent" "github.com/cockroachdb/cockroach/pkg/ccl/changefeedccl/cdctest" "github.com/cockroachdb/cockroach/pkg/ccl/changefeedccl/changefeedbase" + "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/repstream/streampb" "github.com/cockroachdb/cockroach/pkg/roachpb" @@ -26,8 +28,10 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/desctestutils" + "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" + "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" "github.com/cockroachdb/cockroach/pkg/util/hlc" @@ -41,9 +45,111 @@ import ( const ( defaultDbName = "defaultdb" publicScName = "public" + dbAName = "a" ) -func TestLoggingDLQClient(t *testing.T) { +func setupDLQTestTables( + ctx context.Context, + t *testing.T, + sqlDB *sqlutils.SQLRunner, + kvDB *kv.DB, + srv serverutils.TestServerInterface, +) ( + tableNameToDesc map[string]catalog.TableDescriptor, + srcTableIDToName map[descpb.ID]dstTableMetadata, + expectedDLQTables []string, + ie isql.Executor, +) { + s := srv.ApplicationLayer() + sd := sql.NewInternalSessionData(ctx, s.ClusterSettings(), "" /* opName */) + ie = s.InternalDB().(isql.DB).Executor(isql.WithSessionData(sd)) + + sqlDB.Exec(t, `CREATE TABLE foo (a INT)`) + + sqlDB.Exec(t, `CREATE SCHEMA baz`) + sqlDB.Exec(t, `CREATE TABLE baz.foo (a INT)`) + sqlDB.Exec(t, `CREATE SCHEMA bar_`) + sqlDB.Exec(t, `CREATE TABLE bar_.foo (a INT)`) + sqlDB.Exec(t, `CREATE SCHEMA bar`) + sqlDB.Exec(t, `CREATE TABLE bar._foo (a INT)`) + + sqlDB.Exec(t, `CREATE DATABASE a`) + sqlDB.Exec(t, `CREATE SCHEMA a.baz`) + sqlDB.Exec(t, `CREATE TABLE a.public.bar (a INT)`) + sqlDB.Exec(t, `CREATE TABLE a.baz.foo (a INT)`) + + dstTableMeta := []dstTableMetadata{ + // Base test case. + { + database: defaultDbName, + schema: publicScName, + table: "foo", + tableID: 1, + }, + // Verify that distinct DLQ tables are created for tables + // in different databases with identical schema and table + // names. + { + database: defaultDbName, + schema: "baz", + table: "foo", + tableID: 1, + }, + { + database: dbAName, + schema: "baz", + table: "foo", + tableID: 1, + }, + // Verify that distinct DLQ tables are created for tables + // with identical fully qualified names and distinct + // table IDs. 
+ { + database: defaultDbName, + schema: "bar", + table: "_foo", + tableID: 1, + }, + { + database: defaultDbName, + schema: "bar_", + table: "foo", + tableID: 2, + }, + } + + tableNameToDesc = make(map[string]catalog.TableDescriptor) + srcTableIDToName = make(map[descpb.ID]dstTableMetadata) + expectedDLQTables = []string{} + + for _, md := range dstTableMeta { + desc := desctestutils.TestingGetTableDescriptor(kvDB, s.Codec(), md.database, md.schema, md.table) + srcTableID := desc.GetID() + srcTableIDToName[srcTableID] = md + fullyQualifiedName := fmt.Sprintf("%s.%s.%s", md.database, md.schema, md.table) + tableNameToDesc[fullyQualifiedName] = desc + expectedDLQTables = append(expectedDLQTables, fmt.Sprintf("dlq_%d_%s_%s", md.tableID, md.schema, md.table)) + } + return tableNameToDesc, srcTableIDToName, expectedDLQTables, ie +} + +func WaitForDLQLogs(t *testing.T, db *sqlutils.SQLRunner, tableName string, numRowsExpected int) { + t.Logf("waiting for write conflicts to be logged in DLQ table %s", tableName) + testutils.SucceedsSoon(t, func() error { + query := fmt.Sprintf("SELECT count(*) FROM %s", tableName) + var numRows int + db.QueryRow(t, query).Scan(&numRows) + if numRows != numRowsExpected { + return errors.Newf("Expected DLQ table '%s' to have %d rows, received %d rows instead", + tableName, + numRowsExpected, + numRows) + } + return nil + }) +} + +func TestNoopDLQClient(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) ctx := context.Background() @@ -65,7 +171,7 @@ func TestLoggingDLQClient(t *testing.T) { ed, err := cdcevent.NewEventDescriptor(tableDesc, familyDesc, false, false, hlc.Timestamp{}) require.NoError(t, err) - dlqClient := InitLoggingDeadLetterQueueClient() + dlqClient := InitNoopDeadLetterQueueClient() require.NoError(t, dlqClient.Create(ctx)) type testCase struct { @@ -105,88 +211,18 @@ func TestLoggingDLQClient(t *testing.T) { } } -func TestDLQClient(t *testing.T) { +func TestDLQCreation(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) ctx := context.Background() srv, db, kvDB := serverutils.StartServer(t, base.TestServerArgs{}) - defer srv.Stopper().Stop(context.Background()) - s := srv.ApplicationLayer() - - sd := sql.NewInternalSessionData(ctx, s.ClusterSettings(), "" /* opName */) - ie := s.InternalDB().(isql.DB).Executor(isql.WithSessionData(sd)) + defer srv.Stopper().Stop(ctx) sqlDB := sqlutils.MakeSQLRunner(db) - sqlDB.Exec(t, `CREATE TABLE foo (a INT)`) - - sqlDB.Exec(t, `CREATE SCHEMA baz`) - sqlDB.Exec(t, `CREATE TABLE baz.foo (a INT)`) - sqlDB.Exec(t, `CREATE SCHEMA bar_`) - sqlDB.Exec(t, `CREATE TABLE bar_.foo (a INT)`) - sqlDB.Exec(t, `CREATE SCHEMA bar`) - sqlDB.Exec(t, `CREATE TABLE bar._foo (a INT)`) - - sqlDB.Exec(t, `CREATE DATABASE a`) - sqlDB.Exec(t, `CREATE SCHEMA a.baz`) - sqlDB.Exec(t, `CREATE TABLE a.public.bar (a INT)`) - sqlDB.Exec(t, `CREATE TABLE a.baz.foo (a INT)`) - - dbAName := "a" - - tableNames := []fullyQualifiedTableName{ - { - database: defaultDbName, - schema: publicScName, - table: "foo", - }, - { - database: defaultDbName, - schema: "baz", - table: "foo", - }, - { - database: defaultDbName, - schema: "bar", - table: "_foo", - }, - { - database: defaultDbName, - schema: "bar_", - table: "foo", - }, - { - database: dbAName, - schema: publicScName, - table: "bar", - }, - { - database: dbAName, - schema: "baz", - table: "foo", - }, - } - - tableNameToDesc := make(map[string]catalog.TableDescriptor) - tableIDToName := make(map[int32]fullyQualifiedTableName) - var expectedDLQTables 
[]string - - for _, name := range tableNames { - desc := desctestutils.TestingGetTableDescriptor(kvDB, s.Codec(), name.database, name.schema, name.table) - tableID := int32(desc.GetID()) - tableIDToName[tableID] = name - fullyQualifiedName := fmt.Sprintf("%s.%s.%s", name.database, name.schema, name.table) - tableNameToDesc[fullyQualifiedName] = desc - expectedDLQTables = append(expectedDLQTables, fmt.Sprintf("dlq_%d_%s_%s", tableID, name.schema, name.table)) - } - - // Build family desc for cdc event row - familyDesc := &descpb.ColumnFamilyDescriptor{ - ID: descpb.FamilyID(1), - Name: "", - } + _, srcTableIDToName, expectedDLQTables, ie := setupDLQTestTables(ctx, t, sqlDB, kvDB, srv) - dlqClient := InitDeadLetterQueueClient(ie, tableIDToName) + dlqClient := InitDeadLetterQueueClient(ie, srcTableIDToName) require.NoError(t, dlqClient.Create(ctx)) // Verify DLQ tables are created with their expected names @@ -212,6 +248,27 @@ func TestDLQClient(t *testing.T) { fmt.Sprintf(`SELECT schema, name, values FROM [SHOW ENUMS FROM %s.%s]`, defaultDbName, dlqSchemaName), enumRow) sqlDB.CheckQueryResults(t, fmt.Sprintf(`SELECT schema, name, values FROM [SHOW ENUMS FROM %s.%s]`, dbAName, dlqSchemaName), enumRow) +} + +func TestDLQLogging(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + ctx := context.Background() + + srv, db, kvDB := serverutils.StartServer(t, base.TestServerArgs{}) + defer srv.Stopper().Stop(ctx) + + sqlDB := sqlutils.MakeSQLRunner(db) + tableNameToDesc, srcTableIDToName, _, ie := setupDLQTestTables(ctx, t, sqlDB, kvDB, srv) + + // Build family desc for cdc event row + familyDesc := &descpb.ColumnFamilyDescriptor{ + ID: descpb.FamilyID(1), + Name: "", + } + + dlqClient := InitDeadLetterQueueClient(ie, srcTableIDToName) + require.NoError(t, dlqClient.Create(ctx)) type testCase struct { name string @@ -254,13 +311,6 @@ func TestDLQClient(t *testing.T) { dlqReason: noSpace, mutationType: insertMutation, }, - { - name: "insert dlq fallback row for a.public.bar", - jobID: 1, - tableDesc: tableNameToDesc["a.public.bar"], - dlqReason: noSpace, - mutationType: insertMutation, - }, { name: "insert dlq fallback row for a.baz.foo", jobID: 1, @@ -276,7 +326,7 @@ func TestDLQClient(t *testing.T) { type dlqRow struct { jobID int64 - tableID int32 + tableID descpb.ID dlqReason string mutationType string kv []byte @@ -300,8 +350,8 @@ func TestDLQClient(t *testing.T) { if tc.expectedErrMsg == "" { require.NoError(t, err) - tableID := int32(tc.tableDesc.GetID()) - name, ok := tableIDToName[tableID] + srcTableID := tc.tableDesc.GetID() + md, ok := srcTableIDToName[srcTableID] require.True(t, ok) actualRow := dlqRow{} @@ -312,7 +362,7 @@ func TestDLQClient(t *testing.T) { mutation_type, key_value_bytes, incoming_row - FROM %s`, name.toDLQTableName(tableID))).Scan( + FROM %s`, md.toDLQTableName())).Scan( &actualRow.jobID, &actualRow.tableID, &actualRow.dlqReason, @@ -326,7 +376,7 @@ func TestDLQClient(t *testing.T) { expectedRow := dlqRow{ jobID: tc.jobID, - tableID: tableID, + tableID: md.tableID, dlqReason: fmt.Sprintf("%s (%s)", tc.applyError.Error(), tc.dlqReason), mutationType: tc.mutationType.String(), kv: bytes, @@ -377,13 +427,13 @@ func TestDLQJSONQuery(t *testing.T) { ie := srv.InternalDB().(isql.DB).Executor() defer cleanup() - tableID := int32(tableDesc.GetID()) - tableName := fullyQualifiedTableName{ + tableID := tableDesc.GetID() + tableName := dstTableMetadata{ database: defaultDbName, schema: publicScName, table: "foo", } - dlqClient := 
InitDeadLetterQueueClient(ie, map[int32]fullyQualifiedTableName{ + dlqClient := InitDeadLetterQueueClient(ie, map[descpb.ID]dstTableMetadata{ tableID: tableName, }) require.NoError(t, dlqClient.Create(ctx)) @@ -398,7 +448,7 @@ func TestDLQJSONQuery(t *testing.T) { require.NoError(t, err) require.NoError(t, dlqClient.Log(ctx, 1, streampb.StreamEvent_KV{KeyValue: kv}, updatedRow, errInjected, noSpace)) - dlqtableName := tableName.toDLQTableName(tableID) + dlqtableName := tableName.toDLQTableName() var ( a int @@ -410,3 +460,92 @@ require.Equal(t, "hello", b) require.NotZero(t, rowID) } + +// TestEndToEndDLQ tests that write conflicts that occur during an +// LDR job are persisted to the job's corresponding DLQ table. +func TestEndToEndDLQ(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + ctx := context.Background() + + testDLQClusterArgs := base.TestClusterArgs{ + ServerArgs: base.TestServerArgs{ + DefaultTestTenant: base.TestIsForStuffThatShouldWorkWithSecondaryTenantsButDoesntYet(127241), + Knobs: base.TestingKnobs{ + JobsTestingKnobs: jobs.NewTestingKnobsWithShortIntervals(), + DistSQL: &execinfra.TestingKnobs{ + StreamingTestingKnobs: &sql.StreamingTestingKnobs{ + FailureRate: 100, + }, + }, + }, + }, + } + + server, s, dbA, dbB := setupLogicalTestServer(t, ctx, testDLQClusterArgs, 1) + defer server.Stopper().Stop(ctx) + + _, cleanup := s.PGUrl(t, serverutils.DBName("a")) + defer cleanup() + dbBURL, cleanupB := s.PGUrl(t, serverutils.DBName("b")) + defer cleanupB() + + var expectedJobID jobspb.JobID + dbA.QueryRow(t, "CREATE LOGICAL REPLICATION STREAM FROM TABLE tab ON $1 INTO TABLE tab WITH DEFAULT FUNCTION = 'dlq'", dbBURL.String()).Scan(&expectedJobID) + + now := s.Clock().Now() + WaitUntilReplicatedTime(t, now, dbA, expectedJobID) + + dbB.Exec(t, "INSERT INTO tab VALUES (3, 'celeriac')") + dbB.Exec(t, "UPSERT INTO tab VALUES (1, 'goodbye, again')") + + expectedTableID := sqlutils.QueryTableID(t, server.Conns[0], "a", "public", "tab") + dlqTableName := fmt.Sprintf("crdb_replication.dlq_%d_public_tab", expectedTableID) + WaitForDLQLogs(t, dbA, dlqTableName, 2) + + var ( + jobID jobspb.JobID + tableID uint32 + dlqReason string + mutationType string + ) + + dbA.QueryRow(t, fmt.Sprintf(` + SELECT + ingestion_job_id, + table_id, + dlq_reason, + mutation_type + FROM %s + `, dlqTableName)).Scan( + &jobID, + &tableID, + &dlqReason, + &mutationType, + ) + + require.Equal(t, expectedJobID, jobID) + require.Equal(t, expectedTableID, tableID) + // DLQ reason is set to `tooOld` when `errInjected` is returned by `failureInjector`. + require.Equal(t, fmt.Sprintf("%s (%s)", errInjected, tooOld), dlqReason) + require.Equal(t, insertMutation.String(), mutationType) + + dbA.CheckQueryResults( + t, + fmt.Sprintf(` + SELECT + incoming_row->>'payload' AS payload, + incoming_row->>'pk' AS pk + FROM %s + ORDER BY pk + `, dlqTableName), + [][]string{ + { + "goodbye, again", "1", + }, + { + "celeriac", "3", + }, + }, + ) +} diff --git a/pkg/ccl/crosscluster/logical/logical_replication_dist.go b/pkg/ccl/crosscluster/logical/logical_replication_dist.go index 3a2e2a4007ed..36fce40dc024 100644 --- a/pkg/ccl/crosscluster/logical/logical_replication_dist.go +++ b/pkg/ccl/crosscluster/logical/logical_replication_dist.go @@ -35,6 +35,8 @@ func constructLogicalReplicationWriterSpecs( tableMd map[int32]execinfrapb.TableReplicationMetadata, jobID jobspb.JobID, streamID streampb.StreamID, + ignoreCDCIgnoredTTLDeletes bool, + mode 
jobspb.LogicalReplicationDetails_ApplyMode, ) (map[base.SQLInstanceID][]execinfrapb.LogicalReplicationWriterSpec, error) { spanGroup := roachpb.SpanGroup{} baseSpec := execinfrapb.LogicalReplicationWriterSpec{ @@ -45,6 +47,8 @@ func constructLogicalReplicationWriterSpecs( Checkpoint: checkpoint, // TODO: Only forward relevant checkpoint info StreamAddress: string(streamAddress), TableMetadata: tableMd, + IgnoreCDCIgnoredTTLDeletes: ignoreCDCIgnoredTTLDeletes, + Mode: mode, } writerSpecs := make(map[base.SQLInstanceID][]execinfrapb.LogicalReplicationWriterSpec, len(destSQLInstances)) @@ -55,7 +59,7 @@ func constructLogicalReplicationWriterSpecs( destID := matcher.FindMatch(candidate.ClosestDestIDs) partition := candidate.Partition - log.Infof(ctx, "logical replication src-dst pair candidate: %s (locality %s) - %d ("+ + log.VInfof(ctx, 2, "logical replication src-dst pair candidate: %s (locality %s) - %d ("+ "locality %s)", partition.ID, partition.SrcLocality, diff --git a/pkg/ccl/crosscluster/logical/logical_replication_job.go b/pkg/ccl/crosscluster/logical/logical_replication_job.go index ef26ecbe2270..f43b569dd45f 100644 --- a/pkg/ccl/crosscluster/logical/logical_replication_job.go +++ b/pkg/ccl/crosscluster/logical/logical_replication_job.go @@ -158,7 +158,7 @@ func (r *logicalReplicationResumer) ingest( } // TODO(azhu): add a flag to avoid recreating dlq tables during replanning - dlqClient := InitDeadLetterQueueClient(execCfg.InternalDB.Executor(), planInfo.tableIDsToNames) + dlqClient := InitDeadLetterQueueClient(execCfg.InternalDB.Executor(), planInfo.srcTableIDsToDestMeta) if err := dlqClient.Create(ctx); err != nil { return errors.Wrap(err, "failed to create dead letter queue") } @@ -292,9 +292,9 @@ type logicalReplicationPlanner struct { } type logicalReplicationPlanInfo struct { - sourceSpans []roachpb.Span - streamAddress []string - tableIDsToNames map[int32]fullyQualifiedTableName + sourceSpans []roachpb.Span + streamAddress []string + srcTableIDsToDestMeta map[descpb.ID]dstTableMetadata } func makeLogicalReplicationPlanner( @@ -331,7 +331,7 @@ func (p *logicalReplicationPlanner) generatePlanImpl( progress = p.job.Progress().Details.(*jobspb.Progress_LogicalReplication).LogicalReplication payload = p.job.Payload().Details.(*jobspb.Payload_LogicalReplicationDetails).LogicalReplicationDetails info = logicalReplicationPlanInfo{ - tableIDsToNames: make(map[int32]fullyQualifiedTableName), + srcTableIDsToDestMeta: make(map[descpb.ID]dstTableMetadata), } ) asOf := progress.ReplicatedTime @@ -390,10 +390,11 @@ func (p *logicalReplicationPlanner) generatePlanImpl( DestinationTableName: dstTableDesc.GetName(), DestinationFunctionOID: uint32(fnOID), } - info.tableIDsToNames[pair.DstDescriptorID] = fullyQualifiedTableName{ + info.srcTableIDsToDestMeta[descpb.ID(pair.SrcDescriptorID)] = dstTableMetadata{ database: dbDesc.GetName(), schema: scDesc.GetName(), table: dstTableDesc.GetName(), + tableID: descpb.ID(pair.DstDescriptorID), } } return nil @@ -419,7 +420,10 @@ func (p *logicalReplicationPlanner) generatePlanImpl( progress.Checkpoint, tablesMd, p.job.ID(), - streampb.StreamID(payload.StreamID)) + streampb.StreamID(payload.StreamID), + payload.IgnoreCDCIgnoredTTLDeletes, + payload.Mode, + ) if err != nil { return nil, nil, info, err } @@ -575,6 +579,9 @@ func (r *logicalReplicationResumer) ingestWithRetries( func loadOnlineReplicatedTime( ctx context.Context, db isql.DB, ingestionJob *jobs.Job, ) hlc.Timestamp { + // TODO(ssd): Isn't this load redundant? 
The Update API for + // the job also updates the local copy of the job with the + // latest progress. progress, err := jobs.LoadJobProgress(ctx, db, ingestionJob.ID()) if err != nil { log.Warningf(ctx, "error loading job progress: %s", err) @@ -588,20 +595,56 @@ func loadOnlineReplicatedTime( } // OnFailOrCancel implements jobs.Resumer interface -func (h *logicalReplicationResumer) OnFailOrCancel( +func (r *logicalReplicationResumer) OnFailOrCancel( ctx context.Context, execCtx interface{}, _ error, ) error { execCfg := execCtx.(sql.JobExecContext).ExecCfg() metrics := execCfg.JobRegistry.MetricsStruct().JobSpecificMetrics[jobspb.TypeLogicalReplication].(*Metrics) metrics.ReplicatedTimeSeconds.Update(0) + r.completeProducerJob(ctx, execCfg.InternalDB) return nil } // CollectProfile implements jobs.Resumer interface -func (h *logicalReplicationResumer) CollectProfile(_ context.Context, _ interface{}) error { +func (r *logicalReplicationResumer) CollectProfile(_ context.Context, _ interface{}) error { return nil } +func (r *logicalReplicationResumer) completeProducerJob( + ctx context.Context, internalDB *sql.InternalDB, +) { + var ( + progress = r.job.Progress().Details.(*jobspb.Progress_LogicalReplication).LogicalReplication + payload = r.job.Details().(jobspb.LogicalReplicationDetails) + ) + + streamID := streampb.StreamID(payload.StreamID) + log.Infof(ctx, "attempting to update producer job %d", streamID) + if err := timeutil.RunWithTimeout(ctx, "complete producer job", 30*time.Second, + func(ctx context.Context) error { + client, err := streamclient.GetFirstActiveClient(ctx, + append([]string{payload.SourceClusterConnStr}, progress.StreamAddresses...), + internalDB, + streamclient.WithStreamID(streamID), + streamclient.WithLogical(), + ) + if err != nil { + return err + } + defer closeAndLog(ctx, client) + return client.Complete(ctx, streamID, false /* successfulIngestion */) + }, + ); err != nil { + log.Warningf(ctx, "error completing the source cluster producer job %d: %s", streamID, err.Error()) + } +} + +func closeAndLog(ctx context.Context, d streamclient.Dialer) { + if err := d.Close(ctx); err != nil { + log.Warningf(ctx, "error closing stream client: %s", err.Error()) + } +} + func getRetryPolicy(knobs *sql.StreamingTestingKnobs) retry.Options { if knobs != nil && knobs.DistSQLRetryPolicy != nil { return *knobs.DistSQLRetryPolicy diff --git a/pkg/ccl/crosscluster/logical/logical_replication_job_test.go b/pkg/ccl/crosscluster/logical/logical_replication_job_test.go index 0336efea4126..c0b4ed926e43 100644 --- a/pkg/ccl/crosscluster/logical/logical_replication_job_test.go +++ b/pkg/ccl/crosscluster/logical/logical_replication_job_test.go @@ -13,6 +13,7 @@ import ( "context" "fmt" "net/url" + "slices" "strings" "sync/atomic" "testing" @@ -55,6 +56,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/syncutil" "github.com/cockroachdb/cockroach/pkg/util/uuid" "github.com/cockroachdb/errors" + "github.com/lib/pq" "github.com/stretchr/testify/require" ) @@ -276,10 +278,8 @@ func TestLogicalStreamIngestionJob(t *testing.T) { dbA.QueryRow(t, "CREATE LOGICAL REPLICATION STREAM FROM TABLE tab ON $1 INTO TABLE tab", dbBURL.String()).Scan(&jobAID) dbB.QueryRow(t, "CREATE LOGICAL REPLICATION STREAM FROM TABLE tab ON $1 INTO TABLE tab", dbAURL.String()).Scan(&jobBID) - now := server.Server(0).Clock().Now() - t.Logf("waiting for replication job %d", jobAID) + now := s.Clock().Now() WaitUntilReplicatedTime(t, now, dbA, jobAID) - t.Logf("waiting for 
replication job %d", jobBID) WaitUntilReplicatedTime(t, now, dbB, jobBID) dbA.Exec(t, "INSERT INTO tab VALUES (2, 'potato')") @@ -287,7 +287,7 @@ func TestLogicalStreamIngestionJob(t *testing.T) { dbA.Exec(t, "UPSERT INTO tab VALUES (1, 'hello, again')") dbB.Exec(t, "UPSERT INTO tab VALUES (1, 'goodbye, again')") - now = server.Server(0).Clock().Now() + now = s.Clock().Now() WaitUntilReplicatedTime(t, now, dbA, jobAID) WaitUntilReplicatedTime(t, now, dbB, jobBID) @@ -301,7 +301,7 @@ func TestLogicalStreamIngestionJob(t *testing.T) { // Verify that we didn't have the data looping problem. We expect 3 CPuts // when inserting new rows and 3 Puts when updating existing rows. - expPuts, expCPuts := 3, 3 + expPuts, expCPuts := 3, 4 if tryOptimisticInsertEnabled.Get(&s.ClusterSettings().SV) { // When performing 1 update, we don't have the prevValue set, so if // we're using the optimistic insert strategy, it would result in an @@ -347,7 +347,7 @@ func TestLogicalStreamIngestionJobWithCursor(t *testing.T) { dbA.Exec(t, "INSERT INTO tab VALUES (7, 'do not replicate')") dbB.Exec(t, "INSERT INTO tab VALUES (8, 'do not replicate')") // Perform the inserts first before starting the LDR stream. - now := server.Server(0).Clock().Now() + now := s.Clock().Now() dbA.Exec(t, "INSERT INTO tab VALUES (2, 'potato')") dbB.Exec(t, "INSERT INTO tab VALUES (3, 'celeriac')") dbA.Exec(t, "UPSERT INTO tab VALUES (1, 'hello, again')") @@ -356,10 +356,8 @@ func TestLogicalStreamIngestionJobWithCursor(t *testing.T) { dbA.QueryRow(t, "CREATE LOGICAL REPLICATION STREAM FROM TABLE tab ON $1 INTO TABLE tab WITH CURSOR=$2", dbBURL.String(), now.AsOfSystemTime()).Scan(&jobAID) dbB.QueryRow(t, "CREATE LOGICAL REPLICATION STREAM FROM TABLE tab ON $1 INTO TABLE tab WITH CURSOR=$2", dbAURL.String(), now.AsOfSystemTime()).Scan(&jobBID) - now = server.Server(0).Clock().Now() - t.Logf("waiting for replication job %d", jobAID) + now = s.Clock().Now() WaitUntilReplicatedTime(t, now, dbA, jobAID) - t.Logf("waiting for replication job %d", jobBID) WaitUntilReplicatedTime(t, now, dbB, jobBID) // The rows added before the now time should remain only @@ -423,19 +421,47 @@ func TestLogicalStreamIngestionAdvancePTS(t *testing.T) { dbA.QueryRow(t, "CREATE LOGICAL REPLICATION STREAM FROM TABLE tab ON $1 INTO TABLE tab", dbBURL.String()).Scan(&jobAID) dbB.QueryRow(t, "CREATE LOGICAL REPLICATION STREAM FROM TABLE tab ON $1 INTO TABLE tab", dbAURL.String()).Scan(&jobBID) - now := server.Server(0).Clock().Now() - t.Logf("waiting for replication job %d", jobAID) + now := s.Clock().Now() WaitUntilReplicatedTime(t, now, dbA, jobAID) // The ingestion job on cluster A has a pts on cluster B. - producerJobIDB := replicationutils.GetLatestProducerJobID(t, dbB) + producerJobIDB := replicationutils.GetProducerJobIDFromLDRJob(t, dbA, jobAID) replicationutils.WaitForPTSProtection(t, ctx, dbB, s, producerJobIDB, now) - t.Logf("waiting for replication job %d", jobBID) WaitUntilReplicatedTime(t, now, dbB, jobBID) - producerJobIDA := replicationutils.GetLatestProducerJobID(t, dbA) + producerJobIDA := replicationutils.GetProducerJobIDFromLDRJob(t, dbB, jobBID) replicationutils.WaitForPTSProtection(t, ctx, dbA, s, producerJobIDA, now) } +// TestLogicalStreamIngestionCancelUpdatesProducerJob tests whether +// the producer job's OnFailOrCancel updates the the related producer +// job, resulting in the PTS record being removed. 
+func TestLogicalStreamIngestionCancelUpdatesProducerJob(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + ctx := context.Background() + server, s, dbA, dbB := setupLogicalTestServer(t, ctx, testClusterBaseClusterArgs, 1) + defer server.Stopper().Stop(ctx) + + dbA.Exec(t, "SET CLUSTER SETTING physical_replication.producer.stream_liveness_track_frequency='50ms'") + + dbAURL, cleanup := s.PGUrl(t, serverutils.DBName("a")) + defer cleanup() + + var jobBID jobspb.JobID + dbB.QueryRow(t, "CREATE LOGICAL REPLICATION STREAM FROM TABLE tab ON $1 INTO TABLE tab", dbAURL.String()).Scan(&jobBID) + + WaitUntilReplicatedTime(t, s.Clock().Now(), dbB, jobBID) + + producerJobID := replicationutils.GetProducerJobIDFromLDRJob(t, dbB, jobBID) + jobutils.WaitForJobToRun(t, dbA, producerJobID) + + dbB.Exec(t, "CANCEL JOB $1", jobBID) + jobutils.WaitForJobToCancel(t, dbB, jobBID) + jobutils.WaitForJobToFail(t, dbA, producerJobID) + replicationutils.WaitForPTSProtectionToNotExist(t, ctx, dbA, s, producerJobID) +} + func TestLogicalStreamIngestionErrors(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) @@ -472,7 +498,7 @@ func TestLogicalStreamIngestionErrors(t *testing.T) { if s.Codec().IsSystem() { dbB.ExpectErr(t, "kv.rangefeed.enabled must be enabled on the source cluster for logical replication", createQ, urlA) - kvserver.RangefeedEnabled.Override(ctx, &server.Server(0).ClusterSettings().SV, true) + kvserver.RangefeedEnabled.Override(ctx, &s.ClusterSettings().SV, true) } dbB.Exec(t, createQ, urlA) @@ -526,6 +552,70 @@ family f2(other_payload, v2)) serverASQL.CheckQueryResults(t, "SELECT * from tab_with_cf", expectedRows) } +func TestFilterRangefeedInReplicationStream(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + skip.UnderRace(t, "multi cluster/node config exhausts hardware") + + ctx := context.Background() + + filterVal := []bool{} + var filterValLock syncutil.Mutex + + clusterArgs := base.TestClusterArgs{ + ServerArgs: base.TestServerArgs{ + DefaultTestTenant: base.TestControlsTenantsExplicitly, + Knobs: base.TestingKnobs{ + JobsTestingKnobs: jobs.NewTestingKnobsWithShortIntervals(), + DistSQL: &execinfra.TestingKnobs{ + StreamingTestingKnobs: &sql.StreamingTestingKnobs{ + BeforeClientSubscribe: func(_ string, _ string, _ span.Frontier, filterRangefeed bool) { + filterValLock.Lock() + defer filterValLock.Unlock() + filterVal = append(filterVal, filterRangefeed) + }, + }, + }, + }, + }, + } + + server, s, dbA, dbB := setupLogicalTestServer(t, ctx, clusterArgs, 1) + defer server.Stopper().Stop(ctx) + + dbAURL, cleanup := s.PGUrl(t, serverutils.DBName("a")) + defer cleanup() + dbBURL, cleanupB := s.PGUrl(t, serverutils.DBName("b")) + defer cleanupB() + + var ( + jobAID jobspb.JobID + jobBID jobspb.JobID + ) + + dbA.QueryRow(t, "CREATE LOGICAL REPLICATION STREAM FROM TABLE tab ON $1 INTO TABLE tab", dbBURL.String()).Scan(&jobAID) + dbB.QueryRow(t, "CREATE LOGICAL REPLICATION STREAM FROM TABLE tab ON $1 INTO TABLE tab WITH IGNORE_CDC_IGNORED_TTL_DELETES", dbAURL.String()).Scan(&jobBID) + + now := server.Server(0).Clock().Now() + t.Logf("waiting for replication job %d", jobAID) + WaitUntilReplicatedTime(t, now, dbA, jobAID) + t.Logf("waiting for replication job %d", jobBID) + WaitUntilReplicatedTime(t, now, dbB, jobBID) + + // Verify that Job contains FilterRangeFeed + details := jobutils.GetJobPayload(t, dbA, jobAID).GetLogicalReplicationDetails() + require.False(t, details.IgnoreCDCIgnoredTTLDeletes) + + details = 
jobutils.GetJobPayload(t, dbB, jobBID).GetLogicalReplicationDetails() + require.True(t, details.IgnoreCDCIgnoredTTLDeletes) + + require.Equal(t, len(filterVal), 2) + + // Only one should be true + require.True(t, filterVal[0] != filterVal[1]) +} + func TestRandomTables(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) @@ -547,7 +637,7 @@ func TestRandomTables(t *testing.T) { // We do not have full support for column families. randgen.SkipColumnFamilyMutation()) stmt := tree.SerializeForDisplay(createStmt) - t.Logf(stmt) + t.Log(stmt) runnerA.Exec(t, stmt) runnerB.Exec(t, stmt) @@ -578,7 +668,6 @@ func TestRandomTables(t *testing.T) { var jobBID jobspb.JobID runnerB.QueryRow(t, streamStartStmt, dbAURL.String()).Scan(&jobBID) - t.Logf("waiting for replication job %d", jobBID) WaitUntilReplicatedTime(t, s.Clock().Now(), runnerB, jobBID) compareReplicatedTables(t, s, "a", "b", tableName, runnerA, runnerB) @@ -683,13 +772,6 @@ func TestPreviouslyInterestingTables(t *testing.T) { defer cleanup() for i, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - if tc.useUDF { - defaultSQLProcessor = udfApplierProcessor - defer func() { - defaultSQLProcessor = lwwProcessor - }() - } - tableName := fmt.Sprintf("%s%d", baseTableName, i) schemaStmt := strings.ReplaceAll(tc.schema, baseTableName, tableName) addCol := fmt.Sprintf(`ALTER TABLE %s `+lwwColumnAdd, tableName) @@ -713,7 +795,6 @@ func TestPreviouslyInterestingTables(t *testing.T) { var jobBID jobspb.JobID runnerB.QueryRow(t, streamStartStmt, dbAURL.String()).Scan(&jobBID) - t.Logf("waiting for replication job %d", jobBID) WaitUntilReplicatedTime(t, s.Clock().Now(), runnerB, jobBID) if tc.delete { @@ -752,7 +833,7 @@ func TestLogicalAutoReplan(t *testing.T) { JobsTestingKnobs: jobs.NewTestingKnobsWithShortIntervals(), DistSQL: &execinfra.TestingKnobs{ StreamingTestingKnobs: &sql.StreamingTestingKnobs{ - BeforeClientSubscribe: func(addr string, token string, _ span.Frontier) { + BeforeClientSubscribe: func(addr string, token string, _ span.Frontier, _ bool) { addressesMu.Lock() defer addressesMu.Unlock() clientAddresses[addr] = struct{}{} @@ -791,10 +872,8 @@ func TestLogicalAutoReplan(t *testing.T) { dbA.QueryRow(t, "CREATE LOGICAL REPLICATION STREAM FROM TABLE tab ON $1 INTO TABLE tab", dbBURL.String()).Scan(&jobAID) dbB.QueryRow(t, "CREATE LOGICAL REPLICATION STREAM FROM TABLE tab ON $1 INTO TABLE tab", dbAURL.String()).Scan(&jobBID) - now := server.Server(0).Clock().Now() - t.Logf("waiting for replication job %d", jobAID) + now := s.Clock().Now() WaitUntilReplicatedTime(t, now, dbA, jobAID) - t.Logf("waiting for replication job %d", jobBID) WaitUntilReplicatedTime(t, now, dbB, jobBID) server.AddAndStartServer(t, clusterArgs.ServerArgs) @@ -831,6 +910,7 @@ func TestLogicalJobResiliency(t *testing.T) { defer log.Scope(t).Close(t) skip.UnderRace(t, "multi cluster/node config exhausts hardware") + skip.UnderDeadlock(t, "Scattering prior to creating LDR job slows down ingestion") clusterArgs := base.TestClusterArgs{ ServerArgs: base.TestServerArgs{ @@ -854,8 +934,7 @@ func TestLogicalJobResiliency(t *testing.T) { var jobAID jobspb.JobID dbA.QueryRow(t, "CREATE LOGICAL REPLICATION STREAM FROM TABLE tab ON $1 INTO TABLE tab", dbBURL.String()).Scan(&jobAID) - now := server.Server(0).Clock().Now() - t.Logf("waiting for replication job %d", jobAID) + now := s.Clock().Now() WaitUntilReplicatedTime(t, now, dbA, jobAID) progress := jobutils.GetJobProgress(t, dbA, jobAID) @@ -910,23 +989,71 @@ func TestHeartbeatCancel(t 
*testing.T) { dbA.QueryRow(t, "CREATE LOGICAL REPLICATION STREAM FROM TABLE tab ON $1 INTO TABLE tab", dbBURL.String()).Scan(&jobAID) dbB.QueryRow(t, "CREATE LOGICAL REPLICATION STREAM FROM TABLE tab ON $1 INTO TABLE tab", dbAURL.String()).Scan(&jobBID) - now := server.Server(0).Clock().Now() - t.Logf("waiting for replication job %d", jobAID) + now := s.Clock().Now() WaitUntilReplicatedTime(t, now, dbA, jobAID) - t.Logf("waiting for replication job %d", jobBID) WaitUntilReplicatedTime(t, now, dbB, jobBID) - var prodAID jobspb.JobID - dbA.QueryRow(t, "SELECT job_ID FROM [SHOW JOBS] WHERE job_type='REPLICATION STREAM PRODUCER'").Scan(&prodAID) + prodAID := replicationutils.GetProducerJobIDFromLDRJob(t, dbB, jobBID) // Cancel the producer job and wait for the heartbeat to pick up that the stream is inactive - t.Logf("Canceling replication producer %s", prodAID) + t.Logf("canceling replication producer %s", prodAID) dbA.QueryRow(t, "CANCEL JOB $1", prodAID) // The ingestion job should eventually retry because it detects 2 nodes are dead require.ErrorContains(t, <-retryErrorChan, fmt.Sprintf("replication stream %s is not running, status is STREAM_INACTIVE", prodAID)) } +func TestForeignKeyConstraints(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + ctx := context.Background() + + clusterArgs := base.TestClusterArgs{ + ServerArgs: base.TestServerArgs{ + DefaultTestTenant: base.TestControlsTenantsExplicitly, + Knobs: base.TestingKnobs{ + JobsTestingKnobs: jobs.NewTestingKnobsWithShortIntervals(), + }, + }, + } + + server, s, dbA, _ := setupLogicalTestServer(t, ctx, clusterArgs, 1) + defer server.Stopper().Stop(ctx) + + dbBURL, cleanupB := s.PGUrl(t, serverutils.DBName("b")) + defer cleanupB() + + dbA.Exec(t, "CREATE TABLE test(a int primary key, b int)") + + testutils.RunTrueAndFalse(t, "immediate-mode", func(t *testing.T, immediateMode bool) { + testutils.RunTrueAndFalse(t, "valid-foreign-key", func(t *testing.T, validForeignKey bool) { + fkStmt := "ALTER TABLE test ADD CONSTRAINT fkc FOREIGN KEY (b) REFERENCES tab(pk)" + if !validForeignKey { + fkStmt = fkStmt + " NOT VALID" + } + dbA.Exec(t, fkStmt) + + var mode string + if immediateMode { + mode = "IMMEDIATE" + } else { + mode = "VALIDATED" + } + + var jobID jobspb.JobID + stmt := "CREATE LOGICAL REPLICATION STREAM FROM TABLE tab ON $1 INTO TABLE tab WITH MODE = " + mode + if immediateMode && validForeignKey { + dbA.ExpectErr(t, "only 'NOT VALID' foreign keys are only supported with MODE = 'validated'", stmt, dbBURL.String()) + } else { + dbA.QueryRow(t, stmt, dbBURL.String()).Scan(&jobID) + } + + dbA.Exec(t, "ALTER TABLE test DROP CONSTRAINT fkc") + }) + }) +} + func setupLogicalTestServer( t *testing.T, ctx context.Context, clusterArgs base.TestClusterArgs, numNodes int, ) ( @@ -948,7 +1075,7 @@ func setupLogicalTestServer( dbA := sqlutils.MakeSQLRunner(s.SQLConn(t, serverutils.DBName("a"))) dbB := sqlutils.MakeSQLRunner(s.SQLConn(t, serverutils.DBName("b"))) - sysDB := sqlutils.MakeSQLRunner(server.Server(0).SystemLayer().SQLConn(t)) + sysDB := sqlutils.MakeSQLRunner(server.SystemLayer(0).SQLConn(t)) for _, s := range testClusterSystemSettings { sysDB.Exec(t, s) } @@ -1000,7 +1127,7 @@ func CreateScatteredTable(t *testing.T, db *sqlutils.SQLRunner, numNodes int, db // ranges, so if we write just a few ranges those might all be on a single // server, which will cause the test to flake.
numRanges := 50 - rowsPerRange := 40 + rowsPerRange := 20 db.Exec(t, "INSERT INTO tab (pk) SELECT * FROM generate_series(1, $1)", numRanges*rowsPerRange) db.Exec(t, "ALTER TABLE tab SPLIT AT (SELECT * FROM generate_series($1::INT, $2::INT, $3::INT))", @@ -1028,15 +1155,16 @@ func CreateScatteredTable(t *testing.T, db *sqlutils.SQLRunner, numNodes int, db func WaitUntilReplicatedTime( t *testing.T, targetTime hlc.Timestamp, db *sqlutils.SQLRunner, ingestionJobID jobspb.JobID, ) { + t.Logf("waiting for logical replication job %d to reach replicated time of %s", ingestionJobID, targetTime) testutils.SucceedsSoon(t, func() error { progress := jobutils.GetJobProgress(t, db, ingestionJobID) replicatedTime := progress.Details.(*jobspb.Progress_LogicalReplication).LogicalReplication.ReplicatedTime if replicatedTime.IsEmpty() { - return errors.Newf("stream ingestion has not recorded any progress yet, waiting to advance pos %s", + return errors.Newf("logical replication has not recorded any progress yet, waiting to advance pos %s", targetTime) } if replicatedTime.Less(targetTime) { - return errors.Newf("waiting for stream ingestion job progress %s to advance beyond %s", + return errors.Newf("waiting for logical replication job replicated time %s to advance beyond %s", replicatedTime, targetTime) } return nil @@ -1119,6 +1247,8 @@ func TestLogicalStreamIngestionJobWithFallbackUDF(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) + skip.WithIssue(t, 129569, "flakey test") + ctx := context.Background() server, s, dbA, dbB := setupLogicalTestServer(t, ctx, base.TestClusterArgs{ ServerArgs: base.TestServerArgs{ @@ -1134,6 +1264,11 @@ func TestLogicalStreamIngestionJobWithFallbackUDF(t *testing.T) { RETURNS string AS $$ BEGIN + SELECT crdb_internal.log((proposed).payload); + IF existing IS NULL THEN + RETURN 'accept_proposed'; + END IF; + IF existing_origin_timestamp IS NULL THEN IF existing_mvcc_timestamp < proposed_mvcc_timestamp THEN SELECT crdb_internal.log('case 1'); @@ -1177,9 +1312,8 @@ func TestLogicalStreamIngestionJobWithFallbackUDF(t *testing.T) { dbB.QueryRow(t, "CREATE LOGICAL REPLICATION STREAM FROM TABLE tab ON $1 INTO TABLE tab WITH FUNCTION repl_apply FOR TABLE tab", dbAURL.String()).Scan(&jobBID) now := s.Clock().Now() - t.Logf("waiting for replication job %d", jobAID) + WaitUntilReplicatedTime(t, now, dbA, jobAID) - t.Logf("waiting for replication job %d", jobBID) WaitUntilReplicatedTime(t, now, dbB, jobBID) dbA.Exec(t, "INSERT INTO tab VALUES (2, 'potato')") @@ -1275,3 +1409,213 @@ func TestLogicalReplicationPlanner(t *testing.T) { requireAsOf(replicatedTime) }) } + +func TestShowLogicalReplicationJobs(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + ctx := context.Background() + + server, s, dbA, dbB := setupLogicalTestServer(t, ctx, testClusterBaseClusterArgs, 1) + defer server.Stopper().Stop(ctx) + + dbAURL, cleanup := s.PGUrl(t, + serverutils.DBName("a"), + serverutils.UserPassword(username.RootUser, "password")) + defer cleanup() + + dbBURL, cleanupB := s.PGUrl(t, + serverutils.DBName("b"), + serverutils.UserPassword(username.RootUser, "password")) + defer cleanupB() + + redactedDbAURL := strings.Replace(dbAURL.String(), "password", `redacted`, 1) + redactedDbBURL := strings.Replace(dbBURL.String(), "password", `redacted`, 1) + + redactedJobADescription := fmt.Sprintf("LOGICAL REPLICATION STREAM into a.public.tab from %s", redactedDbBURL) + redactedJobBDescription := fmt.Sprintf("LOGICAL REPLICATION STREAM into b.public.tab 
from %s", redactedDbAURL) + + var ( + jobAID jobspb.JobID + jobBID jobspb.JobID + ) + dbA.QueryRow(t, + "CREATE LOGICAL REPLICATION STREAM FROM TABLE tab on $1 INTO TABLE tab", + dbBURL.String()).Scan(&jobAID) + dbB.QueryRow(t, + "CREATE LOGICAL REPLICATION STREAM FROM TABLE tab on $1 INTO TABLE tab WITH DEFAULT FUNCTION = 'dlq'", + dbAURL.String()).Scan(&jobBID) + + now := s.Clock().Now() + WaitUntilReplicatedTime(t, now, dbA, jobAID) + WaitUntilReplicatedTime(t, now, dbB, jobBID) + + // Sort job IDs to match rows ordered with ORDER BY clause + jobIDs := []jobspb.JobID{jobAID, jobBID} + slices.Sort(jobIDs) + + var expectedReplicatedTimes []time.Time + for _, jobID := range jobIDs { + progress := jobutils.GetJobProgress(t, dbA, jobID) + replicatedTime := progress.GetLogicalReplication().ReplicatedTime.GoTime().Round(time.Microsecond) + expectedReplicatedTimes = append(expectedReplicatedTimes, replicatedTime) + } + + var ( + jobID jobspb.JobID + status string + targets pq.StringArray + replicatedTime time.Time + replicationStartTime time.Time + conflictResolutionType string + description string + ) + + showRows := dbA.Query(t, "SELECT * FROM [SHOW LOGICAL REPLICATION JOBS] ORDER BY job_id") + defer showRows.Close() + + rowIdx := 0 + for showRows.Next() { + err := showRows.Scan(&jobID, &status, &targets, &replicatedTime) + require.NoError(t, err) + + expectedJobID := jobIDs[rowIdx] + require.Equal(t, expectedJobID, jobID) + require.Equal(t, jobs.StatusRunning, jobs.Status(status)) + + if expectedJobID == jobAID { + require.Equal(t, pq.StringArray{"a.public.tab"}, targets) + } else if expectedJobID == jobBID { + require.Equal(t, pq.StringArray{"b.public.tab"}, targets) + } + + // `SHOW LOGICAL REPLICATION JOBS` query runs after the job query in `jobutils.GetJobProgress()`, + // `LogicalReplicationProgress.ReplicatedTime` could have advanced by the time we run + // `SHOW LOGICAL REPLICATION JOBS`, therefore expectedReplicatedTime should be less than or equal to + // replicatedTime. 
+ require.LessOrEqual(t, expectedReplicatedTimes[rowIdx], replicatedTime) + + rowIdx++ + } + require.Equal(t, 2, rowIdx) + + showWithDetailsRows := dbA.Query(t, "SELECT * FROM [SHOW LOGICAL REPLICATION JOBS WITH DETAILS] ORDER BY job_id") + defer showWithDetailsRows.Close() + + rowIdx = 0 + for showWithDetailsRows.Next() { + err := showWithDetailsRows.Scan( + &jobID, + &status, + &targets, + &replicatedTime, + &replicationStartTime, + &conflictResolutionType, + &description) + require.NoError(t, err) + + expectedJobID := jobIDs[rowIdx] + payload := jobutils.GetJobPayload(t, dbA, expectedJobID) + expectedReplicationStartTime := payload.GetLogicalReplicationDetails().ReplicationStartTime.GoTime().Round(time.Microsecond) + require.Equal(t, expectedReplicationStartTime, replicationStartTime) + + expectedConflictResolutionType := payload.GetLogicalReplicationDetails().DefaultConflictResolution.ConflictResolutionType.String() + require.Equal(t, expectedConflictResolutionType, conflictResolutionType) + + expectedJobDescription := payload.Description + + // Verify that URL is redacted in job descriptions + if jobID == jobAID { + require.Equal(t, redactedJobADescription, expectedJobDescription) + } else if jobID == jobBID { + require.Equal(t, redactedJobBDescription, expectedJobDescription) + } + + require.Equal(t, expectedJobDescription, description) + + rowIdx++ + } + require.Equal(t, 2, rowIdx) + + dbA.Exec(t, "CANCEL JOB $1", jobAID.String()) + dbA.Exec(t, "CANCEL JOB $1", jobBID.String()) + + jobutils.WaitForJobToCancel(t, dbA, jobAID) + jobutils.WaitForJobToCancel(t, dbA, jobBID) +} + +// TestUserPrivileges verifies the grants and role permissions +// needed to start and administer LDR +func TestUserPrivileges(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + ctx := context.Background() + clusterArgs := base.TestClusterArgs{ + ServerArgs: base.TestServerArgs{ + DefaultTestTenant: base.TestControlsTenantsExplicitly, + Knobs: base.TestingKnobs{ + JobsTestingKnobs: jobs.NewTestingKnobsWithShortIntervals(), + }, + }, + } + + server, s, dbA, _ := setupLogicalTestServer(t, ctx, clusterArgs, 1) + defer server.Stopper().Stop(ctx) + + dbBURL, cleanupB := s.PGUrl(t, serverutils.DBName("b")) + defer cleanupB() + + var jobAID jobspb.JobID + dbA.QueryRow(t, "CREATE LOGICAL REPLICATION STREAM FROM TABLE tab ON $1 INTO TABLE tab", dbBURL.String()).Scan(&jobAID) + + // Create user with no privileges + dbA.Exec(t, fmt.Sprintf("CREATE USER %s", username.TestUser)) + testuser := sqlutils.MakeSQLRunner(s.SQLConn(t, serverutils.User(username.TestUser), serverutils.DBName("a"))) + + t.Run("view-job", func(t *testing.T) { + showJobStmt := "select job_id from [SHOW JOBS] where job_id=$1" + showLDRJobStmt := "select job_id from [SHOW LOGICAL REPLICATION JOBS] where job_id=$1" + // NEED VIEWJOB system grant to view admin LDR jobs + result := testuser.QueryStr(t, showJobStmt, jobAID) + require.Empty(t, result, "The user should see no rows without the VIEWJOB grant when running [SHOW JOBS]") + + result = testuser.QueryStr(t, showLDRJobStmt, jobAID) + require.Empty(t, result, "The user should see no rows without the VIEWJOB grant when running [SHOW LOGICAL REPLICATION JOBS]") + + var returnedJobID jobspb.JobID + dbA.Exec(t, fmt.Sprintf("GRANT SYSTEM VIEWJOB to %s", username.TestUser)) + testuser.QueryRow(t, showJobStmt, jobAID).Scan(&returnedJobID) + require.Equal(t, returnedJobID, jobAID, "The user should see the LDR job with the VIEWJOB grant when running [SHOW JOBS]") + + 
testuser.QueryRow(t, showLDRJobStmt, jobAID).Scan(&returnedJobID) + require.Equal(t, returnedJobID, jobAID, "The user should see the LDR job with the VIEWJOB grant when running [SHOW LOGICAL REPLICATION JOBS]") + }) + + // Kill replication job so we can create one with the testuser for the following test + dbA.Exec(t, "CANCEL JOB $1", jobAID) + jobutils.WaitForJobToCancel(t, dbA, jobAID) + + t.Run("create-on-schema", func(t *testing.T) { + dbA.Exec(t, "CREATE SCHEMA testschema") + + testuser.ExpectErr(t, "user testuser does not have CREATE privilege on schema testschema", fmt.Sprintf(testingUDFAcceptProposedBaseWithSchema, "testschema", "tab")) + dbA.Exec(t, "GRANT CREATE ON SCHEMA testschema TO testuser") + testuser.Exec(t, fmt.Sprintf(testingUDFAcceptProposedBaseWithSchema, "testschema", "tab")) + }) + + t.Run("replication", func(t *testing.T) { + createWithUDFStmt := "CREATE LOGICAL REPLICATION STREAM FROM TABLE tab ON $1 INTO TABLE tab WITH DEFAULT FUNCTION = 'testschema.repl_apply'" + testuser.ExpectErr(t, "user testuser does not have REPLICATION system privilege", createWithUDFStmt, dbBURL.String()) + dbA.Exec(t, fmt.Sprintf("GRANT SYSTEM REPLICATION TO %s", username.TestUser)) + testuser.QueryRow(t, createWithUDFStmt, dbBURL.String()).Scan(&jobAID) + }) + + t.Run("control-job", func(t *testing.T) { + pauseJobStmt := "PAUSE JOB $1" + testuser.ExpectErr(t, fmt.Sprintf("user testuser does not have privileges for job %s", jobAID), pauseJobStmt, jobAID) + + dbA.Exec(t, fmt.Sprintf("GRANT SYSTEM CONTROLJOB to %s", username.TestUser)) + testuser.Exec(t, pauseJobStmt, jobAID) + jobutils.WaitForJobToPause(t, dbA, jobAID) + }) +} diff --git a/pkg/ccl/crosscluster/logical/logical_replication_writer_processor.go b/pkg/ccl/crosscluster/logical/logical_replication_writer_processor.go index 9c1e5edb0229..8ba5baa3c4c8 100644 --- a/pkg/ccl/crosscluster/logical/logical_replication_writer_processor.go +++ b/pkg/ccl/crosscluster/logical/logical_replication_writer_processor.go @@ -107,8 +107,6 @@ var ( _ execinfra.RowSource = &logicalReplicationWriterProcessor{} ) -const useKVWriter = false - const logicalReplicationWriterProcessorName = "logical-replication-writer-processor" func newLogicalReplicationWriterProcessor( @@ -129,40 +127,50 @@ func newLogicalReplicationWriterProcessor( } tableConfigs := make(map[descpb.ID]sqlProcessorTableConfig) - tableIDToName := make(map[int32]fullyQualifiedTableName) - for tableID, md := range spec.TableMetadata { + srcTableIDToDstMeta := make(map[descpb.ID]dstTableMetadata) + for dstTableID, md := range spec.TableMetadata { desc := md.SourceDescriptor - tableConfigs[descpb.ID(tableID)] = sqlProcessorTableConfig{ + tableConfigs[descpb.ID(dstTableID)] = sqlProcessorTableConfig{ srcDesc: tabledesc.NewBuilder(&desc).BuildImmutableTable(), dstOID: md.DestinationFunctionOID, } - tableIDToName[tableID] = fullyQualifiedTableName{ + srcTableID := desc.GetID() + srcTableIDToDstMeta[srcTableID] = dstTableMetadata{ database: md.DestinationParentDatabaseName, schema: md.DestinationParentSchemaName, table: md.DestinationTableName, + tableID: descpb.ID(dstTableID), } } bhPool := make([]BatchHandler, maxWriterWorkers) for i := range bhPool { + sqlRP, err := makeSQLProcessor( + ctx, flowCtx.Cfg.Settings, tableConfigs, + jobspb.JobID(spec.JobID), + // Initialize the executor with a fresh session data - this will + // avoid creating a new copy on each executor usage. 
+ flowCtx.Cfg.DB.Executor(isql.WithSessionData(sql.NewInternalSessionData(ctx, flowCtx.Cfg.Settings, "" /* opName */))), + ) + if err != nil { + return nil, err + } var rp RowProcessor - if useKVWriter { - rp, err = newKVRowProcessor(ctx, flowCtx.Cfg, flowCtx.EvalCtx, tableConfigs) + if spec.Mode == jobspb.LogicalReplicationDetails_Immediate { + rp, err = newKVRowProcessor(ctx, flowCtx.Cfg, flowCtx.EvalCtx, tableConfigs, sqlRP) if err != nil { return nil, err } } else { - rp, err = makeSQLProcessor( - ctx, flowCtx.Cfg.Settings, tableConfigs, - jobspb.JobID(spec.JobID), - // Initialize the executor with a fresh session data - this will - // avoid creating a new copy on each executor usage. - flowCtx.Cfg.DB.Executor(isql.WithSessionData(sql.NewInternalSessionData(ctx, flowCtx.Cfg.Settings, "" /* opName */))), - ) - if err != nil { - return nil, err + rp = sqlRP + } + + if streamingKnobs, ok := flowCtx.TestingKnobs().StreamingTestingKnobs.(*sql.StreamingTestingKnobs); ok { + if streamingKnobs != nil && streamingKnobs.FailureRate != 0 { + rp.SetSyntheticFailurePercent(streamingKnobs.FailureRate) } } + bhPool[i] = &txnBatch{ db: flowCtx.Cfg.DB, rp: rp, @@ -215,7 +223,7 @@ func newLogicalReplicationWriterProcessor( StreamID: streampb.StreamID(spec.StreamID), ProcessorID: processorID, }, - dlqClient: InitDeadLetterQueueClient(dlqDbExec, tableIDToName), + dlqClient: InitDeadLetterQueueClient(dlqDbExec, srcTableIDToDstMeta), metrics: flowCtx.Cfg.JobRegistry.MetricsStruct().JobSpecificMetrics[jobspb.TypeLogicalReplication].(*Metrics), } lrw.purgatory = purgatory{ @@ -288,7 +296,7 @@ func (lrw *logicalReplicationWriterProcessor) Start(ctx context.Context) { if streamingKnobs, ok := lrw.FlowCtx.TestingKnobs().StreamingTestingKnobs.(*sql.StreamingTestingKnobs); ok { if streamingKnobs != nil && streamingKnobs.BeforeClientSubscribe != nil { - streamingKnobs.BeforeClientSubscribe(addr, string(token), lrw.frontier) + streamingKnobs.BeforeClientSubscribe(addr, string(token), lrw.frontier, lrw.spec.IgnoreCDCIgnoredTTLDeletes) } } sub, err := streamClient.Subscribe(ctx, @@ -296,7 +304,7 @@ func (lrw *logicalReplicationWriterProcessor) Start(ctx context.Context) { int32(lrw.FlowCtx.NodeID.SQLInstanceID()), lrw.ProcessorID, token, lrw.spec.InitialScanTimestamp, lrw.frontier, - streamclient.WithFiltering(true), + streamclient.WithFiltering(lrw.spec.IgnoreCDCIgnoredTTLDeletes), streamclient.WithDiff(true), ) if err != nil { @@ -388,11 +396,6 @@ func (lrw *logicalReplicationWriterProcessor) close() { if lrw.Closed { return } - - for _, b := range lrw.bh { - b.Close(lrw.Ctx()) - } - defer lrw.frontier.Release() if lrw.streamPartitionClient != nil { @@ -412,6 +415,10 @@ func (lrw *logicalReplicationWriterProcessor) close() { log.Errorf(lrw.Ctx(), "error on close(): %s", err) } + for _, b := range lrw.bh { + b.Close(lrw.Ctx()) + } + // Update the global retry queue gauges to reflect that this queue is going // away, including everything in it that is included in those gauges. lrw.purgatory.bytesGauge.Dec(lrw.purgatory.bytes) @@ -638,6 +645,14 @@ func (lrw *logicalReplicationWriterProcessor) flushBuffer( todo = todo[len(chunk):] bh := lrw.bh[worker] + if err := ctx.Err(); err != nil { + // Bail early if ctx is canceled. NB: we break rather than return the err + // now since we still need to Wait() to avoid leaking a goroutine. We will + // re-check for any ctx errors after the Wait() in case all workers had + // completed without error as of this break. 
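The bail-early comment above captures a subtle ordering requirement in flushBuffer: stop spawning workers once the context is canceled, still Wait() for the ones already running, then re-check the context so skipped chunks are not silently treated as flushed. A standalone sketch of the same idiom using the stock errgroup package rather than CockroachDB's ctxgroup wrapper (dispatchChunks and its parameters are made up for illustration):

```go
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

// dispatchChunks mirrors the flushBuffer idiom: stop handing out chunks once
// the context is canceled, but always Wait() so no worker goroutine leaks,
// and re-check ctx afterwards since Wait() can return nil even though some
// chunks were never dispatched.
func dispatchChunks(ctx context.Context, chunks [][]byte, apply func(context.Context, []byte) error) error {
	g, ctx := errgroup.WithContext(ctx)
	for _, chunk := range chunks {
		if ctx.Err() != nil {
			// Break rather than return: workers already started must be drained.
			break
		}
		chunk := chunk
		g.Go(func() error { return apply(ctx, chunk) })
	}
	if err := g.Wait(); err != nil {
		return err
	}
	// All spawned workers succeeded, but we may have skipped some chunks.
	return ctx.Err()
}

func main() {
	err := dispatchChunks(context.Background(), [][]byte{[]byte("a")},
		func(context.Context, []byte) error { return nil })
	fmt.Println(err) // <nil>
}
```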
+ break + } + g.GoCtx(func(ctx context.Context) error { s, err := lrw.flushChunk(ctx, bh, chunk, canRetry) if err != nil { @@ -645,6 +660,7 @@ func (lrw *logicalReplicationWriterProcessor) flushBuffer( } perChunkStats[worker] = s lrw.metrics.OptimisticInsertConflictCount.Inc(s.optimisticInsertConflicts) + lrw.metrics.KVWriteFallbackCount.Inc(s.kvWriteFallbacks) return nil }) } @@ -653,6 +669,10 @@ func (lrw *logicalReplicationWriterProcessor) flushBuffer( return nil, 0, err } + if err := ctx.Err(); err != nil { + return nil, 0, err + } + var stats flushStats for _, i := range perChunkStats { stats.Add(i) @@ -763,6 +783,10 @@ func (lrw *logicalReplicationWriterProcessor) flushChunk( preBatchTime := timeutil.Now() if s, err := bh.HandleBatch(ctx, batch); err != nil { + if ctxErr := ctx.Err(); ctxErr != nil { + return flushStats{}, ctxErr + } + // If it already failed while applying on its own, handle the failure. if len(batch) == 1 { if eligibility := lrw.shouldRetryLater(err, canRetry); eligibility != retryAllowed { @@ -779,6 +803,9 @@ func (lrw *logicalReplicationWriterProcessor) flushChunk( // to apply on its own before switching to handle its failure. for i := range batch { if singleStats, err := bh.HandleBatch(ctx, batch[i:i+1]); err != nil { + if ctxErr := ctx.Err(); ctxErr != nil { + return flushStats{}, ctxErr + } if eligibility := lrw.shouldRetryLater(err, canRetry); eligibility != retryAllowed { if err := lrw.dlq(ctx, batch[i], bh.GetLastRow(), err, eligibility); err != nil { return flushStats{}, err @@ -790,6 +817,7 @@ func (lrw *logicalReplicationWriterProcessor) flushChunk( } } else { stats.optimisticInsertConflicts += singleStats.optimisticInsertConflicts + stats.kvWriteFallbacks += singleStats.kvWriteFallbacks batch[i] = streampb.StreamEvent_KV{} stats.processed.success++ stats.processed.bytes += int64(batch[i].Size()) @@ -798,6 +826,7 @@ func (lrw *logicalReplicationWriterProcessor) flushChunk( } } else { stats.optimisticInsertConflicts += s.optimisticInsertConflicts + stats.kvWriteFallbacks += s.kvWriteFallbacks stats.processed.success += int64(len(batch)) // Clear the event to indicate successful application. for i := range batch { @@ -823,6 +852,11 @@ func (lrw *logicalReplicationWriterProcessor) shouldRetryLater( if eligibility != retryAllowed { return eligibility } + + if errors.Is(err, errInjected) { + return tooOld + } + // TODO(dt): maybe this should only be constraint violation errors? 
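flushChunk's failure handling above follows a common bulk-apply shape: try the whole batch in one transaction, and only on failure reapply row by row, diverting rows that fail for a non-retryable reason to the DLQ while leaving retryable ones for purgatory. A toy sketch of that control flow (applyChunk and its helpers are hypothetical, not the real signatures):

```go
package main

import (
	"errors"
	"fmt"
)

// applyChunk tries the batch in one shot; on failure it retries each row
// alone, routing rows that still fail non-retryably to the DLQ and counting
// retryable failures so the caller can park them in purgatory.
func applyChunk(
	batch []string,
	apply func(rows ...string) error,
	retryable func(error) bool,
	dlq func(row string, err error),
) (applied, parked int) {
	if apply(batch...) == nil {
		return len(batch), 0
	}
	for _, row := range batch {
		err := apply(row)
		switch {
		case err == nil:
			applied++
		case retryable(err):
			parked++ // retried later from purgatory
		default:
			dlq(row, err)
		}
	}
	return applied, parked
}

func main() {
	errConflict := errors.New("conflict")
	apply := func(rows ...string) error {
		if len(rows) > 1 || rows[0] == "bad" {
			return errConflict
		}
		return nil
	}
	a, p := applyChunk([]string{"ok", "bad"}, apply,
		func(error) bool { return false },
		func(row string, err error) { fmt.Println("dlq:", row, err) })
	fmt.Println(a, p) // prints "dlq: bad conflict", then "1 0"
}
```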
return retryAllowed } @@ -867,6 +901,7 @@ func (lrw *logicalReplicationWriterProcessor) dlq( type batchStats struct { optimisticInsertConflicts int64 + kvWriteFallbacks int64 } type flushStats struct { processed struct { @@ -875,7 +910,7 @@ type flushStats struct { notProcessed struct { count, bytes int64 } - optimisticInsertConflicts int64 + optimisticInsertConflicts, kvWriteFallbacks int64 } func (b *flushStats) Add(o flushStats) { @@ -885,6 +920,7 @@ func (b *flushStats) Add(o flushStats) { b.notProcessed.count += o.notProcessed.count b.notProcessed.bytes += o.notProcessed.bytes b.optimisticInsertConflicts += o.optimisticInsertConflicts + b.kvWriteFallbacks += o.kvWriteFallbacks } type BatchHandler interface { diff --git a/pkg/ccl/crosscluster/logical/lww_kv_processor.go b/pkg/ccl/crosscluster/logical/lww_kv_processor.go index 39675354d3de..d776540d9130 100644 --- a/pkg/ccl/crosscluster/logical/lww_kv_processor.go +++ b/pkg/ccl/crosscluster/logical/lww_kv_processor.go @@ -26,6 +26,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/sem/eval" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" + "github.com/cockroachdb/cockroach/pkg/util/admission/admissionpb" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/errors" ) @@ -42,6 +43,7 @@ type kvRowProcessor struct { dstBySrc map[descpb.ID]descpb.ID writers map[descpb.ID]*kvTableWriter + fallback *sqlRowProcessor failureInjector } @@ -52,6 +54,7 @@ func newKVRowProcessor( cfg *execinfra.ServerConfig, evalCtx *eval.Context, srcTablesByDestID map[descpb.ID]sqlProcessorTableConfig, + fallback *sqlRowProcessor, ) (*kvRowProcessor, error) { cdcEventTargets := changefeedbase.Targets{} srcTablesBySrcID := make(map[descpb.ID]catalog.TableDescriptor, len(srcTablesByDestID)) @@ -82,6 +85,7 @@ func newKVRowProcessor( writers: make(map[descpb.ID]*kvTableWriter, len(srcTablesByDestID)), decoder: cdcevent.NewEventDecoderWithCache(ctx, rfCache, false, false), alloc: &tree.DatumAlloc{}, + fallback: fallback, } return p, nil } @@ -91,10 +95,6 @@ var originID1Options = &kvpb.WriteOptions{OriginID: 1} func (p *kvRowProcessor) ProcessRow( ctx context.Context, txn isql.Txn, keyValue roachpb.KeyValue, prevValue roachpb.Value, ) (batchStats, error) { - if err := p.injectFailure(); err != nil { - return batchStats{}, err - } - var err error keyValue.Key, err = keys.StripTenantPrefix(keyValue.Key) if err != nil { @@ -108,9 +108,30 @@ func (p *kvRowProcessor) ProcessRow( } p.lastRow = row - if err := p.processParsedRow(ctx, txn, row, keyValue, prevValue); err != nil { + if err = p.injectFailure(); err != nil { return batchStats{}, err } + + // TODO(dt, ssd): the rangefeed prev value does not include its mvcc ts, which + // is a problem for us if we want to use CPut to replace the old row with the + // new row, because our local version of the old row is likely to have the + // remote version's mvcc timestamp in its origin ts column, i.e. in the value. + // Without knowing the remote previous row's ts, we cannot exactly reconstruct + // the value of our local row to put in the expected value for a CPut. + // Instead, for now, we just don't use the direct CPut for anything other than + // inserts. If/when we have a LDR-flavor CPut (or if we move the TS out and + // decide that equal values negate LWW) we can remove this. 
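The workaround that the TODO above settles on shows up directly in ProcessRow below: rows carrying a previous value skip the KV fast path entirely, and a failed direct-KV attempt falls back to the SQL processor, with the fallback counted only when it succeeds. A stripped-down sketch of that control flow (toy types and func values; the real code threads cdcevent rows and transactions through both paths):

```go
package main

import (
	"errors"
	"fmt"
)

type stats struct{ kvWriteFallbacks int64 }

// processRow sketches the fast-path/fallback split: only rows without a
// previous value attempt the direct KV write; a row that replaces an
// existing one goes straight to the SQL path, and a failed KV attempt also
// falls back to SQL, counting the fallback only if the fallback succeeds.
func processRow(hasPrev bool, kvWrite, sqlWrite func() error) (stats, error) {
	if hasPrev {
		// Without the previous row's MVCC timestamp we cannot reconstruct
		// the expected value for a CPut, so take the SQL path directly.
		return stats{}, sqlWrite()
	}
	if err := kvWrite(); err != nil {
		if fbErr := sqlWrite(); fbErr != nil {
			return stats{}, fbErr
		}
		return stats{kvWriteFallbacks: 1}, nil
	}
	return stats{}, nil
}

func main() {
	s, err := processRow(false,
		func() error { return errors.New("cput conflict") }, // KV attempt fails...
		func() error { return nil },                         // ...SQL fallback succeeds.
	)
	fmt.Println(s.kvWriteFallbacks, err) // 1 <nil>
}
```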
+ if prevValue.IsPresent() { + return p.fallback.processParsedRow(ctx, txn, row, keyValue.Key, prevValue) + } + + if err := p.processParsedRow(ctx, txn, row, keyValue, prevValue); err != nil { + stats, err := p.fallback.processParsedRow(ctx, txn, row, keyValue.Key, prevValue) + if err == nil { + stats.kvWriteFallbacks += 1 + } + return stats, err + } return batchStats{}, nil } @@ -123,10 +144,17 @@ func (p *kvRowProcessor) processParsedRow( return errors.AssertionFailedf("replication configuration missing for table %d / %q", row.TableID, row.TableName) } + makeBatch := func(txn *kv.Txn) *kv.Batch { + b := txn.NewBatch() + b.Header.WriteOptions = originID1Options + b.AdmissionHeader.Priority = int32(admissionpb.BulkLowPri) + b.AdmissionHeader.Source = kvpb.AdmissionHeader_FROM_SQL + return b + } + if txn == nil { if err := p.cfg.DB.KV().Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - b := txn.NewBatch() - b.Header.WriteOptions = originID1Options + b := makeBatch(txn) if err := p.addToBatch(ctx, txn, b, dstTableID, row, k, prevValue); err != nil { return err @@ -140,8 +168,7 @@ func (p *kvRowProcessor) processParsedRow( } kvTxn := txn.KV() - b := kvTxn.NewBatch() - b.Header.WriteOptions = originID1Options + b := makeBatch(kvTxn) if err := p.addToBatch(ctx, kvTxn, b, dstTableID, row, k, prevValue); err != nil { return err @@ -200,7 +227,7 @@ func (p *kvRowProcessor) GetLastRow() cdcevent.Row { // SetSyntheticFailurePercent implements the RowProcessor interface. func (p *kvRowProcessor) SetSyntheticFailurePercent(rate uint32) { - // TODO(dt): support failure injection. + p.rate = rate } func (p *kvRowProcessor) Close(ctx context.Context) { @@ -254,6 +281,7 @@ type kvTableWriter struct { ru row.Updater ri row.Inserter rd row.Deleter + scratchTS tree.DDecimal } func newKVTableWriter( @@ -348,6 +376,10 @@ func (p *kvTableWriter) fillNew(vals cdcevent.Row) error { p.newVals = p.newVals[:0] if err := vals.ForAllColumns().Datum(func(d tree.Datum, col cdcevent.ResultColumn) error { // TODO(dt): add indirection from col ID to offset. + if col.Name == originTimestampColumnName { + p.scratchTS.Decimal = eval.TimestampToDecimal(vals.MvccTimestamp) + d = &p.scratchTS + } p.newVals = append(p.newVals, d) return nil }); err != nil { diff --git a/pkg/ccl/crosscluster/logical/lww_row_processor.go b/pkg/ccl/crosscluster/logical/lww_row_processor.go index 61f73588be24..d57c60a9e392 100644 --- a/pkg/ccl/crosscluster/logical/lww_row_processor.go +++ b/pkg/ccl/crosscluster/logical/lww_row_processor.go @@ -39,27 +39,12 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/metamorphic" "github.com/cockroachdb/cockroach/pkg/util/randutil" "github.com/cockroachdb/errors" - "github.com/cockroachdb/redact" ) const ( originTimestampColumnName = "crdb_replication_origin_timestamp" ) -type processorType string - -// SafeValue implements the redact.SafeValue interface. -func (p processorType) SafeValue() {} - -var _ redact.SafeValue = defaultSQLProcessor - -const ( - lwwProcessor processorType = "last-write-wins" - udfApplierProcessor processorType = "applier-udf" -) - -var defaultSQLProcessor = lwwProcessor - // A sqlRowProcessor is a RowProcessor that handles rows using the // provided querier. 
type sqlRowProcessor struct { @@ -79,7 +64,7 @@ type querier interface { AddTable(targetDescID int32, tc sqlProcessorTableConfig) error InsertRow(ctx context.Context, txn isql.Txn, ie isql.Executor, row cdcevent.Row, prevRow *cdcevent.Row, likelyInsert bool) (batchStats, error) DeleteRow(ctx context.Context, txn isql.Txn, ie isql.Executor, row cdcevent.Row, prevRow *cdcevent.Row) (batchStats, error) - RequiresParsedBeforeRow() bool + RequiresParsedBeforeRow(catid.DescID) bool } type queryBuilder struct { @@ -205,23 +190,6 @@ type sqlProcessorTableConfig struct { dstOID uint32 } -func makeSQLProcessor( - ctx context.Context, - settings *cluster.Settings, - tableConfigs map[descpb.ID]sqlProcessorTableConfig, - jobID jobspb.JobID, - ie isql.Executor, -) (*sqlRowProcessor, error) { - switch defaultSQLProcessor { - case lwwProcessor: - return makeSQLLastWriteWinsHandler(ctx, settings, tableConfigs, jobID, ie) - case udfApplierProcessor: - return makeUDFApplierProcessor(ctx, settings, tableConfigs, jobID, ie) - default: - return nil, errors.AssertionFailedf("unknown SQL processor: %s", defaultSQLProcessor) - } -} - func makeSQLProcessorFromQuerier( ctx context.Context, settings *cluster.Settings, @@ -271,7 +239,7 @@ func (p *failureInjector) SetSyntheticFailurePercent(rate uint32) { p.rate = rate } -func (p failureInjector) injectFailure() error { +func (p *failureInjector) injectFailure() error { if p.rate != 0 { if randutil.FastUint32()%100 < p.rate { return errInjected @@ -299,10 +267,16 @@ func (srp *sqlRowProcessor) ProcessRow( } srp.lastRow = row + return srp.processParsedRow(ctx, txn, row, kv.Key, prevValue) +} + +func (srp *sqlRowProcessor) processParsedRow( + ctx context.Context, txn isql.Txn, row cdcevent.Row, key roachpb.Key, prevValue roachpb.Value, +) (batchStats, error) { var parsedBeforeRow *cdcevent.Row - if srp.querier.RequiresParsedBeforeRow() { + if srp.querier.RequiresParsedBeforeRow(row.TableID) { before, err := srp.decoder.DecodeKV(ctx, roachpb.KeyValue{ - Key: kv.Key, + Key: key, Value: prevValue, }, cdcevent.PrevRow, prevValue.Timestamp, false) if err != nil { @@ -311,13 +285,10 @@ func (srp *sqlRowProcessor) ProcessRow( parsedBeforeRow = &before } - var stats batchStats if row.IsDeleted() { - stats, err = srp.querier.DeleteRow(ctx, txn, srp.ie, row, parsedBeforeRow) - } else { - stats, err = srp.querier.InsertRow(ctx, txn, srp.ie, row, parsedBeforeRow, prevValue.RawBytes == nil) + return srp.querier.DeleteRow(ctx, txn, srp.ie, row, parsedBeforeRow) } - return stats, err + return srp.querier.InsertRow(ctx, txn, srp.ie, row, parsedBeforeRow, prevValue.RawBytes == nil) } func (srp *sqlRowProcessor) GetLastRow() cdcevent.Row { @@ -394,7 +365,7 @@ const ( insertQueriesPessimisticIndex = 1 ) -func makeSQLLastWriteWinsHandler( +func makeSQLProcessor( ctx context.Context, settings *cluster.Settings, tableConfigs map[descpb.ID]sqlProcessorTableConfig, @@ -402,32 +373,76 @@ func makeSQLLastWriteWinsHandler( ie isql.Executor, ) (*sqlRowProcessor, error) { - needFallback := false - shouldUseFallback := make(map[catid.DescID]bool, len(tableConfigs)) + needUDFQuerier := false + shouldUseUDF := make(map[catid.DescID]bool, len(tableConfigs)) for _, tc := range tableConfigs { - shouldUseFallback[tc.srcDesc.GetID()] = tc.dstOID != 0 - needFallback = needFallback || tc.dstOID != 0 + shouldUseUDF[tc.srcDesc.GetID()] = tc.dstOID != 0 + needUDFQuerier = needUDFQuerier || tc.dstOID != 0 } - var fallbackQuerier querier - if needFallback { - fallbackQuerier = makeApplierQuerier(ctx, settings, 
tableConfigs, jobID, ie) + lwwQuerier := &lwwQuerier{ + settings: settings, + queryBuffer: queryBuffer{ + deleteQueries: make(map[catid.DescID]queryBuilder, len(tableConfigs)), + insertQueries: make(map[catid.DescID]map[catid.FamilyID]queryBuilder, len(tableConfigs)), + }, + ieOverrideOptimisticInsert: getIEOverride(replicatedOptimisticInsertOpName, jobID), + ieOverrideInsert: getIEOverride(replicatedInsertOpName, jobID), + ieOverrideDelete: getIEOverride(replicatedDeleteOpName, jobID), + } + var udfQuerier querier + if needUDFQuerier { + udfQuerier = makeApplierQuerier(ctx, settings, tableConfigs, jobID, ie) } - qb := queryBuffer{ - deleteQueries: make(map[catid.DescID]queryBuilder, len(tableConfigs)), - insertQueries: make(map[catid.DescID]map[catid.FamilyID]queryBuilder, len(tableConfigs)), + return makeSQLProcessorFromQuerier(ctx, settings, tableConfigs, ie, &muxQuerier{ + shouldUseUDF: shouldUseUDF, + lwwQuerier: lwwQuerier, + udfQuerier: udfQuerier, + }) + +} + +// muxQuerier is a querier that dispatches to either an LWW querier or a UDF +// querier. +type muxQuerier struct { + shouldUseUDF map[catid.DescID]bool + lwwQuerier querier + udfQuerier querier +} + +func (m *muxQuerier) AddTable(targetDescID int32, tc sqlProcessorTableConfig) error { + if m.shouldUseUDF[tc.srcDesc.GetID()] { + return m.udfQuerier.AddTable(targetDescID, tc) } - return makeSQLProcessorFromQuerier(ctx, settings, tableConfigs, ie, - &lwwQuerier{ - settings: settings, - queryBuffer: qb, - shouldUseFallback: shouldUseFallback, - fallbackQuerier: fallbackQuerier, - ieOverrideOptimisticInsert: getIEOverride(replicatedOptimisticInsertOpName, jobID), - ieOverrideInsert: getIEOverride(replicatedInsertOpName, jobID), - ieOverrideDelete: getIEOverride(replicatedDeleteOpName, jobID), - }) + return m.lwwQuerier.AddTable(targetDescID, tc) +} + +func (m *muxQuerier) InsertRow( + ctx context.Context, + txn isql.Txn, + ie isql.Executor, + row cdcevent.Row, + prevRow *cdcevent.Row, + likelyInsert bool, +) (batchStats, error) { + if m.shouldUseUDF[row.TableID] { + return m.udfQuerier.InsertRow(ctx, txn, ie, row, prevRow, likelyInsert) + } + return m.lwwQuerier.InsertRow(ctx, txn, ie, row, prevRow, likelyInsert) +} + +func (m *muxQuerier) DeleteRow( + ctx context.Context, txn isql.Txn, ie isql.Executor, row cdcevent.Row, prevRow *cdcevent.Row, +) (batchStats, error) { + if m.shouldUseUDF[row.TableID] { + return m.udfQuerier.DeleteRow(ctx, txn, ie, row, prevRow) + } + return m.lwwQuerier.DeleteRow(ctx, txn, ie, row, prevRow) +} + +func (m *muxQuerier) RequiresParsedBeforeRow(id catid.DescID) bool { + return m.shouldUseUDF[id] } // lwwQuerier is a querier that implements partial @@ -450,9 +465,6 @@ type lwwQuerier struct { settings *cluster.Settings queryBuffer queryBuffer - shouldUseFallback map[catid.DescID]bool - fallbackQuerier querier - ieOverrideOptimisticInsert sessiondata.InternalExecutorOverride ieOverrideInsert sessiondata.InternalExecutorOverride ieOverrideDelete sessiondata.InternalExecutorOverride @@ -469,26 +481,10 @@ func (lww *lwwQuerier) AddTable(targetDescID int32, tc sqlProcessorTableConfig) if err != nil { return err } - - if lww.shouldUseFallbackQuerier(td.GetID()) { - if err := lww.fallbackQuerier.AddTable(targetDescID, tc); err != nil { - return err - } - } return nil } -func (lww *lwwQuerier) shouldUseFallbackQuerier(id catid.DescID) bool { - if lww.fallbackQuerier == nil { - return false - } - return lww.shouldUseFallback[id] -} - -func (lww *lwwQuerier) RequiresParsedBeforeRow() bool { - if 
lww.fallbackQuerier != nil { - return lww.fallbackQuerier.RequiresParsedBeforeRow() - } +func (lww *lwwQuerier) RequiresParsedBeforeRow(catid.DescID) bool { return false } @@ -512,9 +508,7 @@ func (lww *lwwQuerier) InsertRow( return batchStats{}, err } - fallbackSpecified := lww.shouldUseFallbackQuerier(row.TableID) shouldTryOptimisticInsert := likelyInsert && tryOptimisticInsertEnabled.Get(&lww.settings.SV) - shouldTryOptimisticInsert = shouldTryOptimisticInsert || fallbackSpecified var optimisticInsertConflicts int64 if shouldTryOptimisticInsert { stmt, datums, err := insertQueryBuilder.Query(insertQueriesOptimisticIndex) @@ -536,15 +530,6 @@ func (lww *lwwQuerier) InsertRow( } } - if fallbackSpecified { - s, err := lww.fallbackQuerier.InsertRow(ctx, txn, ie, row, prevRow, likelyInsert) - if err != nil { - return batchStats{}, err - } - s.optimisticInsertConflicts += optimisticInsertConflicts - return s, err - } - stmt, datums, err := insertQueryBuilder.Query(insertQueriesPessimisticIndex) if err != nil { return batchStats{}, err diff --git a/pkg/ccl/crosscluster/logical/lww_row_processor_test.go b/pkg/ccl/crosscluster/logical/lww_row_processor_test.go index 1a2465996bd6..4cc291658dbc 100644 --- a/pkg/ccl/crosscluster/logical/lww_row_processor_test.go +++ b/pkg/ccl/crosscluster/logical/lww_row_processor_test.go @@ -12,6 +12,7 @@ import ( "context" "fmt" "testing" + "time" "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/ccl/crosscluster/replicationtestutils" @@ -19,8 +20,12 @@ import ( "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/sql/catalog/desctestutils" + "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/isql" + "github.com/cockroachdb/cockroach/pkg/sql/sem/eval" + "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" "github.com/cockroachdb/cockroach/pkg/util/hlc" @@ -81,7 +86,7 @@ func TestLWWInsertQueryGeneration(t *testing.T) { tableNameDst := createTable(t, schemaTmpl) srcDesc := desctestutils.TestingGetPublicTableDescriptor(s.DB(), s.Codec(), "defaultdb", tableNameSrc) dstDesc := desctestutils.TestingGetPublicTableDescriptor(s.DB(), s.Codec(), "defaultdb", tableNameDst) - rp, err := makeSQLLastWriteWinsHandler(ctx, s.ClusterSettings(), map[descpb.ID]sqlProcessorTableConfig{ + rp, err := makeSQLProcessor(ctx, s.ClusterSettings(), map[descpb.ID]sqlProcessorTableConfig{ dstDesc.GetID(): { srcDesc: srcDesc, }, @@ -150,7 +155,7 @@ func BenchmarkLWWInsertBatch(b *testing.B) { desc := desctestutils.TestingGetPublicTableDescriptor(kvDB, s.Codec(), "defaultdb", tableName) // Simulate how we set up the row processor on the main code path. 
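The muxQuerier introduced above replaces this per-call fallback plumbing in lwwQuerier: the LWW-or-UDF decision is made once per table, keyed on the source descriptor ID, which is also why RequiresParsedBeforeRow now takes a descriptor ID. Its shape in miniature (toy interface and integer IDs, not the real cdcevent-based signatures):

```go
package main

import "fmt"

// querier is a toy stand-in for the real querier interface: the muxQuerier
// pattern keys dispatch off the source table ID so each replicated table can
// use its own apply strategy.
type querier interface {
	InsertRow(tableID int) string
	RequiresParsedBeforeRow(tableID int) bool
}

type lww struct{}

func (lww) InsertRow(int) string             { return "lww upsert" }
func (lww) RequiresParsedBeforeRow(int) bool { return false }

type udf struct{}

func (udf) InsertRow(int) string             { return "udf apply" }
func (udf) RequiresParsedBeforeRow(int) bool { return true } // UDF appliers need the decoded previous row

// mux routes a table to the UDF querier only when a custom apply function
// was configured for it, mirroring the shouldUseUDF map in the hunk above.
type mux struct {
	useUDF map[int]bool
	lww    querier
	udf    querier
}

func (m mux) pick(tableID int) querier {
	if m.useUDF[tableID] {
		return m.udf
	}
	return m.lww
}

func (m mux) InsertRow(tableID int) string { return m.pick(tableID).InsertRow(tableID) }
func (m mux) RequiresParsedBeforeRow(tableID int) bool {
	return m.pick(tableID).RequiresParsedBeforeRow(tableID)
}

func main() {
	m := mux{useUDF: map[int]bool{42: true}, lww: lww{}, udf: udf{}}
	fmt.Println(m.InsertRow(1), m.InsertRow(42)) // lww upsert udf apply
}
```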
sd := sql.NewInternalSessionData(ctx, s.ClusterSettings(), "" /* opName */) - rp, err := makeSQLLastWriteWinsHandler(ctx, s.ClusterSettings(), map[descpb.ID]sqlProcessorTableConfig{ + rp, err := makeSQLProcessor(ctx, s.ClusterSettings(), map[descpb.ID]sqlProcessorTableConfig{ desc.GetID(): { srcDesc: desc, }, @@ -301,3 +306,185 @@ }) } } + +// TestLWWConflictResolution tests how write conflicts are handled under the default +// last write wins mode. +func TestLWWConflictResolution(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + ctx := context.Background() + + srv, sqlDB, _ := serverutils.StartServer(t, base.TestServerArgs{}) + defer srv.Stopper().Stop(ctx) + s := srv.ApplicationLayer() + + runner := sqlutils.MakeSQLRunner(sqlDB) + + // Create new tables for each test to prevent conflicts between tests + tableNumber := 0 + createTable := func(t *testing.T) string { + tableName := fmt.Sprintf("tab%d", tableNumber) + runner.Exec(t, fmt.Sprintf(`CREATE TABLE %s (pk int primary key, payload string)`, tableName)) + runner.Exec(t, fmt.Sprintf( + "ALTER TABLE %s "+lwwColumnAdd, + tableName)) + tableNumber++ + return tableName + } + + // The encoderFn takes an origin timestamp and a row and converts them into a Key-Value format that + // can be ingested by the RowProcessor + type encoderFn func(originTimestamp hlc.Timestamp, datums ...interface{}) roachpb.KeyValue + + setup := func(t *testing.T, useKVProc bool) (string, RowProcessor, encoderFn) { + tableNameSrc := createTable(t) + tableNameDst := createTable(t) + srcDesc := desctestutils.TestingGetPublicTableDescriptor(s.DB(), s.Codec(), "defaultdb", tableNameSrc) + dstDesc := desctestutils.TestingGetPublicTableDescriptor(s.DB(), s.Codec(), "defaultdb", tableNameDst) + + // We need the SQL row processor even when testing the KV row processor since it's the fallback + var rp RowProcessor + rp, err := makeSQLProcessor(ctx, s.ClusterSettings(), map[descpb.ID]sqlProcessorTableConfig{ + dstDesc.GetID(): { + srcDesc: srcDesc, + }, + }, jobspb.JobID(1), s.InternalExecutor().(isql.Executor)) + require.NoError(t, err) + + if useKVProc { + rp, err = newKVRowProcessor(ctx, + &execinfra.ServerConfig{ + DB: s.InternalDB().(descs.DB), + LeaseManager: s.LeaseManager(), + }, &eval.Context{ + Codec: s.Codec(), + Settings: s.ClusterSettings(), + }, map[descpb.ID]sqlProcessorTableConfig{ + dstDesc.GetID(): { + srcDesc: srcDesc, + }, + }, + rp.(*sqlRowProcessor)) + require.NoError(t, err) + } + return tableNameDst, rp, func(originTimestamp hlc.Timestamp, datums ...interface{}) roachpb.KeyValue { + kv := replicationtestutils.EncodeKV(t, s.Codec(), srcDesc, datums...)
+ kv.Value.Timestamp = originTimestamp + return kv + } + } + + insertRow := func(rp RowProcessor, keyValue roachpb.KeyValue, prevValue roachpb.Value) error { + return s.InternalDB().(isql.DB).Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + _, err := rp.ProcessRow(ctx, txn, keyValue, prevValue) + return err + }) + } + + timeNow := hlc.Timestamp{WallTime: timeutil.Now().UnixNano()} + timeNowPlusOne := hlc.Timestamp{WallTime: timeutil.Now().Add(time.Microsecond).UnixNano()} + timeOneDayForward := hlc.Timestamp{WallTime: timeutil.Now().Add(time.Hour * 24).UnixNano()} + timeOneDayBackward := hlc.Timestamp{WallTime: timeutil.Now().Add(time.Hour * -24).UnixNano()} + row1 := []interface{}{1, "row1"} + row2 := []interface{}{1, "row2"} + row3 := []interface{}{1, "row3"} + + // Run with both the SQL row processor and the KV row processor. Note that currently, + // KV row processor write conflicts result in a fallback to the SQL row processor. + testutils.RunTrueAndFalse(t, "useKVProc", func(t *testing.T, useKVProc bool) { + // All of these combinations are tested with both optimistic inserts and standard inserts. + testutils.RunTrueAndFalse(t, "optimistic_insert", func(t *testing.T, optimisticInsert bool) { + runner.Exec(t, fmt.Sprintf("SET CLUSTER SETTING logical_replication.consumer.try_optimistic_insert.enabled=%t", optimisticInsert)) + + // Write to both the remote and the local table and see how conflicts are handled. + // When a remote insert conflicts with a local write, the MVCC timestamp of the remote + // write is compared with the MVCC timestamp of the local write. + t.Run("cross-cluster-insert", func(t *testing.T) { + tableNameDst, rp, encoder := setup(t, useKVProc) + + runner.Exec(t, fmt.Sprintf("INSERT INTO %s VALUES ($1, $2)", tableNameDst), row1...) + + keyValue2 := encoder(timeOneDayBackward, row2...) + require.NoError(t, insertRow(rp, keyValue2, roachpb.Value{})) + + expectedRows := [][]string{ + {"1", "row1"}, + } + runner.CheckQueryResults(t, fmt.Sprintf("SELECT * from %s", tableNameDst), expectedRows) + + keyValue3 := encoder(timeOneDayForward, row2...) + require.NoError(t, insertRow(rp, keyValue3, keyValue2.Value)) + + expectedRows = [][]string{ + {"1", "row2"}, + } + runner.CheckQueryResults(t, fmt.Sprintf("SELECT * from %s", tableNameDst), expectedRows) + }) + + // Receive multiple updates remotely and handle conflicts between them. When a row is received, + // its MVCC timestamp is compared against the local value of crdb_replication_origin_timestamp. + t.Run("remote-update", func(t *testing.T) { + tableNameDst, rp, encoder := setup(t, useKVProc) + + keyValue1 := encoder(timeNow, row1...) + require.NoError(t, insertRow(rp, keyValue1, roachpb.Value{})) + + keyValue2 := encoder(timeOneDayForward, row2...) + require.NoError(t, insertRow(rp, keyValue2, keyValue1.Value)) + + expectedRows := [][]string{ + {"1", "row2"}, + } + runner.CheckQueryResults(t, fmt.Sprintf("SELECT * from %s", tableNameDst), expectedRows) + + // Simulate a rangefeed retransmission by sending the older row again. + require.NoError(t, insertRow(rp, keyValue1, roachpb.Value{})) + runner.CheckQueryResults(t, fmt.Sprintf("SELECT * from %s", tableNameDst), expectedRows) + + // Validate that the remote timestamp is used to handle the conflict between two remote rows.
+ // Try to add a row with a slightly higher MVCC timestamp than any currently in the table; however, + // this value will still be lower than crdb_replication_origin_timestamp for row2, so row2 should persist. + var maxMVCC float64 + runner.QueryRow(t, fmt.Sprintf("SELECT max(crdb_internal_mvcc_timestamp) FROM %s", tableNameDst)).Scan(&maxMVCC) + + keyValue3 := encoder(hlc.Timestamp{WallTime: int64(maxMVCC) + 1}, row3...) + require.NoError(t, insertRow(rp, keyValue3, keyValue2.Value)) + expectedRows = [][]string{ + {"1", "row2"}, + } + runner.CheckQueryResults(t, fmt.Sprintf("SELECT * from %s", tableNameDst), expectedRows) + }) + + // From the perspective of the row processor, once the first row is processed, the next incoming event from the + // remote rangefeed should have a "previous row" that matches the row currently in the local table. If writes on + // the local and remote table occur too close together, both sides will attempt to propagate to the other, and + // the winner of the conflict will depend on the MVCC timestamp, just like in the cross-cluster write scenario. + t.Run("outdated-write-conflict", func(t *testing.T) { + tableNameDst, rp, encoder := setup(t, useKVProc) + + keyValue1 := encoder(timeNow, row1...) + require.NoError(t, insertRow(rp, keyValue1, roachpb.Value{})) + + runner.Exec(t, fmt.Sprintf("UPSERT INTO %s VALUES ($1, $2)", tableNameDst), row2...) + + // The remote cluster sends another write, but the local write wins the conflict. + keyValue1QuickUpdate := encoder(timeNowPlusOne, row3...) + require.NoError(t, insertRow(rp, keyValue1QuickUpdate, keyValue1.Value)) + + expectedRows := [][]string{ + {"1", "row2"}, + } + runner.CheckQueryResults(t, fmt.Sprintf("SELECT * from %s", tableNameDst), expectedRows) + + // This time the remote write should win the conflict. + keyValue3 := encoder(timeOneDayForward, row3...)
+ require.NoError(t, insertRow(rp, keyValue3, keyValue1QuickUpdate.Value)) + + expectedRows = [][]string{ + {"1", "row3"}, + } + runner.CheckQueryResults(t, fmt.Sprintf("SELECT * from %s", tableNameDst), expectedRows) + }) + }) + }) +} diff --git a/pkg/ccl/crosscluster/logical/metrics.go b/pkg/ccl/crosscluster/logical/metrics.go index be71761c47bc..f964dd2b8bc9 100644 --- a/pkg/ccl/crosscluster/logical/metrics.go +++ b/pkg/ccl/crosscluster/logical/metrics.go @@ -143,6 +143,12 @@ var ( Measurement: "Events", Unit: metric.Unit_COUNT, } + metaKVWriteFallbackCount = metric.Metadata{ + Name: "logical_replication.kv_write_fallback_count", + Help: "Total number of times the kv write path could not handle a row update and fell back to SQL instead", + Measurement: "Events", + Unit: metric.Unit_COUNT, + } metaDistSQLReplanCount = metric.Metadata{ Name: "logical_replication.replan_count", Help: "Total number of dist sql replanning events", @@ -185,6 +191,7 @@ type Metrics struct { StreamBatchBytesHist metric.IHistogram StreamBatchNanosHist metric.IHistogram OptimisticInsertConflictCount *metric.Counter + KVWriteFallbackCount *metric.Counter ReplanCount *metric.Counter } @@ -240,6 +247,7 @@ func MakeMetrics(histogramWindow time.Duration) metric.Struct { BucketConfig: metric.IOLatencyBuckets, }), OptimisticInsertConflictCount: metric.NewCounter(metaOptimisticInsertConflictCount), + KVWriteFallbackCount: metric.NewCounter(metaKVWriteFallbackCount), ReplanCount: metric.NewCounter(metaDistSQLReplanCount), } } diff --git a/pkg/ccl/crosscluster/logical/udf_row_processor.go b/pkg/ccl/crosscluster/logical/udf_row_processor.go index e9d777eba8c9..8e90e4b22057 100644 --- a/pkg/ccl/crosscluster/logical/udf_row_processor.go +++ b/pkg/ccl/crosscluster/logical/udf_row_processor.go @@ -113,17 +113,6 @@ func makeApplierQuerier( } } -func makeUDFApplierProcessor( - ctx context.Context, - settings *cluster.Settings, - tableDescs map[descpb.ID]sqlProcessorTableConfig, - jobID jobspb.JobID, - ie isql.Executor, -) (*sqlRowProcessor, error) { - aq := makeApplierQuerier(ctx, settings, tableDescs, jobID, ie) - return makeSQLProcessorFromQuerier(ctx, settings, tableDescs, ie, aq) -} - func (aq *applierQuerier) AddTable(targetDescID int32, tc sqlProcessorTableConfig) error { var err error td := tc.srcDesc @@ -142,7 +131,7 @@ func (aq *applierQuerier) AddTable(targetDescID int32, tc sqlProcessorTableConfi return err } -func (aq *applierQuerier) RequiresParsedBeforeRow() bool { return true } +func (aq *applierQuerier) RequiresParsedBeforeRow(catid.DescID) bool { return true } func (aq *applierQuerier) InsertRow( ctx context.Context, diff --git a/pkg/ccl/crosscluster/logical/udf_row_processor_test.go b/pkg/ccl/crosscluster/logical/udf_row_processor_test.go index c3c739537184..c95ea5065908 100644 --- a/pkg/ccl/crosscluster/logical/udf_row_processor_test.go +++ b/pkg/ccl/crosscluster/logical/udf_row_processor_test.go @@ -10,18 +10,14 @@ package logical import ( "context" - gosql "database/sql" "fmt" "testing" - "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/sql/randgen" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/testutils/skip" - "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" 
"github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/randutil" @@ -36,6 +32,15 @@ AS $$ BEGIN RETURN 'accept_proposed'; END; +$$ LANGUAGE plpgsql` + + testingUDFAcceptProposedBaseWithSchema = ` +CREATE OR REPLACE FUNCTION %[1]s.repl_apply(action STRING, data %[2]s, existing %[2]s, prev %[2]s, existing_mvcc_timestamp DECIMAL, existing_origin_timestamp DECIMAL, proposed_mvcc_timetamp DECIMAL, proposed_previous_mvcc_timestamp DECIMAL) +RETURNS string +AS $$ +BEGIN + RETURN 'accept_proposed'; +END; $$ LANGUAGE plpgsql` ) @@ -46,11 +51,8 @@ func TestUDFWithRandomTables(t *testing.T) { skip.WithIssue(t, 127315, "composite types generated by randgen currently unsupported by LDR") ctx := context.Background() - s, sqlA, sqlB, cleanup := setupTwoDBUDFTestCluster(t) - defer cleanup() - - runnerA := sqlutils.MakeSQLRunner(sqlA) - runnerB := sqlutils.MakeSQLRunner(sqlB) + tc, s, runnerA, runnerB := setupLogicalTestServer(t, ctx, testClusterBaseClusterArgs, 1) + defer tc.Stopper().Stop(ctx) tableName := "rand_table" rng, _ := randutil.NewPseudoRand() @@ -65,7 +67,7 @@ func TestUDFWithRandomTables(t *testing.T) { randgen.RequirePrimaryIndex(), ) stmt := tree.SerializeForDisplay(createStmt) - t.Logf(stmt) + t.Log(stmt) runnerA.Exec(t, stmt) runnerB.Exec(t, stmt) runnerB.Exec(t, fmt.Sprintf(testingUDFAcceptProposedBase, tableName)) @@ -81,6 +83,7 @@ func TestUDFWithRandomTables(t *testing.T) { // optimization decisions to our replication process. runnerA.Exec(t, "SET plan_cache_mode=force_generic_plan") + sqlA := s.SQLConn(t, serverutils.DBName("a")) numInserts := 20 _, err := randgen.PopulateTableWithRandData(rng, sqlA, tableName, numInserts, nil) @@ -97,7 +100,6 @@ func TestUDFWithRandomTables(t *testing.T) { var jobBID jobspb.JobID runnerB.QueryRow(t, streamStartStmt, dbAURL.String()).Scan(&jobBID) - t.Logf("waiting for replication job %d", jobBID) WaitUntilReplicatedTime(t, s.Clock().Now(), runnerB, jobBID) runnerA.Exec(t, fmt.Sprintf("DELETE FROM %s LIMIT 5", tableName)) WaitUntilReplicatedTime(t, s.Clock().Now(), runnerB, jobBID) @@ -108,11 +110,11 @@ func TestUDFWithRandomTables(t *testing.T) { func TestUDFInsertOnly(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - s, sqlA, sqlB, cleanup := setupTwoDBUDFTestCluster(t) - defer cleanup() - runnerA := sqlutils.MakeSQLRunner(sqlA) - runnerB := sqlutils.MakeSQLRunner(sqlB) + ctx := context.Background() + tc, s, runnerA, runnerB := setupLogicalTestServer(t, ctx, testClusterBaseClusterArgs, 1) + defer tc.Stopper().Stop(ctx) + tableName := "tallies" stmt := "CREATE TABLE tallies(pk INT PRIMARY KEY, v INT)" runnerA.Exec(t, stmt) @@ -143,7 +145,6 @@ func TestUDFInsertOnly(t *testing.T) { var jobBID jobspb.JobID runnerB.QueryRow(t, streamStartStmt, dbAURL.String()).Scan(&jobBID) - t.Logf("waiting for replication job %d", jobBID) WaitUntilReplicatedTime(t, s.Clock().Now(), runnerB, jobBID) runnerA.Exec(t, "INSERT INTO tallies VALUES (5, 55)") runnerA.Exec(t, "DELETE FROM tallies WHERE pk = 4") @@ -162,11 +163,11 @@ func TestUDFInsertOnly(t *testing.T) { func TestUDFPreviousValue(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - s, sqlA, sqlB, cleanup := setupTwoDBUDFTestCluster(t) - defer cleanup() - runnerA := sqlutils.MakeSQLRunner(sqlA) - runnerB := sqlutils.MakeSQLRunner(sqlB) + ctx := context.Background() + tc, s, runnerA, runnerB := setupLogicalTestServer(t, ctx, testClusterBaseClusterArgs, 
1) + defer tc.Stopper().Stop(ctx) + tableName := "tallies" stmt := "CREATE TABLE tallies(pk INT PRIMARY KEY, v INT)" runnerA.Exec(t, stmt) @@ -197,49 +198,11 @@ func TestUDFPreviousValue(t *testing.T) { var jobBID jobspb.JobID runnerB.QueryRow(t, streamStartStmt, dbAURL.String()).Scan(&jobBID) - t.Logf("waiting for replication job %d", jobBID) WaitUntilReplicatedTime(t, s.Clock().Now(), runnerB, jobBID) runnerA.Exec(t, "UPDATE tallies SET v = 15 WHERE pk = 1") - - t.Logf("waiting for replication job %d", jobBID) WaitUntilReplicatedTime(t, s.Clock().Now(), runnerB, jobBID) runnerB.CheckQueryResults(t, "SELECT * FROM tallies", [][]string{ {"1", "25"}, }) } - -func setupTwoDBUDFTestCluster( - t *testing.T, -) (serverutils.ApplicationLayerInterface, *gosql.DB, *gosql.DB, func()) { - ctx := context.Background() - srv, sqlDB, _ := serverutils.StartServer(t, base.TestServerArgs{ - DefaultTestTenant: base.TestControlsTenantsExplicitly, - Knobs: base.TestingKnobs{ - JobsTestingKnobs: jobs.NewTestingKnobsWithShortIntervals(), - }, - }) - s := srv.ApplicationLayer() - - _, err := sqlDB.Exec("CREATE DATABASE a") - require.NoError(t, err) - _, err = sqlDB.Exec("CREATE DATABASE b") - require.NoError(t, err) - - sysSQL := srv.SystemLayer().SQLConn(t) - sqlA := s.SQLConn(t, serverutils.DBName("a")) - sqlB := s.SQLConn(t, serverutils.DBName("b")) - for _, s := range testClusterSettings { - _, err := sqlA.Exec(s) - require.NoError(t, err) - } - for _, s := range testClusterSystemSettings { - _, err = sysSQL.Exec(s) - require.NoError(t, err) - } - defaultSQLProcessor = udfApplierProcessor - return s, sqlA, sqlB, func() { - srv.Stopper().Stop(ctx) - defaultSQLProcessor = lwwProcessor - } -} diff --git a/pkg/ccl/crosscluster/physical/BUILD.bazel b/pkg/ccl/crosscluster/physical/BUILD.bazel index 8d1a3995bdc5..f205f759fef5 100644 --- a/pkg/ccl/crosscluster/physical/BUILD.bazel +++ b/pkg/ccl/crosscluster/physical/BUILD.bazel @@ -28,6 +28,7 @@ go_library( "//pkg/ccl/crosscluster/streamclient", "//pkg/ccl/revertccl", "//pkg/ccl/utilccl", + "//pkg/cloud", "//pkg/cloud/externalconn", "//pkg/cloud/externalconn/connectionpb", "//pkg/jobs", diff --git a/pkg/ccl/crosscluster/physical/external_connection.go b/pkg/ccl/crosscluster/physical/external_connection.go index 7660cf43e5c0..4997367645bc 100644 --- a/pkg/ccl/crosscluster/physical/external_connection.go +++ b/pkg/ccl/crosscluster/physical/external_connection.go @@ -13,6 +13,7 @@ import ( "net/url" "github.com/cockroachdb/cockroach/pkg/ccl/crosscluster/streamclient" + "github.com/cockroachdb/cockroach/pkg/cloud" "github.com/cockroachdb/cockroach/pkg/cloud/externalconn" "github.com/cockroachdb/cockroach/pkg/cloud/externalconn/connectionpb" ) @@ -47,7 +48,7 @@ func init() { connectionpb.ConnectionProvider_sql, externalconn.SimpleURIFactory, ) - + cloud.RegisterRedactedParams(cloud.RedactedParams(streamclient.SslInlineURLParam)) externalconn.RegisterDefaultValidation(scheme, validatePostgresConnectionURI) } diff --git a/pkg/ccl/crosscluster/physical/replication_stream_e2e_test.go b/pkg/ccl/crosscluster/physical/replication_stream_e2e_test.go index 7d069d1b35aa..0f7781cc512a 100644 --- a/pkg/ccl/crosscluster/physical/replication_stream_e2e_test.go +++ b/pkg/ccl/crosscluster/physical/replication_stream_e2e_test.go @@ -270,7 +270,7 @@ func TestTenantStreamingCheckpoint(t *testing.T) { lastClientStart := make(map[string]hlc.Timestamp) args := replicationtestutils.DefaultTenantStreamingClustersArgs args.TestingKnobs = 
&sql.StreamingTestingKnobs{ - BeforeClientSubscribe: func(addr string, token string, clientStartTimes span.Frontier) { + BeforeClientSubscribe: func(addr string, token string, clientStartTimes span.Frontier, _ bool) { lastClientStart[token] = clientStartTimes.Frontier() }, } @@ -685,7 +685,7 @@ func TestTenantStreamingMultipleNodes(t *testing.T) { clientAddresses := make(map[string]struct{}) var addressesMu syncutil.Mutex args.TestingKnobs = &sql.StreamingTestingKnobs{ - BeforeClientSubscribe: func(addr string, token string, _ span.Frontier) { + BeforeClientSubscribe: func(addr string, token string, _ span.Frontier, _ bool) { addressesMu.Lock() defer addressesMu.Unlock() clientAddresses[addr] = struct{}{} @@ -797,7 +797,7 @@ func TestStreamingAutoReplan(t *testing.T) { clientAddresses := make(map[string]struct{}) var addressesMu syncutil.Mutex args.TestingKnobs = &sql.StreamingTestingKnobs{ - BeforeClientSubscribe: func(addr string, token string, _ span.Frontier) { + BeforeClientSubscribe: func(addr string, token string, _ span.Frontier, _ bool) { addressesMu.Lock() defer addressesMu.Unlock() clientAddresses[addr] = struct{}{} @@ -880,7 +880,7 @@ func TestStreamingReplanOnLag(t *testing.T) { clientAddresses := make(map[string]struct{}) var addressesMu syncutil.Mutex args.TestingKnobs = &sql.StreamingTestingKnobs{ - BeforeClientSubscribe: func(addr string, token string, _ span.Frontier) { + BeforeClientSubscribe: func(addr string, token string, _ span.Frontier, _ bool) { addressesMu.Lock() defer addressesMu.Unlock() clientAddresses[addr] = struct{}{} diff --git a/pkg/ccl/crosscluster/physical/stream_ingestion_planning.go b/pkg/ccl/crosscluster/physical/stream_ingestion_planning.go index f3138e3fde8f..70b7defa3954 100644 --- a/pkg/ccl/crosscluster/physical/stream_ingestion_planning.go +++ b/pkg/ccl/crosscluster/physical/stream_ingestion_planning.go @@ -54,7 +54,6 @@ func streamIngestionJobDescription( ReplicationSourceTenantName: streamIngestion.ReplicationSourceTenantName, ReplicationSourceAddress: tree.NewDString(redactedSourceAddr), Options: streamIngestion.Options, - Like: streamIngestion.Like, } ann := p.ExtendedEvalContext().Annotations return tree.AsStringWithFQNames(redactedCreateStmt, ann), nil @@ -74,11 +73,6 @@ func ingestionTypeCheck( ingestionStmt.ReplicationSourceAddress, ingestionStmt.Options.Retention}, } - if ingestionStmt.Like.OtherTenant != nil { - toTypeCheck = append(toTypeCheck, - exprutil.TenantSpec{TenantSpec: ingestionStmt.Like.OtherTenant}, - ) - } if err := exprutil.TypeCheck(ctx, "INGESTION", p.SemaCtx(), toTypeCheck...); err != nil { return false, nil, err @@ -117,15 +111,6 @@ func ingestionPlanHook( return nil, nil, nil, false, err } - var likeTenantID uint64 - var likeTenantName string - if ingestionStmt.Like.OtherTenant != nil { - _, likeTenantID, likeTenantName, err = exprEval.TenantSpec(ctx, ingestionStmt.Like.OtherTenant) - if err != nil { - return nil, nil, nil, false, err - } - } - evalCtx := &p.ExtendedEvalContext().Context options, err := evalTenantReplicationOptions(ctx, ingestionStmt.Options, exprEval, evalCtx, p.SemaCtx(), createReplicationOp) if err != nil { @@ -175,12 +160,8 @@ func ingestionPlanHook( // If we don't have a resume timestamp, make a new tenant jobID := p.ExecCfg().JobRegistry.MakeJobID() var destinationTenantID roachpb.TenantID - // Determine which template will be used as config template to - // create the new tenant below. 
- tenantInfo, err := sql.GetTenantTemplate(ctx, p.ExecCfg().Settings, p.InternalSQLTxn(), nil, likeTenantID, likeTenantName) - if err != nil { - return err - } + + var tenantInfo mtinfopb.TenantInfoWithUsage // Create a new tenant for the replication stream. tenantInfo.PhysicalReplicationConsumerJobID = jobID @@ -197,7 +178,7 @@ func ingestionPlanHook( ctx, p.ExecCfg().Codec, p.ExecCfg().Settings, p.InternalSQLTxn(), p.ExecCfg().SpanConfigKVAccessor.WithISQLTxn(ctx, p.InternalSQLTxn()), - tenantInfo, initialTenantZoneConfig, + &tenantInfo, initialTenantZoneConfig, ingestionStmt.IfNotExists, p.ExecCfg().TenantTestingKnobs, ) diff --git a/pkg/ccl/crosscluster/physical/stream_ingestion_processor.go b/pkg/ccl/crosscluster/physical/stream_ingestion_processor.go index 750c29a524b6..a51f7a45f4e3 100644 --- a/pkg/ccl/crosscluster/physical/stream_ingestion_processor.go +++ b/pkg/ccl/crosscluster/physical/stream_ingestion_processor.go @@ -452,7 +452,7 @@ func (sip *streamIngestionProcessor) Start(ctx context.Context) { if streamingKnobs, ok := sip.FlowCtx.TestingKnobs().StreamingTestingKnobs.(*sql.StreamingTestingKnobs); ok { if streamingKnobs != nil && streamingKnobs.BeforeClientSubscribe != nil { - streamingKnobs.BeforeClientSubscribe(addr, string(token), sip.frontier) + streamingKnobs.BeforeClientSubscribe(addr, string(token), sip.frontier, false) } } diff --git a/pkg/ccl/crosscluster/physical/stream_ingestion_processor_test.go b/pkg/ccl/crosscluster/physical/stream_ingestion_processor_test.go index e77136081cea..e8826529ae4d 100644 --- a/pkg/ccl/crosscluster/physical/stream_ingestion_processor_test.go +++ b/pkg/ccl/crosscluster/physical/stream_ingestion_processor_test.go @@ -343,7 +343,7 @@ func TestStreamIngestionProcessor(t *testing.T) { } lastClientStart := make(map[string]hlc.Timestamp) - streamingTestingKnobs := &sql.StreamingTestingKnobs{BeforeClientSubscribe: func(addr string, token string, clientStartTimes span.Frontier) { + streamingTestingKnobs := &sql.StreamingTestingKnobs{BeforeClientSubscribe: func(addr string, token string, clientStartTimes span.Frontier, _ bool) { sp := p1Span if token == string(p2) { sp = p2Span diff --git a/pkg/ccl/crosscluster/producer/event_stream.go b/pkg/ccl/crosscluster/producer/event_stream.go index 451184f29bd4..f42876522fc8 100644 --- a/pkg/ccl/crosscluster/producer/event_stream.go +++ b/pkg/ccl/crosscluster/producer/event_stream.go @@ -150,6 +150,7 @@ func (s *eventStream) Start(ctx context.Context, txn *kv.Txn) (retErr error) { rangefeed.WithOnValues(s.onValues), rangefeed.WithDiff(s.spec.WithDiff), rangefeed.WithInvoker(func(fn func() error) error { return fn() }), + rangefeed.WithFiltering(s.spec.WithFiltering), } if emitMetadata.Get(&s.execCfg.Settings.SV) { opts = append(opts, rangefeed.WithOnMetadata(s.onMetadata)) diff --git a/pkg/ccl/crosscluster/producer/span_config_event_stream.go b/pkg/ccl/crosscluster/producer/span_config_event_stream.go index feb2c73c5be6..90df0fb04eb1 100644 --- a/pkg/ccl/crosscluster/producer/span_config_event_stream.go +++ b/pkg/ccl/crosscluster/producer/span_config_event_stream.go @@ -157,7 +157,12 @@ func (s *spanConfigEventStream) Next(ctx context.Context) (bool, error) { case err := <-s.errCh: return false, err case s.data = <-s.streamCh: - return true, nil + select { + case err := <-s.errCh: + return false, err + default: + return true, nil + } } } diff --git a/pkg/ccl/crosscluster/replicationtestutils/testutils.go b/pkg/ccl/crosscluster/replicationtestutils/testutils.go index e57fac8fa4b3..e3978fcf214d 100644 --- 
a/pkg/ccl/crosscluster/replicationtestutils/testutils.go +++ b/pkg/ccl/crosscluster/replicationtestutils/testutils.go @@ -245,7 +245,7 @@ func (c *TenantStreamingClusters) WaitUntilStartTimeReached(ingestionJobID jobsp func (c *TenantStreamingClusters) WaitForPostCutoverRetentionJob() { c.DestSysSQL.Exec(c.T, fmt.Sprintf(`ALTER TENANT '%s' SET REPLICATION EXPIRATION WINDOW ='10ms'`, c.Args.DestTenantName)) var retentionJobID jobspb.JobID - retentionJobQuery := fmt.Sprintf(`SELECT job_id FROM [SHOW JOBS] + retentionJobQuery := fmt.Sprintf(`SELECT job_id FROM [SHOW JOBS] WHERE description = 'History Retention for Physical Replication of %s' ORDER BY created DESC LIMIT 1`, c.Args.DestTenantName) c.DestSysSQL.QueryRow(c.T, retentionJobQuery).Scan(&retentionJobID) @@ -281,7 +281,7 @@ func (c *TenantStreamingClusters) Cutover( require.Equal(c.T, cutoverTime, cutoverOutput.GoTime()) } - protectedTimestamp := replicationutils.TestingGetPTSFromReplicationJob(c.T, ctx, c.SrcSysSQL, c.SrcSysServer, producerJobID) + protectedTimestamp := replicationutils.TestingGetPTSFromReplicationJob(c.T, ctx, c.SrcSysSQL, c.SrcSysServer, jobspb.JobID(producerJobID)) require.LessOrEqual(c.T, protectedTimestamp.GoTime(), cutoverOutput.GoTime()) // PTS should be less than or equal to retained time as a result of heartbeats. diff --git a/pkg/ccl/crosscluster/replicationutils/BUILD.bazel b/pkg/ccl/crosscluster/replicationutils/BUILD.bazel index a4aea5d1e385..fb873b6d3625 100644 --- a/pkg/ccl/crosscluster/replicationutils/BUILD.bazel +++ b/pkg/ccl/crosscluster/replicationutils/BUILD.bazel @@ -9,6 +9,7 @@ go_library( "//pkg/jobs", "//pkg/jobs/jobspb", "//pkg/kv/kvpb", + "//pkg/kv/kvserver/protectedts", "//pkg/kv/kvserver/protectedts/ptpb", "//pkg/repstream/streampb", "//pkg/roachpb", @@ -24,6 +25,7 @@ go_library( "//pkg/util/ctxgroup", "//pkg/util/hlc", "//pkg/util/timeutil", + "//pkg/util/uuid", "@com_github_cockroachdb_errors//:errors", "@com_github_stretchr_testify//require", ], diff --git a/pkg/ccl/crosscluster/replicationutils/utils.go b/pkg/ccl/crosscluster/replicationutils/utils.go index a408b6ce78c6..465b059d8f03 100644 --- a/pkg/ccl/crosscluster/replicationutils/utils.go +++ b/pkg/ccl/crosscluster/replicationutils/utils.go @@ -18,6 +18,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/kv/kvpb" + "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts/ptpb" "github.com/cockroachdb/cockroach/pkg/repstream/streampb" "github.com/cockroachdb/cockroach/pkg/roachpb" @@ -33,6 +34,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/ctxgroup" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/timeutil" + "github.com/cockroachdb/cockroach/pkg/util/uuid" "github.com/cockroachdb/errors" "github.com/stretchr/testify/require" ) @@ -307,11 +309,9 @@ func TestingGetPTSFromReplicationJob( ctx context.Context, sqlRunner *sqlutils.SQLRunner, srv serverutils.ApplicationLayerInterface, - producerJobID int, + producerJobID jobspb.JobID, ) hlc.Timestamp { - payload := jobutils.GetJobPayload(t, sqlRunner, jobspb.JobID(producerJobID)) - details := payload.GetStreamReplication() - ptsRecordID := details.ProtectedTimestampRecordID + ptsRecordID := getPTSRecordIDFromProducerJob(t, sqlRunner, producerJobID) ptsProvider := 
srv.ExecutorConfig().(sql.ExecutorConfig).ProtectedTimestampProvider var ptsRecord *ptpb.Record @@ -330,7 +330,7 @@ func WaitForPTSProtection( ctx context.Context, sqlRunner *sqlutils.SQLRunner, srv serverutils.ApplicationLayerInterface, - producerJobID int, + producerJobID jobspb.JobID, minTime hlc.Timestamp, ) { testutils.SucceedsSoon(t, func() error { @@ -342,8 +342,40 @@ func WaitForPTSProtection( }) } -func GetLatestProducerJobID(t *testing.T, sqlRunner *sqlutils.SQLRunner) int { - var producerJobID int - sqlRunner.QueryRow(t, "SELECT id FROM system.jobs WHERE job_type = 'REPLICATION STREAM PRODUCER' ORDER BY created DESC LIMIT 1").Scan(&producerJobID) - return producerJobID +func WaitForPTSProtectionToNotExist( + t *testing.T, + ctx context.Context, + sqlRunner *sqlutils.SQLRunner, + srv serverutils.ApplicationLayerInterface, + producerJobID jobspb.JobID, +) { + ptsRecordID := getPTSRecordIDFromProducerJob(t, sqlRunner, producerJobID) + ptsProvider := srv.ExecutorConfig().(sql.ExecutorConfig).ProtectedTimestampProvider + testutils.SucceedsSoon(t, func() error { + err := srv.InternalDB().(descs.DB).Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + _, err := ptsProvider.WithTxn(txn).GetRecord(ctx, ptsRecordID) + return err + }) + if errors.Is(err, protectedts.ErrNotExists) { + return nil + } + if err == nil { + return errors.New("PTS record still exists") + } + return err + }) +} + +func getPTSRecordIDFromProducerJob( + t *testing.T, sqlRunner *sqlutils.SQLRunner, producerJobID jobspb.JobID, +) uuid.UUID { + payload := jobutils.GetJobPayload(t, sqlRunner, producerJobID) + return payload.GetStreamReplication().ProtectedTimestampRecordID +} + +func GetProducerJobIDFromLDRJob( + t *testing.T, sqlRunner *sqlutils.SQLRunner, ldrJobID jobspb.JobID, +) jobspb.JobID { + payload := jobutils.GetJobPayload(t, sqlRunner, ldrJobID) + return jobspb.JobID(payload.GetLogicalReplicationDetails().StreamID) } diff --git a/pkg/ccl/crosscluster/streamclient/pgconn.go b/pkg/ccl/crosscluster/streamclient/pgconn.go index 4c61840985ea..6ac13356e212 100644 --- a/pkg/ccl/crosscluster/streamclient/pgconn.go +++ b/pkg/ccl/crosscluster/streamclient/pgconn.go @@ -22,10 +22,10 @@ import ( ) const ( - // sslInlineURLParam is a non-standard connection URL + // SslInlineURLParam is a non-standard connection URL // parameter. When true, we assume that sslcert, sslkey, and // sslrootcert contain URL-encoded data rather than paths. - sslInlineURLParam = "sslinline" + SslInlineURLParam = "sslinline" sslModeURLParam = "sslmode" sslCertURLParam = "sslcert" @@ -82,7 +82,7 @@ type tlsCerts struct { // tlsCerts struct can be used to apply the certificate data to the // tls.Config produced by pgx. 
func uriWithInlineTLSCertsRemoved(remote *url.URL) (*url.URL, *tlsCerts, error) { - if remote.Query().Get(sslInlineURLParam) != "true" { + if remote.Query().Get(SslInlineURLParam) != "true" { return remote, nil, nil } @@ -132,7 +132,7 @@ func uriWithInlineTLSCertsRemoved(remote *url.URL) (*url.URL, *tlsCerts, error) v.Del(sslCertURLParam) v.Del(sslKeyURLParam) v.Del(sslRootCertURLParam) - v.Del(sslInlineURLParam) + v.Del(SslInlineURLParam) retURL.RawQuery = v.Encode() return &retURL, tlsInfo, nil } diff --git a/pkg/ccl/gssapiccl/gssapi.go b/pkg/ccl/gssapiccl/gssapi.go index 5d5c9f9524d4..9ab969282a95 100644 --- a/pkg/ccl/gssapiccl/gssapi.go +++ b/pkg/ccl/gssapiccl/gssapi.go @@ -41,6 +41,7 @@ const ( func authGSS( _ context.Context, c pgwire.AuthConn, + _ username.SQLUsername, _ tls.ConnectionState, execCfg *sql.ExecutorConfig, entry *hba.Entry, diff --git a/pkg/ccl/jwtauthccl/BUILD.bazel b/pkg/ccl/jwtauthccl/BUILD.bazel index fecb0c41370d..5d8ea3aa5806 100644 --- a/pkg/ccl/jwtauthccl/BUILD.bazel +++ b/pkg/ccl/jwtauthccl/BUILD.bazel @@ -22,8 +22,9 @@ go_library( "//pkg/util/uuid", "@com_github_cockroachdb_errors//:errors", "@com_github_cockroachdb_redact//:redact", - "@com_github_lestrrat_go_jwx//jwk", - "@com_github_lestrrat_go_jwx//jwt", + "@com_github_lestrrat_go_jwx_v2//jwk", + "@com_github_lestrrat_go_jwx_v2//jws", + "@com_github_lestrrat_go_jwx_v2//jwt", "@org_golang_x_exp//maps", ], ) @@ -58,9 +59,9 @@ go_test( "@com_github_cockroachdb_errors//:errors", "@com_github_cockroachdb_errors//oserror", "@com_github_cockroachdb_redact//:redact", - "@com_github_lestrrat_go_jwx//jwa", - "@com_github_lestrrat_go_jwx//jwk", - "@com_github_lestrrat_go_jwx//jwt", + "@com_github_lestrrat_go_jwx_v2//jwa", + "@com_github_lestrrat_go_jwx_v2//jwk", + "@com_github_lestrrat_go_jwx_v2//jwt", "@com_github_stretchr_testify//require", ], ) diff --git a/pkg/ccl/jwtauthccl/authentication_jwt.go b/pkg/ccl/jwtauthccl/authentication_jwt.go index f99354b71844..b23f0e385bc3 100644 --- a/pkg/ccl/jwtauthccl/authentication_jwt.go +++ b/pkg/ccl/jwtauthccl/authentication_jwt.go @@ -27,8 +27,9 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/uuid" "github.com/cockroachdb/errors" "github.com/cockroachdb/redact" - "github.com/lestrrat-go/jwx/jwk" - "github.com/lestrrat-go/jwx/jwt" + "github.com/lestrrat-go/jwx/v2/jwk" + "github.com/lestrrat-go/jwx/v2/jws" + "github.com/lestrrat-go/jwx/v2/jwt" ) const ( @@ -157,9 +158,13 @@ func (authenticator *jwtAuthenticator) ValidateJWTLogin( telemetry.Inc(beginAuthUseCounter) - // Just parse the token to check the format is valid and issuer is present. - // The token will be parsed again later to actually verify the signature. - unverifiedToken, err := jwt.Parse(tokenBytes) + // Validate the token as follows: + // 1. Check the token format and extract the issuer. + // The jwx/v2 library mandates signature verification with Parse, + // so use ParseInsecure instead. + // 2. Fetch the JWKS corresponding to the issuer. + // 3. Use Parse for signature verification. + unverifiedToken, err := jwt.ParseInsecure(tokenBytes) if err != nil { return "", errors.WithDetailf( errors.Newf("JWT authentication: invalid token"), @@ -185,7 +190,7 @@ func (authenticator *jwtAuthenticator) ValidateJWTLogin( } // Now that both the issuer and key-id are matched, parse the token again to validate the signature.
- parsedToken, err := jwt.Parse(tokenBytes, jwt.WithKeySet(jwkSet), jwt.WithValidate(true), jwt.InferAlgorithmFromKey(true)) + parsedToken, err := jwt.Parse(tokenBytes, jwt.WithKeySet(jwkSet, jws.WithInferAlgorithmFromKey(true)), jwt.WithValidate(true)) if err != nil { return "", errors.WithDetailf( errors.Newf("JWT authentication: invalid token"), diff --git a/pkg/ccl/jwtauthccl/authentication_jwt_test.go b/pkg/ccl/jwtauthccl/authentication_jwt_test.go index 97d3dd86b841..03818dca1132 100644 --- a/pkg/ccl/jwtauthccl/authentication_jwt_test.go +++ b/pkg/ccl/jwtauthccl/authentication_jwt_test.go @@ -42,9 +42,9 @@ import ( "github.com/cockroachdb/errors" "github.com/cockroachdb/errors/oserror" "github.com/cockroachdb/redact" - "github.com/lestrrat-go/jwx/jwa" - "github.com/lestrrat-go/jwx/jwk" - "github.com/lestrrat-go/jwx/jwt" + "github.com/lestrrat-go/jwx/v2/jwa" + "github.com/lestrrat-go/jwx/v2/jwk" + "github.com/lestrrat-go/jwx/v2/jwt" "github.com/stretchr/testify/require" ) @@ -72,8 +72,8 @@ func createJWKS(t *testing.T) (jwk.Set, jwk.Key, jwk.Key) { pubKey2, err := key2.PublicKey() require.NoError(t, err) set := jwk.NewSet() - set.Add(pubKey1) - set.Add(pubKey2) + require.NoError(t, set.AddKey(pubKey1)) + require.NoError(t, set.AddKey(pubKey2)) return set, key1, key2 } @@ -81,7 +81,7 @@ func createJWKS(t *testing.T) (jwk.Set, jwk.Key, jwk.Key) { func createECDSAKey(t *testing.T, keyID string) jwk.Key { raw, err := ecdsa.GenerateKey(elliptic.P384(), rand.Reader) require.NoError(t, err) - key, err := jwk.New(raw) + key, err := jwk.FromRaw(raw) require.NoError(t, err) require.NoError(t, key.Set(jwk.KeyIDKey, keyID)) require.NoError(t, key.Set(jwk.AlgorithmKey, jwa.ES384)) @@ -91,7 +91,7 @@ func createECDSAKey(t *testing.T, keyID string) jwk.Key { func createRSAKey(t *testing.T, keyID string) jwk.Key { raw, err := rsa.GenerateKey(rand.Reader, 2048) require.NoError(t, err) - key, err := jwk.New(raw) + key, err := jwk.FromRaw(raw) require.NoError(t, err) require.NoError(t, key.Set(jwk.KeyIDKey, keyID)) require.NoError(t, key.Set(jwk.AlgorithmKey, jwa.RS256)) @@ -117,7 +117,7 @@ func createJWT( if customClaimName != "" { require.NoError(t, token.Set(customClaimName, customClaimValue)) } - signedTokenBytes, err := jwt.Sign(token, algorithm, key) + signedTokenBytes, err := jwt.Sign(token, jwt.WithKey(algorithm, key)) require.NoError(t, err) return signedTokenBytes } @@ -257,7 +257,7 @@ func TestJWTMultiKey(t *testing.T) { publicKey, err := key.PublicKey() require.NoError(t, err) keySetWithOneKey := jwk.NewSet() - keySetWithOneKey.Add(publicKey) + require.NoError(t, keySetWithOneKey.AddKey(publicKey)) // Set the JWKS to only include jwk1. JWTAuthJWKS.Override(ctx, &s.ClusterSettings().SV, serializePublicKeySet(t, keySetWithOneKey)) @@ -301,7 +301,7 @@ func TestExpiredToken(t *testing.T) { // Validation fails with an invalid token error for tokens with an expiration date in the past. 
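// A condensed sketch (hypothetical function; assumes the issuer's JWKS has
// already been fetched) of the jwx/v2 flow adopted above: ParseInsecure
// extracts claims such as the issuer without verifying the signature, and a
// second Parse against the issuer's JWKS performs the actual verification and
// validation.
package jwtsketch

import (
	"github.com/lestrrat-go/jwx/v2/jwk"
	"github.com/lestrrat-go/jwx/v2/jws"
	"github.com/lestrrat-go/jwx/v2/jwt"
)

func twoPhaseParse(tokenBytes []byte, issuerJWKS jwk.Set) (jwt.Token, error) {
	// Phase 1: parse without verification to read the issuer claim.
	unverified, err := jwt.ParseInsecure(tokenBytes)
	if err != nil {
		return nil, err
	}
	_ = unverified.Issuer() // in the real flow, used to look up the issuer's JWKS

	// Phase 2: verify the signature against the key set and validate claims.
	return jwt.Parse(tokenBytes,
		jwt.WithKeySet(issuerJWKS, jws.WithInferAlgorithmFromKey(true)),
		jwt.WithValidate(true))
}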
_, err = verifier.ValidateJWTLogin(ctx, s.ClusterSettings(), username.MakeSQLUsernameFromPreNormalizedString(username1), token, identMap) require.ErrorContains(t, err, "JWT authentication: invalid token") - require.EqualValues(t, "unable to parse token: exp not satisfied", errors.GetAllDetails(err)[0]) + require.EqualValues(t, "unable to parse token: \"exp\" not satisfied", errors.GetAllDetails(err)[0]) } func TestKeyIdMismatch(t *testing.T) { @@ -701,7 +701,7 @@ func Test_JWKSFetchWorksWhenEnabled(t *testing.T) { // Create key from a file. This key will be used to sign the token. // Matching public key available in jwks URI is used to verify token. keySet := createJWKSFromFile(t, "testdata/www.idp1apis.com_oauth2_v3_certs_private") - key, _ := keySet.Get(0) + key, _ := keySet.Key(0) validIssuer := "https://accounts.idp1.com" token := createJWT(t, username1, audience1, validIssuer, timeutil.Now().Add(time.Hour), key, jwa.RS256, "", "") @@ -755,7 +755,7 @@ func Test_JWKSFetchWorksWhenEnabledIgnoresTheStaticJWKS(t *testing.T) { // Create key from a file. This key will be used to sign the token. // Matching public key available in jwks URI is used to verify token. keySetUsedForSigning := createJWKSFromFile(t, "testdata/www.idp1apis.com_oauth2_v3_certs_private") - key, _ := keySetUsedForSigning.Get(0) + key, _ := keySetUsedForSigning.Key(0) validIssuer := "https://accounts.idp1.com" token := createJWT(t, username1, audience1, validIssuer, timeutil.Now().Add(time.Hour), key, jwa.RS256, "", "") @@ -897,7 +897,7 @@ func TestJWTAuthWithCustomCACert(t *testing.T) { // Create a key to sign the token using testdata. // The same will be fetched through the JWKS URI to verify the token. keySet := createJWKSFromFile(t, "testdata/www.idp1apis.com_oauth2_v3_certs_private") - key, _ := keySet.Get(0) + key, _ := keySet.Key(0) issuer := testServerURL token := createJWT( t, username1, audience1, issuer, timeutil.Now().Add(time.Hour), key, jwa.RS256, "", "") @@ -1016,7 +1016,7 @@ func TestJWTAuthClientTimeout(t *testing.T) { // Create a key to sign the token using testdata. // The same will be fetched through the JWKS URI to verify the token. keySet := createJWKSFromFile(t, "testdata/www.idp1apis.com_oauth2_v3_certs_private") - key, _ := keySet.Get(0) + key, _ := keySet.Key(0) validIssuer := "https://accounts.idp1.com" token := createJWT( t, username1, audience1, validIssuer, timeutil.Now().Add(time.Hour), key, jwa.RS256, "", "") @@ -1087,7 +1087,7 @@ func TestJWTAuthWithIssuerJWKSConfAutoFetchJWKS(t *testing.T) { // Create a key to sign the token using testdata. // The same will be fetched through the JWKS URI to verify the token. 
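// A small sketch (hypothetical helper) of the jwx/v2 idioms the updated tests
// use: jwk.New is replaced by jwk.FromRaw, and Set.Add by Set.AddKey, which
// now returns an error that should be checked.
package jwksketch

import (
	"crypto/rand"
	"crypto/rsa"

	"github.com/lestrrat-go/jwx/v2/jwa"
	"github.com/lestrrat-go/jwx/v2/jwk"
)

func buildKeySet() (jwk.Set, error) {
	raw, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		return nil, err
	}
	// Wrap the raw crypto key in a JWK (v2 replaces jwk.New with jwk.FromRaw).
	key, err := jwk.FromRaw(raw)
	if err != nil {
		return nil, err
	}
	if err := key.Set(jwk.AlgorithmKey, jwa.RS256); err != nil {
		return nil, err
	}
	pub, err := key.PublicKey()
	if err != nil {
		return nil, err
	}
	set := jwk.NewSet()
	// v2's AddKey returns an error, unlike v1's Add.
	if err := set.AddKey(pub); err != nil {
		return nil, err
	}
	return set, nil
}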
keySet := createJWKSFromFile(t, "testdata/www.idp1apis.com_oauth2_v3_certs_private") - key, _ := keySet.Get(0) + key, _ := keySet.Key(0) issuer := testServerURL token := createJWT( t, username1, audience1, issuer, timeutil.Now().Add(time.Hour), key, jwa.RS256, "", "") diff --git a/pkg/ccl/jwtauthccl/settings.go b/pkg/ccl/jwtauthccl/settings.go index 78f495ad8b2e..5ce13d3d58c3 100644 --- a/pkg/ccl/jwtauthccl/settings.go +++ b/pkg/ccl/jwtauthccl/settings.go @@ -17,7 +17,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/settings" "github.com/cockroachdb/errors" - "github.com/lestrrat-go/jwx/jwk" + "github.com/lestrrat-go/jwx/v2/jwk" "golang.org/x/exp/maps" ) diff --git a/pkg/ccl/ldapccl/BUILD.bazel b/pkg/ccl/ldapccl/BUILD.bazel index 4cbfab87ddc5..1d0f456bfc12 100644 --- a/pkg/ccl/ldapccl/BUILD.bazel +++ b/pkg/ccl/ldapccl/BUILD.bazel @@ -4,6 +4,9 @@ go_library( name = "ldapccl", srcs = [ "authentication_ldap.go", + "authorization_ldap.go", + "ldap_manager.go", + "ldap_test_util.go", "ldap_util.go", "settings.go", ], @@ -13,6 +16,7 @@ go_library( "//pkg/ccl/utilccl", "//pkg/clusterversion", "//pkg/security", + "//pkg/security/distinguishedname", "//pkg/security/username", "//pkg/server/telemetry", "//pkg/settings", @@ -36,6 +40,7 @@ go_test( size = "small", srcs = [ "authentication_ldap_test.go", + "authorization_ldap_test.go", "main_test.go", "settings_test.go", ], @@ -45,11 +50,11 @@ go_test( "//pkg/base", "//pkg/ccl", "//pkg/security/certnames", + "//pkg/security/distinguishedname", "//pkg/security/securityassets", "//pkg/security/securitytest", "//pkg/security/username", "//pkg/server", - "//pkg/sql/pgwire/hba", "//pkg/testutils", "//pkg/testutils/serverutils", "//pkg/testutils/testcluster", @@ -58,7 +63,6 @@ go_test( "//pkg/util/randutil", "@com_github_cockroachdb_errors//:errors", "@com_github_cockroachdb_redact//:redact", - "@com_github_go_ldap_ldap_v3//:ldap", "@com_github_stretchr_testify//require", ], ) diff --git a/pkg/ccl/ldapccl/authentication_ldap.go b/pkg/ccl/ldapccl/authentication_ldap.go index 6db65507c726..a93ecd90f598 100644 --- a/pkg/ccl/ldapccl/authentication_ldap.go +++ b/pkg/ccl/ldapccl/authentication_ldap.go @@ -13,181 +13,143 @@ import ( "github.com/cockroachdb/cockroach/pkg/ccl/utilccl" "github.com/cockroachdb/cockroach/pkg/clusterversion" + "github.com/cockroachdb/cockroach/pkg/security/distinguishedname" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/server/telemetry" "github.com/cockroachdb/cockroach/pkg/settings/cluster" - "github.com/cockroachdb/cockroach/pkg/sql/pgwire" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/hba" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/identmap" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" - "github.com/cockroachdb/cockroach/pkg/util/log" - "github.com/cockroachdb/cockroach/pkg/util/syncutil" - "github.com/cockroachdb/cockroach/pkg/util/uuid" "github.com/cockroachdb/errors" "github.com/cockroachdb/redact" + "github.com/go-ldap/ldap/v3" ) const ( - counterPrefix = "auth.ldap." 
- beginAuthCounterName = counterPrefix + "begin_auth" + beginAuthNCounterName = counterPrefix + "begin_authentication" loginSuccessCounterName = counterPrefix + "login_success" - enableCounterName = counterPrefix + "enable" ) var ( - beginAuthUseCounter = telemetry.GetCounterOnce(beginAuthCounterName) + beginAuthNUseCounter = telemetry.GetCounterOnce(beginAuthNCounterName) loginSuccessUseCounter = telemetry.GetCounterOnce(loginSuccessCounterName) - enableUseCounter = telemetry.GetCounterOnce(enableCounterName) ) -// ldapAuthenticator is an object that is used to enable ldap connection -// validation that are used as part of the CRDB client auth flow. -// -// The implementation uses the `go-ldap/ldap/` client package and is supported -// through a number of cluster settings defined in `ldapccl/settings.go`. These -// settings specify how the ldap auth attempt should be executed and if this -// feature is enabled. -type ldapAuthenticator struct { - mu struct { - syncutil.RWMutex - // conf contains all the values that come from cluster settings. - conf ldapAuthenticatorConf - // util contains connection object required for interfacing with ldap server. - util ILDAPUtil - // enabled represents the present state of if this feature is enabled. It - // is set to true once ldap util is initialized. - enabled bool +// FetchLDAPUserDN fetches the LDAP server DN for the sql user authenticating via LDAP. +// In particular, it checks that: +// * The cluster has an enterprise license. +// * The active cluster version is 24.2 for this feature. +// * LDAP authManager is enabled after settings were reloaded. +// * The auth attempt is not for a reserved user. +// * The hba conf entry options could be parsed to obtain ldap server params. +// * All ldap server params are valid. +// * Configured bind DN and password can be used to search for the sql user DN on ldap server. +// It returns the retrievedUserDN, which is the DN associated with the user on the +// LDAP server, authError (which is the error sql clients will see in case of +// failures), and detailedError (which is the internal error from ldap clients +// that might contain sensitive information we do not want to send to sql +// clients but still want to log). We do not want to send any information +// back to the client that was not provided by the client. +func (authManager *ldapAuthManager) FetchLDAPUserDN( + ctx context.Context, + st *cluster.Settings, + user username.SQLUsername, + entry *hba.Entry, + _ *identmap.Conf, +) (retrievedUserDN *ldap.DN, detailedErrorMsg redact.RedactableString, authError error) { + if err := utilccl.CheckEnterpriseEnabled(st, "LDAP authentication"); err != nil { + return nil, "", err } - // clusterUUID is used to check the validity of the enterprise license. It is - // set once at initialization. - clusterUUID uuid.UUID -} - -// ldapAuthenticatorConf contains all the values to configure LDAP -// authentication. These values are copied from the matching cluster settings or -// from hba conf options for LDAP entry. -type ldapAuthenticatorConf struct { - domainCACert string - clientTLSCert string - clientTLSKey string - ldapServer string - ldapPort string - ldapBaseDN string - ldapBindDN string - ldapBindPassword string - ldapSearchFilter string - ldapSearchAttribute string -} - -// reloadConfig locks mutex and then refreshes the values in conf from the cluster settings.
-func (authenticator *ldapAuthenticator) reloadConfig(ctx context.Context, st *cluster.Settings) { - authenticator.mu.Lock() - defer authenticator.mu.Unlock() - authenticator.reloadConfigLocked(ctx, st) -} - -// reloadConfig refreshes the values in conf from the cluster settings without locking the mutex. -func (authenticator *ldapAuthenticator) reloadConfigLocked( - ctx context.Context, st *cluster.Settings, -) { - conf := ldapAuthenticatorConf{ - domainCACert: LDAPDomainCACertificate.Get(&st.SV), - clientTLSCert: LDAPClientTLSCertSetting.Get(&st.SV), - clientTLSKey: LDAPClientTLSKeySetting.Get(&st.SV), + if !st.Version.IsActive(ctx, clusterversion.V24_2) { + return nil, "", pgerror.Newf(pgcode.FeatureNotSupported, "LDAP authentication is only supported after v24.2 upgrade is finalized") } - authenticator.mu.conf = conf - var err error - authenticator.mu.util, err = NewLDAPUtil(ctx, authenticator.mu.conf) - if err != nil { - log.Warningf(ctx, "LDAP authentication: unable to initialize LDAP connection: %v", err) - return + authManager.mu.Lock() + defer authManager.mu.Unlock() + if !authManager.mu.enabled { + return nil, "", errors.Newf("LDAP authentication: not enabled") } - if !authenticator.mu.enabled { - telemetry.Inc(enableUseCounter) + if user.IsRootUser() || user.IsReserved() { + return nil, "", errors.WithDetailf( + errors.Newf("LDAP authentication: invalid identity"), + "cannot use LDAP auth to login to a reserved user %s", user.Normalized()) } - authenticator.mu.enabled = true - log.Infof(ctx, "initialized LDAP authenticator") -} -// setLDAPConfigOptions extracts hba conf parameters required for connecting and -// querying LDAP server from hba conf entry and sets them for LDAP authenticator. -func (authenticator *ldapAuthenticator) setLDAPConfigOptions(entry *hba.Entry) error { - conf := ldapAuthenticatorConf{ - domainCACert: authenticator.mu.conf.domainCACert, - } - for _, opt := range entry.Options { - switch opt[0] { - case "ldapserver": - conf.ldapServer = opt[1] - case "ldapport": - conf.ldapPort = opt[1] - case "ldapbasedn": - conf.ldapBaseDN = opt[1] - case "ldapbinddn": - conf.ldapBindDN = opt[1] - case "ldapbindpasswd": - conf.ldapBindPassword = opt[1] - case "ldapsearchfilter": - conf.ldapSearchFilter = opt[1] - case "ldapsearchattribute": - conf.ldapSearchAttribute = opt[1] - default: - return errors.Newf("invalid LDAP option provided in hba conf: %s", opt[0]) - } + if err := authManager.setLDAPConfigOptions(entry); err != nil { + return nil, redact.Sprintf("error parsing hba conf options for LDAP: %v", err), + errors.Newf("LDAP authentication: unable to parse hba conf options") } - authenticator.mu.conf = conf - return nil -} -// validateLDAPOptions checks the ldap authenticator config values for validity. 
-func (authenticator *ldapAuthenticator) validateLDAPOptions() error { - const ldapOptionsErrorMsg = "ldap params in HBA conf missing" - if authenticator.mu.conf.ldapServer == "" { - return errors.New(ldapOptionsErrorMsg + " ldap server") + if err := authManager.validateLDAPBaseOptions(); err != nil { + return nil, redact.Sprintf("error validating base hba conf options for LDAP: %v", err), + errors.Newf("LDAP authentication: unable to validate authManager base options") } - if authenticator.mu.conf.ldapPort == "" { - return errors.New(ldapOptionsErrorMsg + " ldap port") + + if err := authManager.validateLDAPUserFetchOptions(); err != nil { + return nil, redact.Sprintf("error validating authentication hba conf options for LDAP: %v", err), + errors.Newf("LDAP authentication: unable to validate authManager authentication options") } - if authenticator.mu.conf.ldapBaseDN == "" { - return errors.New(ldapOptionsErrorMsg + " base DN") + + // Establish an LDAPs connection with the configured LDAP server and port. + err := authManager.mu.util.MaybeInitLDAPsConn(ctx, authManager.mu.conf) + if err != nil { + return nil, redact.Sprintf("error when trying to create LDAP connection: %v", err), + errors.Newf("LDAP authentication: unable to establish LDAP connection") } - if authenticator.mu.conf.ldapBindDN == "" { - return errors.New(ldapOptionsErrorMsg + " bind DN") + + // Fetch the ldap server Distinguished Name using the sql username as the search + // value for the ldap search attribute. + userDN, err := authManager.mu.util.Search(ctx, authManager.mu.conf, user.Normalized()) + if err != nil { + return nil, redact.Sprintf("error when searching for user in LDAP server: %v", err), + errors.WithDetailf( + errors.Newf("LDAP authentication: unable to find LDAP user distinguished name"), + "cannot find provided user %s on LDAP server", user.Normalized()) } - if authenticator.mu.conf.ldapBindPassword == "" { - return errors.New(ldapOptionsErrorMsg + " bind password") + + retrievedUserDN, err = distinguishedname.ParseDN(userDN) + if err != nil { + return nil, redact.Sprintf("error parsing user DN %s obtained from LDAP server: %v", userDN, err), + errors.WithDetailf( + errors.Newf("LDAP authentication: unable to parse LDAP user distinguished name"), + "cannot find provided user %s on LDAP server", user.Normalized()) } - if authenticator.mu.conf.ldapSearchFilter == "" { + + return retrievedUserDN, "", nil +} + +// validateLDAPUserFetchOptions checks the ldap user search config values. +func (authManager *ldapAuthManager) validateLDAPUserFetchOptions() error { + const ldapOptionsErrorMsg = "ldap authentication params in HBA conf missing" + if authManager.mu.conf.ldapSearchFilter == "" { return errors.New(ldapOptionsErrorMsg + " search filter") } - if authenticator.mu.conf.ldapSearchAttribute == "" { + if authManager.mu.conf.ldapSearchAttribute == "" { return errors.New(ldapOptionsErrorMsg + " search attribute") } return nil } -// ValidateLDAPLogin validates an attempt to bind to an LDAP server. +// ValidateLDAPLogin validates an attempt to bind the provided user DN to the configured LDAP server. // In particular, it checks that: // * The cluster has an enterprise license. // * The active cluster version is 24.2 for this feature. -// * LDAP authentication is enabled after settings were reloaded. -// * The auth attempt is not for a reserved user. +// * LDAP authManager is enabled after settings were reloaded. // * The hba conf entry options could be parsed to obtain ldap server params. // * All ldap server params are valid.
// * LDAPs connection can be established with configured server. -// * Configured bind DN and password can be used to search for the sql user DN on ldap server. -// * The obtained user DN could be used to bind with the password from sql connection string. +// * The provided user DN could be used to bind with the password from the sql connection string. // It returns authError (which is the error sql clients will see in case of // failures) and detailedError (which is the internal error from ldap clients // that might contain sensitive information we do not want to send to sql // clients but still want to log it). We do not want to send any information // back to client which was not provided by the client. -func (authenticator *ldapAuthenticator) ValidateLDAPLogin( +func (authManager *ldapAuthManager) ValidateLDAPLogin( ctx context.Context, st *cluster.Settings, + ldapUserDN *ldap.DN, user username.SQLUsername, ldapPwd string, entry *hba.Entry, @@ -200,52 +162,36 @@ func (authenticator *ldapAuthenticator) ValidateLDAPLogin( return "", pgerror.Newf(pgcode.FeatureNotSupported, "LDAP authentication is only supported after v24.2 upgrade is finalized") } - authenticator.mu.Lock() - defer authenticator.mu.Unlock() + authManager.mu.Lock() + defer authManager.mu.Unlock() - if !authenticator.mu.enabled { + if !authManager.mu.enabled { return "", errors.Newf("LDAP authentication: not enabled") } - telemetry.Inc(beginAuthUseCounter) + telemetry.Inc(beginAuthNUseCounter) - if user.IsRootUser() || user.IsReserved() { - return "", errors.WithDetailf( - errors.Newf("LDAP authentication: invalid identity"), - "cannot use LDAP auth to login to a reserved user %s", user.Normalized()) - } - - if err := authenticator.setLDAPConfigOptions(entry); err != nil { + if err := authManager.setLDAPConfigOptions(entry); err != nil { return redact.Sprintf("error parsing hba conf options for LDAP: %v", err), errors.Newf("LDAP authentication: unable to parse hba conf options") } - if err := authenticator.validateLDAPOptions(); err != nil { - return redact.Sprintf("error validating hba conf options for LDAP: %v", err), - errors.Newf("LDAP authentication: unable to validate authenticator options") + if err := authManager.validateLDAPBaseOptions(); err != nil { + return redact.Sprintf("error validating base hba conf options for LDAP: %v", err), + errors.Newf("LDAP authentication: unable to validate authManager base options") } // Establish a LDAPs connection with the set LDAP server and port - err := authenticator.mu.util.InitLDAPsConn(ctx, authenticator.mu.conf) + err := authManager.mu.util.MaybeInitLDAPsConn(ctx, authManager.mu.conf) if err != nil { return redact.Sprintf("error when trying to create LDAP connection: %v", err), errors.Newf("LDAP authentication: unable to establish LDAP connection") } - // Fetch the ldap server Distinguished Name using sql username as search value - // for ldap search attribute - userDN, err := authenticator.mu.util.Search(ctx, authenticator.mu.conf, user.Normalized()) - if err != nil { - return redact.Sprintf("error when searching for user in LDAP server: %v", err), - errors.WithDetailf( - errors.Newf("LDAP authentication: unable to find LDAP user distinguished name"), - "cannot find provided user %s on LDAP server", user.Normalized()) - } - // Bind as the user to verify their password - err = authenticator.mu.util.Bind(ctx, userDN, ldapPwd) + err = authManager.mu.util.Bind(ctx, ldapUserDN.String(), ldapPwd) if err != nil { return redact.Sprintf("error when binding as user %s with DN(%s) in
LDAP server: %v", - user.Normalized(), userDN, err, + user.Normalized(), ldapUserDN, err, ), errors.WithDetailf( errors.Newf("LDAP authentication: unable to bind as LDAP user"), @@ -255,24 +201,3 @@ func (authenticator *ldapAuthenticator) ValidateLDAPLogin( telemetry.Inc(loginSuccessUseCounter) return "", nil } - -// ConfigureLDAPAuth initializes and returns a ldapAuthenticator. It also sets up listeners so -// that the ldapAuthenticator's config is updated when the cluster settings values change. -var ConfigureLDAPAuth = func( - serverCtx context.Context, - ambientCtx log.AmbientContext, - st *cluster.Settings, - clusterUUID uuid.UUID, -) pgwire.LDAPVerifier { - authenticator := ldapAuthenticator{} - authenticator.clusterUUID = clusterUUID - authenticator.reloadConfig(serverCtx, st) - LDAPDomainCACertificate.SetOnChange(&st.SV, func(ctx context.Context) { - authenticator.reloadConfig(ambientCtx.AnnotateCtx(ctx), st) - }) - return &authenticator -} - -func init() { - pgwire.ConfigureLDAPAuth = ConfigureLDAPAuth -} diff --git a/pkg/ccl/ldapccl/authentication_ldap_test.go b/pkg/ccl/ldapccl/authentication_ldap_test.go index 6725cf6a47b3..3c4b103de573 100644 --- a/pkg/ccl/ldapccl/authentication_ldap_test.go +++ b/pkg/ccl/ldapccl/authentication_ldap_test.go @@ -12,131 +12,33 @@ import ( "context" "crypto/tls" "fmt" - "strings" "testing" "github.com/cockroachdb/cockroach/pkg/base" + "github.com/cockroachdb/cockroach/pkg/security/distinguishedname" "github.com/cockroachdb/cockroach/pkg/security/username" - "github.com/cockroachdb/cockroach/pkg/sql/pgwire/hba" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/errors" "github.com/cockroachdb/redact" - "github.com/go-ldap/ldap/v3" "github.com/stretchr/testify/require" ) -const ( - emptyParam = "empty" - invalidParam = "invalid" -) - -type mockLDAPUtil struct { - conn *ldap.Conn - tlsConfig *tls.Config -} - -// InitLDAPsConn implements the ILDAPUtil interface. -func (lu *mockLDAPUtil) InitLDAPsConn(ctx context.Context, conf ldapAuthenticatorConf) error { - if strings.Contains(conf.ldapServer, invalidParam) { - return errors.Newf(ldapsFailureMessage + ": invalid ldap server provided") - } else if strings.Contains(conf.ldapPort, invalidParam) { - return errors.Newf(ldapsFailureMessage + ": invalid ldap port provided") - } - lu.conn = &ldap.Conn{} - return nil -} - -// Bind implements the ILDAPUtil interface. -func (lu *mockLDAPUtil) Bind(ctx context.Context, userDN string, ldapPwd string) error { - if strings.Contains(userDN, invalidParam) { - return errors.Newf(bindFailureMessage + ": invalid username provided") - } else if strings.Contains(ldapPwd, invalidParam) { - return errors.Newf(bindFailureMessage + ": invalid password provided") - } - - return nil -} - -// Search implements the ILDAPUtil interface. 
-func (lu *mockLDAPUtil) Search( - ctx context.Context, conf ldapAuthenticatorConf, username string, -) (userDN string, err error) { - if err := lu.Bind(ctx, conf.ldapBindDN, conf.ldapBindPassword); err != nil { - return "", errors.Wrap(err, searchFailureMessage) - } - if strings.Contains(conf.ldapBaseDN, invalidParam) { - return "", errors.Newf(searchFailureMessage+": invalid base DN %q provided", conf.ldapBaseDN) - } - if strings.Contains(conf.ldapSearchFilter, invalidParam) { - return "", errors.Newf(searchFailureMessage+": invalid search filter %q provided", conf.ldapSearchFilter) - } - if strings.Contains(conf.ldapSearchAttribute, invalidParam) { - return "", errors.Newf(searchFailureMessage+": invalid search attribute %q provided", conf.ldapSearchAttribute) - } - if strings.Contains(username, invalidParam) { - return "", errors.Newf(searchFailureMessage+": invalid search value %q provided", username) - } - distinguishedNames := strings.Split(username, ",") - switch { - case len(username) == 0: - return "", errors.Newf(searchFailureMessage+": user %q does not exist", username) - case len(distinguishedNames) > 1: - return "", errors.Newf(searchFailureMessage+": too many matching entries returned for user %q", username) - } - return distinguishedNames[0], nil -} - -var _ ILDAPUtil = &mockLDAPUtil{} - -func constructHBAEntry( - t *testing.T, - hbaEntryBase string, - hbaConfLDAPDefaultOpts map[string]string, - hbaConfLDAPOpts map[string]string, -) hba.Entry { - hbaEntryLDAP := hbaEntryBase - // add options from default and override default options when provided with one - for opt, value := range hbaConfLDAPDefaultOpts { - setValue := value - if hbaConfLDAPOpts[opt] == emptyParam { - continue - } else if hbaConfLDAPOpts[opt] != "" { - setValue = hbaConfLDAPOpts[opt] - } - hbaEntryLDAP += fmt.Sprintf("\"%s=%s\" ", opt, setValue) - } - // add non default options - for additionalOpt, additionalOptValue := range hbaConfLDAPOpts { - if _, ok := hbaConfLDAPDefaultOpts[additionalOpt]; !ok { - hbaEntryLDAP += fmt.Sprintf("\"%s=%s\" ", additionalOpt, additionalOptValue) - } - } - hbaConf, err := hba.ParseAndNormalize(hbaEntryLDAP) - if err != nil { - t.Fatalf("error parsing hba conf: %v", err) - } - if len(hbaConf.Entries) != 1 { - t.Fatalf("hba conf value invalid: should contain only 1 entry") - } - return hbaConf.Entries[0] -} - -func TestLDAPAuthentication(t *testing.T) { +func TestLDAPFetchUser(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) // Intercept the call to NewLDAPUtil and return the mocked NewLDAPUtil function defer testutils.TestingHook( &NewLDAPUtil, - func(ctx context.Context, conf ldapAuthenticatorConf) (ILDAPUtil, error) { + func(ctx context.Context, conf ldapConfig) (ILDAPUtil, error) { return &mockLDAPUtil{tlsConfig: &tls.Config{}}, nil })() ctx := context.Background() s := serverutils.StartServerOnly(t, base.TestServerArgs{}) defer s.Stopper().Stop(ctx) - verifier := ConfigureLDAPAuth(ctx, s.AmbientCtx(), s.ClusterSettings(), s.StorageClusterID()) + manager := ConfigureLDAPAuth(ctx, s.AmbientCtx(), s.ClusterSettings(), s.StorageClusterID()) hbaEntryBase := "host all all all ldap " hbaConfLDAPDefaultOpts := map[string]string{ "ldapserver": "localhost", "ldapport": "636", "ldapbasedn": "dc=localhost", "ldapbinddn": "cn=readonly,dc=localhost", @@ -146,108 +48,180 @@ func TestLDAPAuthentication(t *testing.T) { testName string hbaConfLDAPOpts map[string]string user string - pwd string - ldapAuthSuccess bool + fetchUserSuccess bool expectedErr string 
expectedErrDetails string expectedDetailedErrMsg string }{ {testName: "proper hba conf and valid user cred", - user: "foo", pwd: "bar", ldapAuthSuccess: true}, + user: "foo", fetchUserSuccess: true}, {testName: "proper hba conf and root user cred", - user: "root", pwd: "bar", ldapAuthSuccess: false, + user: "root", fetchUserSuccess: false, expectedErr: "LDAP authentication: invalid identity", expectedErrDetails: "cannot use LDAP auth to login to a reserved user root"}, {testName: "proper hba conf and node user cred", - user: "node", pwd: "bar", ldapAuthSuccess: false, expectedErr: "LDAP authentication: invalid identity", + user: "node", fetchUserSuccess: false, expectedErr: "LDAP authentication: invalid identity", expectedErrDetails: "cannot use LDAP auth to login to a reserved user node"}, {testName: "invalid ldap option", - hbaConfLDAPOpts: map[string]string{"invalidOpt": "invalidVal"}, user: "foo", pwd: "bar", ldapAuthSuccess: false, + hbaConfLDAPOpts: map[string]string{"invalidOpt": "invalidVal"}, user: "foo", fetchUserSuccess: false, expectedErr: "LDAP authentication: unable to parse hba conf options", expectedDetailedErrMsg: `error parsing hba conf options for LDAP: invalid LDAP option provided in hba conf: ‹invalidOpt›`}, {testName: "empty server", - hbaConfLDAPOpts: map[string]string{"ldapserver": emptyParam}, user: "foo", pwd: "bar", ldapAuthSuccess: false, - expectedErr: "LDAP authentication: unable to validate authenticator options", - expectedDetailedErrMsg: "error validating hba conf options for LDAP: ldap params in HBA conf missing ldap server"}, + hbaConfLDAPOpts: map[string]string{"ldapserver": emptyParam}, user: "foo", fetchUserSuccess: false, + expectedErr: "LDAP authentication: unable to validate authManager base options", + expectedDetailedErrMsg: "error validating base hba conf options for LDAP: ldap params in HBA conf missing ldap server"}, {testName: "invalid server", - hbaConfLDAPOpts: map[string]string{"ldapserver": invalidParam}, user: "foo", pwd: "bar", ldapAuthSuccess: false, + hbaConfLDAPOpts: map[string]string{"ldapserver": invalidParam}, user: "foo", fetchUserSuccess: false, expectedErr: "LDAP authentication: unable to establish LDAP connection", expectedDetailedErrMsg: "error when trying to create LDAP connection: LDAPs connection failed: invalid ldap server provided"}, {testName: "empty port", - hbaConfLDAPOpts: map[string]string{"ldapport": emptyParam}, user: "foo", pwd: "bar", ldapAuthSuccess: false, - expectedErr: "LDAP authentication: unable to validate authenticator options", - expectedDetailedErrMsg: "error validating hba conf options for LDAP: ldap params in HBA conf missing ldap port"}, + hbaConfLDAPOpts: map[string]string{"ldapport": emptyParam}, user: "foo", fetchUserSuccess: false, + expectedErr: "LDAP authentication: unable to validate authManager base options", + expectedDetailedErrMsg: "error validating base hba conf options for LDAP: ldap params in HBA conf missing ldap port"}, {testName: "invalid port", - hbaConfLDAPOpts: map[string]string{"ldapport": invalidParam}, user: "foo", pwd: "bar", ldapAuthSuccess: false, + hbaConfLDAPOpts: map[string]string{"ldapport": invalidParam}, user: "foo", fetchUserSuccess: false, expectedErr: "LDAP authentication: unable to establish LDAP connection", expectedDetailedErrMsg: "error when trying to create LDAP connection: LDAPs connection failed: invalid ldap port provided"}, {testName: "empty base dn", - hbaConfLDAPOpts: map[string]string{"ldapbasedn": emptyParam}, user: "foo", pwd: "bar", ldapAuthSuccess: 
false, - expectedErr: "LDAP authentication: unable to validate authenticator options", - expectedDetailedErrMsg: "error validating hba conf options for LDAP: ldap params in HBA conf missing base DN"}, + hbaConfLDAPOpts: map[string]string{"ldapbasedn": emptyParam}, user: "foo", fetchUserSuccess: false, + expectedErr: "LDAP authentication: unable to validate authManager base options", + expectedDetailedErrMsg: "error validating base hba conf options for LDAP: ldap params in HBA conf missing base DN"}, {testName: "invalid base dn", - hbaConfLDAPOpts: map[string]string{"ldapbasedn": invalidParam}, user: "foo", pwd: "bar", ldapAuthSuccess: false, + hbaConfLDAPOpts: map[string]string{"ldapbasedn": invalidParam}, user: "foo", fetchUserSuccess: false, expectedErr: "LDAP authentication: unable to find LDAP user distinguished name", expectedErrDetails: "cannot find provided user foo on LDAP server", expectedDetailedErrMsg: `error when searching for user in LDAP server: LDAP search failed: invalid base DN ‹"invalid"› provided`}, {testName: "empty bind dn", - hbaConfLDAPOpts: map[string]string{"ldapbinddn": emptyParam}, user: "foo", pwd: "bar", ldapAuthSuccess: false, - expectedErr: "LDAP authentication: unable to validate authenticator options", - expectedDetailedErrMsg: "error validating hba conf options for LDAP: ldap params in HBA conf missing bind DN"}, + hbaConfLDAPOpts: map[string]string{"ldapbinddn": emptyParam}, user: "foo", fetchUserSuccess: false, + expectedErr: "LDAP authentication: unable to validate authManager base options", + expectedDetailedErrMsg: "error validating base hba conf options for LDAP: ldap params in HBA conf missing bind DN"}, {testName: "invalid bind dn", - hbaConfLDAPOpts: map[string]string{"ldapbinddn": invalidParam}, user: "foo", pwd: "bar", ldapAuthSuccess: false, + hbaConfLDAPOpts: map[string]string{"ldapbinddn": invalidParam}, user: "foo", fetchUserSuccess: false, expectedErr: "LDAP authentication: unable to find LDAP user distinguished name", expectedErrDetails: "cannot find provided user foo on LDAP server", expectedDetailedErrMsg: "error when searching for user in LDAP server: LDAP search failed: LDAP bind failed: invalid username provided"}, {testName: "empty bind pwd", - hbaConfLDAPOpts: map[string]string{"ldapbindpasswd": emptyParam}, user: "foo", pwd: "bar", ldapAuthSuccess: false, - expectedErr: "LDAP authentication: unable to validate authenticator options", - expectedDetailedErrMsg: "error validating hba conf options for LDAP: ldap params in HBA conf missing bind password"}, + hbaConfLDAPOpts: map[string]string{"ldapbindpasswd": emptyParam}, user: "foo", fetchUserSuccess: false, + expectedErr: "LDAP authentication: unable to validate authManager base options", + expectedDetailedErrMsg: "error validating base hba conf options for LDAP: ldap params in HBA conf missing bind password"}, {testName: "invalid bind pwd", - hbaConfLDAPOpts: map[string]string{"ldapbindpasswd": invalidParam}, user: "foo", pwd: "bar", ldapAuthSuccess: false, + hbaConfLDAPOpts: map[string]string{"ldapbindpasswd": invalidParam}, user: "foo", fetchUserSuccess: false, expectedErr: "LDAP authentication: unable to find LDAP user distinguished name", expectedErrDetails: "cannot find provided user foo on LDAP server", expectedDetailedErrMsg: "error when searching for user in LDAP server: LDAP search failed: LDAP bind failed: invalid password provided"}, {testName: "empty search attribute", - hbaConfLDAPOpts: map[string]string{"ldapsearchattribute": emptyParam}, user: "foo", pwd: "bar", 
ldapAuthSuccess: false, - expectedErr: "LDAP authentication: unable to validate authenticator options", - expectedDetailedErrMsg: "error validating hba conf options for LDAP: ldap params in HBA conf missing search attribute"}, + hbaConfLDAPOpts: map[string]string{"ldapsearchattribute": emptyParam}, user: "foo", fetchUserSuccess: false, + expectedErr: "LDAP authentication: unable to validate authManager authentication options", + expectedDetailedErrMsg: "error validating authentication hba conf options for LDAP: ldap authentication params in HBA conf missing search attribute"}, {testName: "invalid search attribute", - hbaConfLDAPOpts: map[string]string{"ldapsearchattribute": invalidParam}, user: "foo", pwd: "bar", ldapAuthSuccess: false, + hbaConfLDAPOpts: map[string]string{"ldapsearchattribute": invalidParam}, user: "foo", fetchUserSuccess: false, expectedErr: "LDAP authentication: unable to find LDAP user distinguished name", expectedErrDetails: "cannot find provided user foo on LDAP server", expectedDetailedErrMsg: `error when searching for user in LDAP server: LDAP search failed: invalid search attribute ‹"invalid"› provided`}, {testName: "empty search filter", - hbaConfLDAPOpts: map[string]string{"ldapsearchfilter": emptyParam}, user: "foo", pwd: "bar", ldapAuthSuccess: false, - expectedErr: "LDAP authentication: unable to validate authenticator options", - expectedDetailedErrMsg: "error validating hba conf options for LDAP: ldap params in HBA conf missing search filter"}, + hbaConfLDAPOpts: map[string]string{"ldapsearchfilter": emptyParam}, user: "foo", fetchUserSuccess: false, + expectedErr: "LDAP authentication: unable to validate authManager authentication options", + expectedDetailedErrMsg: "error validating authentication hba conf options for LDAP: ldap authentication params in HBA conf missing search filter"}, {testName: "invalid search filter", - hbaConfLDAPOpts: map[string]string{"ldapsearchfilter": invalidParam}, user: "foo", pwd: "bar", ldapAuthSuccess: false, + hbaConfLDAPOpts: map[string]string{"ldapsearchfilter": invalidParam}, user: "foo", fetchUserSuccess: false, expectedErr: "LDAP authentication: unable to find LDAP user distinguished name", expectedErrDetails: "cannot find provided user foo on LDAP server", expectedDetailedErrMsg: `error when searching for user in LDAP server: LDAP search failed: invalid search filter ‹"invalid"› provided`}, {testName: "invalid ldap user", - user: invalidParam, pwd: "bar", ldapAuthSuccess: false, expectedErr: "LDAP authentication: unable to find LDAP user distinguished name", + user: invalidParam, fetchUserSuccess: false, expectedErr: "LDAP authentication: unable to find LDAP user distinguished name", expectedErrDetails: "cannot find provided user invalid on LDAP server", expectedDetailedErrMsg: `error when searching for user in LDAP server: LDAP search failed: invalid search value ‹"invalid"› provided`}, {testName: "no such ldap user", - user: "", pwd: "bar", ldapAuthSuccess: false, expectedErr: "LDAP authentication: unable to find LDAP user distinguished name", + user: "", fetchUserSuccess: false, expectedErr: "LDAP authentication: unable to find LDAP user distinguished name", expectedErrDetails: "cannot find provided user on LDAP server", expectedDetailedErrMsg: `error when searching for user in LDAP server: LDAP search failed: user ‹""› does not exist`}, {testName: "too many matching ldap users", - user: "foo,foo2,foo3", pwd: "bar", ldapAuthSuccess: false, expectedErr: "LDAP authentication: unable to find LDAP user distinguished 
name", + user: "foo,foo2,foo3", fetchUserSuccess: false, expectedErr: "LDAP authentication: unable to find LDAP user distinguished name", expectedErrDetails: "cannot find provided user foo,foo2,foo3 on LDAP server", expectedDetailedErrMsg: `error when searching for user in LDAP server: LDAP search failed: too many matching entries returned for user ‹"foo,foo2,foo3"›`}, + } + for i, tc := range testCases { + t.Run(fmt.Sprintf("%d: testName:%v hbConfOpts:%v user:%v fetchUserSuccess:%v", i, tc.testName, tc.hbaConfLDAPOpts, tc.user, tc.fetchUserSuccess), func(t *testing.T) { + hbaEntry := constructHBAEntry(t, hbaEntryBase, hbaConfLDAPDefaultOpts, tc.hbaConfLDAPOpts) + _, detailedErrorMsg, err := manager.FetchLDAPUserDN( + ctx, s.ClusterSettings(), username.MakeSQLUsernameFromPreNormalizedString(tc.user), &hbaEntry, nil) + + if (err == nil) != tc.fetchUserSuccess { + t.Fatalf("expected success=%t, got err=%v", tc.fetchUserSuccess, err) + } + if err != nil { + require.Equal(t, tc.expectedErr, err.Error()) + require.Equal(t, tc.expectedErrDetails, errors.FlattenDetails(err)) + require.Equal(t, redact.RedactableString(tc.expectedDetailedErrMsg), detailedErrorMsg) + } + }) + } +} + +func TestLDAPAuthentication(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + // Intercept the call to NewLDAPUtil and return the mocked NewLDAPUtil function + defer testutils.TestingHook( + &NewLDAPUtil, + func(ctx context.Context, conf ldapConfig) (ILDAPUtil, error) { + return &mockLDAPUtil{tlsConfig: &tls.Config{}}, nil + })() + ctx := context.Background() + s := serverutils.StartServerOnly(t, base.TestServerArgs{}) + defer s.Stopper().Stop(ctx) + manager := ConfigureLDAPAuth(ctx, s.AmbientCtx(), s.ClusterSettings(), s.StorageClusterID()) + hbaEntryBase := "host all all all ldap " + hbaConfLDAPDefaultOpts := map[string]string{ + "ldapserver": "localhost", "ldapport": "636", "ldapbasedn": "dc=localhost", "ldapbinddn": "cn=readonly,dc=localhost", + "ldapbindpasswd": "readonly_pwd", "ldapsearchattribute": "uid", "ldapsearchfilter": "(memberOf=cn=users,ou=groups,dc=localhost)", + } + testCases := []struct { + testName string + hbaConfLDAPOpts map[string]string + user string + pwd string + ldapAuthSuccess bool + expectedErr string + expectedErrDetails string + expectedDetailedErrMsg string + }{ + {testName: "proper hba conf and valid user cred", + user: "foo", pwd: "bar", ldapAuthSuccess: true}, + {testName: "invalid ldap option", + hbaConfLDAPOpts: map[string]string{"invalidOpt": "invalidVal"}, user: "foo", pwd: "bar", ldapAuthSuccess: false, + expectedErr: "LDAP authentication: unable to parse hba conf options", + expectedDetailedErrMsg: `error parsing hba conf options for LDAP: invalid LDAP option provided in hba conf: ‹invalidOpt›`}, + {testName: "empty server", + hbaConfLDAPOpts: map[string]string{"ldapserver": emptyParam}, user: "foo", pwd: "bar", ldapAuthSuccess: false, + expectedErr: "LDAP authentication: unable to validate authManager base options", + expectedDetailedErrMsg: "error validating base hba conf options for LDAP: ldap params in HBA conf missing ldap server"}, + {testName: "invalid server", + hbaConfLDAPOpts: map[string]string{"ldapserver": invalidParam}, user: "foo", pwd: "bar", ldapAuthSuccess: false, + expectedErr: "LDAP authentication: unable to establish LDAP connection", + expectedDetailedErrMsg: "error when trying to create LDAP connection: LDAPs connection failed: invalid ldap server provided"}, + {testName: "empty port", + hbaConfLDAPOpts: 
map[string]string{"ldapport": emptyParam}, user: "foo", pwd: "bar", ldapAuthSuccess: false, + expectedErr: "LDAP authentication: unable to validate authManager base options", + expectedDetailedErrMsg: "error validating base hba conf options for LDAP: ldap params in HBA conf missing ldap port"}, + {testName: "invalid port", + hbaConfLDAPOpts: map[string]string{"ldapport": invalidParam}, user: "foo", pwd: "bar", ldapAuthSuccess: false, + expectedErr: "LDAP authentication: unable to establish LDAP connection", + expectedDetailedErrMsg: "error when trying to create LDAP connection: LDAPs connection failed: invalid ldap port provided"}, {testName: "invalid ldap password", user: "foo", pwd: invalidParam, ldapAuthSuccess: false, expectedErr: "LDAP authentication: unable to bind as LDAP user", expectedErrDetails: "credentials invalid for LDAP server user foo", - expectedDetailedErrMsg: `error when binding as user ‹foo› with DN(‹foo›) in LDAP server: LDAP bind failed: invalid password provided`}, + expectedDetailedErrMsg: `error when binding as user ‹foo› with DN(‹cn=foo›) in LDAP server: LDAP bind failed: invalid password provided`}, } for i, tc := range testCases { t.Run(fmt.Sprintf("%d: testName:%v hbConfOpts:%v user:%v password:%v", i, tc.testName, tc.hbaConfLDAPOpts, tc.user, tc.pwd), func(t *testing.T) { hbaEntry := constructHBAEntry(t, hbaEntryBase, hbaConfLDAPDefaultOpts, tc.hbaConfLDAPOpts) - detailedErrorMsg, err := verifier.ValidateLDAPLogin( - ctx, s.ClusterSettings(), username.MakeSQLUsernameFromPreNormalizedString(tc.user), tc.pwd, &hbaEntry, nil) + ldapUserDN, err := distinguishedname.ParseDN("cn=" + tc.user) + if err != nil { + t.Fatalf("error parsing DN string for user %s: %v", tc.user, err) + } + detailedErrorMsg, err := manager.ValidateLDAPLogin( + ctx, s.ClusterSettings(), ldapUserDN, username.MakeSQLUsernameFromPreNormalizedString(tc.user), tc.pwd, &hbaEntry, nil) if (err == nil) != tc.ldapAuthSuccess { t.Fatalf("expected success=%t, got err=%v", tc.ldapAuthSuccess, err) diff --git a/pkg/ccl/ldapccl/authorization_ldap.go b/pkg/ccl/ldapccl/authorization_ldap.go new file mode 100644 index 000000000000..d77622bc7352 --- /dev/null +++ b/pkg/ccl/ldapccl/authorization_ldap.go @@ -0,0 +1,143 @@ +// Copyright 2024 The Cockroach Authors. +// +// Licensed as a CockroachDB Enterprise file under the Cockroach Community +// License (the "License"); you may not use this file except in compliance with +// the License. 
You may obtain a copy of the License at +// +// https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt + +package ldapccl + +import ( + "context" + + "github.com/cockroachdb/cockroach/pkg/ccl/utilccl" + "github.com/cockroachdb/cockroach/pkg/clusterversion" + "github.com/cockroachdb/cockroach/pkg/security/distinguishedname" + "github.com/cockroachdb/cockroach/pkg/security/username" + "github.com/cockroachdb/cockroach/pkg/server/telemetry" + "github.com/cockroachdb/cockroach/pkg/settings/cluster" + "github.com/cockroachdb/cockroach/pkg/sql/pgwire/hba" + "github.com/cockroachdb/cockroach/pkg/sql/pgwire/identmap" + "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" + "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" + "github.com/cockroachdb/errors" + "github.com/cockroachdb/redact" + "github.com/go-ldap/ldap/v3" +) + +const ( + beginAuthZCounterName = counterPrefix + "begin_authorization" + authZSuccessCounterName = counterPrefix + "authorization_success" +) + +var ( + beginAuthZUseCounter = telemetry.GetCounterOnce(beginAuthZCounterName) + authZSuccessCounter = telemetry.GetCounterOnce(authZSuccessCounterName) +) + +// validateLDAPAuthZOptions checks the ldap authorization config values. +func (authManager *ldapAuthManager) validateLDAPAuthZOptions() error { + const ldapOptionsErrorMsg = "ldap authorization params in HBA conf missing" + if authManager.mu.conf.ldapGroupListFilter == "" { + return errors.New(ldapOptionsErrorMsg + " group list attribute") + } + return nil +} + +// FetchLDAPGroups retrieves the ldap groups for the supplied ldap user DN. +// In particular, it checks that: +// * The cluster has an enterprise license. +// * The active cluster version is at least 24.3, as this feature requires. +// * The provided LDAP user distinguished name is a valid DN. +// * The LDAP authManager is enabled after cluster settings were reloaded. +// * The hba conf entry options can be parsed to obtain ldap server params. +// * All ldap server params are valid. +// * An LDAPs connection can be established with the configured server. +// * The configured bind DN and password can be used to fetch the ldap groups for the provided user DN. +// It returns the list of DNs of the ldap groups the user is a member of, authError +// (which is the error sql clients will see in case of failures) and +// detailedError (which is the internal error from ldap clients that might +// contain sensitive information we do not want to send to sql clients but still +// want to log). We do not want to send any information back to the client that +// the client did not itself provide.
+// +// Example authorization flow for obtaining LDAP groups for an LDAP user: +// if ldapGroups, detailedErrors, authError := ldapManager.m.FetchLDAPGroups(ctx, execCfg.Settings, externalUserDN, entry, identMap); authError != nil { +// errForLog := authError +// if detailedErrors != "" { +// errForLog = errors.Join(errForLog, errors.Newf("%s", detailedErrors)) +// } +// log.Warningf(ctx, "error retrieving ldap groups for authZ: %+v", errForLog) +// } else { +// log.Infof(ctx, "LDAP authorization: retrieved ldap groups are %+v", ldapGroups) +// } +func (authManager *ldapAuthManager) FetchLDAPGroups( + ctx context.Context, + st *cluster.Settings, + userDN *ldap.DN, + user username.SQLUsername, + entry *hba.Entry, + _ *identmap.Conf, +) (_ []*ldap.DN, detailedErrorMsg redact.RedactableString, authError error) { + if err := utilccl.CheckEnterpriseEnabled(st, "LDAP authorization"); err != nil { + return nil, "", err + } + if !st.Version.IsActive(ctx, clusterversion.V24_3) { + return nil, "", pgerror.Newf(pgcode.FeatureNotSupported, "LDAP authorization is only supported after v24.3 upgrade is finalized") + } + + authManager.mu.Lock() + defer authManager.mu.Unlock() + + if !authManager.mu.enabled { + return nil, "", errors.Newf("LDAP authorization: not enabled") + } + telemetry.Inc(beginAuthZUseCounter) + + if err := authManager.setLDAPConfigOptions(entry); err != nil { + return nil, redact.Sprintf("error parsing hba conf options for LDAP: %v", err), + errors.Newf("LDAP authorization: unable to parse hba conf options") + } + + if err := authManager.validateLDAPBaseOptions(); err != nil { + return nil, redact.Sprintf("error validating base hba conf options for LDAP: %v", err), + errors.Newf("LDAP authorization: unable to validate authManager base options") + } + + if err := authManager.validateLDAPAuthZOptions(); err != nil { + return nil, redact.Sprintf("error validating authorization hba conf options for LDAP: %v", err), + errors.Newf("LDAP authorization: unable to validate authManager authorization options") + } + + // Establish a LDAPs connection with the set LDAP server and port + err := authManager.mu.util.MaybeInitLDAPsConn(ctx, authManager.mu.conf) + if err != nil { + return nil, redact.Sprintf("error when trying to create LDAP connection: %v", err), + errors.Newf("LDAP authorization: unable to establish LDAP connection") + } + + // Fetch the ldap groups for the supplied user DN from the ldap server + // using the configured group list filter + fetchedGroups, err := authManager.mu.util.ListGroups(ctx, authManager.mu.conf, userDN.String()) + if err != nil { + return nil, redact.Sprintf("error when fetching groups for user dn %q in LDAP server: %v", userDN.String(), err), + errors.WithDetailf( + errors.Newf("LDAP authorization: unable to fetch groups for user"), + "cannot find groups for which user is a member") + } + + ldapGroups := make([]*ldap.DN, len(fetchedGroups)) + for idx := range fetchedGroups { + ldapGroups[idx], err = distinguishedname.ParseDN(fetchedGroups[idx]) + if err != nil { + return nil, redact.Sprintf("error parsing member group DN %s obtained from LDAP server: %v", fetchedGroups[idx], err), + errors.WithDetailf( + errors.Newf("LDAP authorization: unable to parse member LDAP group distinguished name"), + "cannot parse a group DN returned for user %s", user.Normalized()) + } + } + + telemetry.Inc(authZSuccessCounter) + return ldapGroups, "", nil +} diff --git a/pkg/ccl/ldapccl/authorization_ldap_test.go b/pkg/ccl/ldapccl/authorization_ldap_test.go new file mode 100644 index 
000000000000..baad9db7ae3b --- /dev/null +++ b/pkg/ccl/ldapccl/authorization_ldap_test.go @@ -0,0 +1,153 @@ +// Copyright 2024 The Cockroach Authors. +// +// Licensed as a CockroachDB Enterprise file under the Cockroach Community +// License (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt + +package ldapccl + +import ( + "context" + "crypto/tls" + "fmt" + "testing" + + "github.com/cockroachdb/cockroach/pkg/base" + "github.com/cockroachdb/cockroach/pkg/security/distinguishedname" + "github.com/cockroachdb/cockroach/pkg/security/username" + "github.com/cockroachdb/cockroach/pkg/testutils" + "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" + "github.com/cockroachdb/cockroach/pkg/util/leaktest" + "github.com/cockroachdb/cockroach/pkg/util/log" + "github.com/cockroachdb/errors" + "github.com/cockroachdb/redact" + "github.com/stretchr/testify/require" +) + +func TestLDAPAuthorization(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + // Intercept the call to NewLDAPUtil and return the mocked NewLDAPUtil function + defer testutils.TestingHook( + &NewLDAPUtil, + func(ctx context.Context, conf ldapConfig) (ILDAPUtil, error) { + return &mockLDAPUtil{tlsConfig: &tls.Config{}}, nil + })() + ctx := context.Background() + s := serverutils.StartServerOnly(t, base.TestServerArgs{}) + defer s.Stopper().Stop(ctx) + manager := ConfigureLDAPAuth(ctx, s.AmbientCtx(), s.ClusterSettings(), s.StorageClusterID()) + hbaEntryBase := "host all all all ldap " + hbaConfLDAPDefaultOpts := map[string]string{ + "ldapserver": "localhost", "ldapport": "636", "ldapbasedn": "dc=localhost", "ldapbinddn": "cn=readonly,dc=localhost", + "ldapbindpasswd": "readonly_pwd", "ldapgrouplistfilter": "(objectCategory=cn=Group,cn=Schema,cn=Configuration,DC=crlcloud,DC=dev)", + } + testCases := []struct { + testName string + hbaConfLDAPOpts map[string]string + user string + authZSuccess bool + ldapGroups []string + expectedErr string + expectedErrDetails string + expectedDetailedErrMsg string + }{ + {testName: "proper hba conf and valid user cred", + user: "cn=foo", authZSuccess: true, ldapGroups: []string{"cn=foo"}}, + {testName: "proper hba conf and invalid distinguished name", + user: "cn=invalid", authZSuccess: false, + expectedErr: "LDAP authorization: unable to fetch groups for user", + expectedErrDetails: "cannot find groups for which user is a member", + expectedDetailedErrMsg: `error when fetching groups for user dn ‹"cn=invalid"› in LDAP server: LDAP groups list failed: invalid user DN ‹"cn=invalid"› provided`}, + {testName: "invalid ldap option", + hbaConfLDAPOpts: map[string]string{"invalidOpt": "invalidVal"}, user: "cn=foo", authZSuccess: false, + expectedErr: "LDAP authorization: unable to parse hba conf options", + expectedDetailedErrMsg: `error parsing hba conf options for LDAP: invalid LDAP option provided in hba conf: ‹invalidOpt›`}, + {testName: "empty server", + hbaConfLDAPOpts: map[string]string{"ldapserver": emptyParam}, user: "cn=foo", authZSuccess: false, + expectedErr: "LDAP authorization: unable to validate authManager base options", + expectedDetailedErrMsg: "error validating base hba conf options for LDAP: ldap params in HBA conf missing ldap server"}, + {testName: "invalid server", + hbaConfLDAPOpts: map[string]string{"ldapserver": 
invalidParam}, user: "cn=foo", authZSuccess: false, + expectedErr: "LDAP authorization: unable to establish LDAP connection", + expectedDetailedErrMsg: "error when trying to create LDAP connection: LDAPs connection failed: invalid ldap server provided"}, + {testName: "empty port", + hbaConfLDAPOpts: map[string]string{"ldapport": emptyParam}, user: "cn=foo", authZSuccess: false, + expectedErr: "LDAP authorization: unable to validate authManager base options", + expectedDetailedErrMsg: "error validating base hba conf options for LDAP: ldap params in HBA conf missing ldap port"}, + {testName: "invalid port", + hbaConfLDAPOpts: map[string]string{"ldapport": invalidParam}, user: "cn=foo", authZSuccess: false, + expectedErr: "LDAP authorization: unable to establish LDAP connection", + expectedDetailedErrMsg: "error when trying to create LDAP connection: LDAPs connection failed: invalid ldap port provided"}, + {testName: "empty base dn", + hbaConfLDAPOpts: map[string]string{"ldapbasedn": emptyParam}, user: "cn=foo", authZSuccess: false, + expectedErr: "LDAP authorization: unable to validate authManager base options", + expectedDetailedErrMsg: "error validating base hba conf options for LDAP: ldap params in HBA conf missing base DN"}, + {testName: "invalid base dn", + hbaConfLDAPOpts: map[string]string{"ldapbasedn": invalidParam}, user: "cn=foo", authZSuccess: false, + expectedErr: "LDAP authorization: unable to fetch groups for user", + expectedErrDetails: "cannot find groups for which user is a member", + expectedDetailedErrMsg: `error when fetching groups for user dn ‹"cn=foo"› in LDAP server: LDAP groups list failed: invalid base DN ‹"invalid"› provided`}, + {testName: "empty bind dn", + hbaConfLDAPOpts: map[string]string{"ldapbinddn": emptyParam}, user: "cn=foo", authZSuccess: false, + expectedErr: "LDAP authorization: unable to validate authManager base options", + expectedDetailedErrMsg: "error validating base hba conf options for LDAP: ldap params in HBA conf missing bind DN"}, + {testName: "invalid bind dn", + hbaConfLDAPOpts: map[string]string{"ldapbinddn": invalidParam}, user: "cn=foo", authZSuccess: false, + expectedErr: "LDAP authorization: unable to fetch groups for user", + expectedErrDetails: "cannot find groups for which user is a member", + expectedDetailedErrMsg: `error when fetching groups for user dn ‹"cn=foo"› in LDAP server: LDAP groups list failed: LDAP bind failed: invalid username provided`}, + {testName: "empty bind pwd", + hbaConfLDAPOpts: map[string]string{"ldapbindpasswd": emptyParam}, user: "cn=foo", authZSuccess: false, + expectedErr: "LDAP authorization: unable to validate authManager base options", + expectedDetailedErrMsg: "error validating base hba conf options for LDAP: ldap params in HBA conf missing bind password"}, + {testName: "invalid bind pwd", + hbaConfLDAPOpts: map[string]string{"ldapbindpasswd": invalidParam}, user: "cn=foo", authZSuccess: false, + expectedErr: "LDAP authorization: unable to fetch groups for user", + expectedErrDetails: "cannot find groups for which user is a member", + expectedDetailedErrMsg: `error when fetching groups for user dn ‹"cn=foo"› in LDAP server: LDAP groups list failed: LDAP bind failed: invalid password provided`}, + {testName: "empty group list filter", + hbaConfLDAPOpts: map[string]string{"ldapgrouplistfilter": emptyParam}, user: "cn=foo", authZSuccess: false, + expectedErr: "LDAP authorization: unable to validate authManager authorization options", + expectedDetailedErrMsg: "error validating authorization hba conf 
options for LDAP: ldap authorization params in HBA conf missing group list attribute"}, + {testName: "invalid group list filter", + hbaConfLDAPOpts: map[string]string{"ldapgrouplistfilter": invalidParam}, user: "cn=foo", authZSuccess: false, + expectedErr: "LDAP authorization: unable to fetch groups for user", + expectedErrDetails: "cannot find groups for which user is a member", + expectedDetailedErrMsg: `error when fetching groups for user dn ‹"cn=foo"› in LDAP server: LDAP groups list failed: invalid group list filter ‹"invalid"› provided`}, + {testName: "no matching ldap groups", + user: "", authZSuccess: false, expectedErr: "LDAP authorization: unable to fetch groups for user", + expectedErrDetails: "cannot find groups for which user is a member", + expectedDetailedErrMsg: `error when fetching groups for user dn ‹""› in LDAP server: LDAP groups list failed: user dn ‹""› does not belong to any groups`}, + {testName: "more than 1 matching ldap groups", + user: "o=foo,ou=foo2,cn=foo3", authZSuccess: true, ldapGroups: []string{"o=foo", "ou=foo2", "cn=foo3"}}, + } + for i, tc := range testCases { + t.Run(fmt.Sprintf("%d: testName:%v hbConfOpts:%v user:%v", i, tc.testName, tc.hbaConfLDAPOpts, tc.user), func(t *testing.T) { + hbaEntry := constructHBAEntry(t, hbaEntryBase, hbaConfLDAPDefaultOpts, tc.hbaConfLDAPOpts) + userDN, err := distinguishedname.ParseDN(tc.user) + if err != nil { + t.Fatalf("error parsing DN string for user DN %s: %v", tc.user, err) + } + + retrievedLDAPGroups, detailedErrorMsg, err := manager.FetchLDAPGroups( + ctx, s.ClusterSettings(), userDN, username.MakeSQLUsernameFromPreNormalizedString("foo"), &hbaEntry, nil) + + if (err == nil) != tc.authZSuccess { + t.Fatalf("expected success=%t, got err=%v", tc.authZSuccess, err) + } + if err != nil { + require.Equal(t, tc.expectedErr, err.Error()) + require.Equal(t, tc.expectedErrDetails, errors.FlattenDetails(err)) + require.Equal(t, redact.RedactableString(tc.expectedDetailedErrMsg), detailedErrorMsg) + } else { + require.Equal(t, len(tc.ldapGroups), len(retrievedLDAPGroups)) + for idx := range retrievedLDAPGroups { + require.Equal(t, tc.ldapGroups[idx], retrievedLDAPGroups[idx].String()) + } + } + }) + } +} diff --git a/pkg/ccl/ldapccl/ldap_manager.go b/pkg/ccl/ldapccl/ldap_manager.go new file mode 100644 index 000000000000..a97416e0ef21 --- /dev/null +++ b/pkg/ccl/ldapccl/ldap_manager.go @@ -0,0 +1,178 @@ +// Copyright 2024 The Cockroach Authors. +// +// Licensed as a CockroachDB Enterprise file under the Cockroach Community +// License (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt + +package ldapccl + +import ( + "context" + + "github.com/cockroachdb/cockroach/pkg/server/telemetry" + "github.com/cockroachdb/cockroach/pkg/settings/cluster" + "github.com/cockroachdb/cockroach/pkg/sql/pgwire" + "github.com/cockroachdb/cockroach/pkg/sql/pgwire/hba" + "github.com/cockroachdb/cockroach/pkg/util/log" + "github.com/cockroachdb/cockroach/pkg/util/syncutil" + "github.com/cockroachdb/cockroach/pkg/util/uuid" + "github.com/cockroachdb/errors" +) + +const ( + counterPrefix = "auth.ldap." + enableCounterName = counterPrefix + "enable" +) + +var enableUseCounter = telemetry.GetCounterOnce(enableCounterName) + +// ldapAuthManager is an object that is used for both: +// 1. 
enabling ldap connection validation used as part of the CRDB +// client auth flow. +// 2. facilitating authorization to fetch parent groups as part of CRDB role +// privilege resolution. +// +// The implementation uses the `go-ldap/ldap/` client package and is supported +// through a number of cluster settings defined in `ldapccl/settings.go`. These +// settings specify how the ldap auth attempt should be executed and whether +// this feature is enabled. A common ldapAuthManager object is used for both authN +// and authZ to reduce redundancy of cluster settings listeners and auth +// parameter configurations. +type ldapAuthManager struct { + mu struct { + syncutil.RWMutex + // conf contains all the values that come from cluster settings. + conf ldapConfig + // util contains the connection object required for interfacing with the ldap server. + util ILDAPUtil + // enabled indicates whether this feature is currently enabled. It + // is set to true once ldap util is initialized. + enabled bool + } + // clusterUUID is used to check the validity of the enterprise license. It is + // set once at initialization. + clusterUUID uuid.UUID +} + +// ldapConfig contains all the values to configure LDAP authN and authZ. These +// values are set using matching cluster settings or from hba conf options for +// the LDAP entry. +type ldapConfig struct { + domainCACert string + clientTLSCert string + clientTLSKey string + ldapServer string + ldapPort string + ldapBaseDN string + ldapBindDN string + ldapBindPassword string + ldapSearchFilter string + ldapSearchAttribute string + ldapGroupListFilter string +} + +// reloadConfig locks the mutex and then refreshes the values in conf from the cluster settings. +func (authManager *ldapAuthManager) reloadConfig(ctx context.Context, st *cluster.Settings) { + authManager.mu.Lock() + defer authManager.mu.Unlock() + authManager.reloadConfigLocked(ctx, st) +} + +// reloadConfigLocked refreshes the values in conf from the cluster settings without locking the mutex. +func (authManager *ldapAuthManager) reloadConfigLocked(ctx context.Context, st *cluster.Settings) { + conf := ldapConfig{ + domainCACert: LDAPDomainCACertificate.Get(&st.SV), + clientTLSCert: LDAPClientTLSCertSetting.Get(&st.SV), + clientTLSKey: LDAPClientTLSKeySetting.Get(&st.SV), + } + authManager.mu.conf = conf + + var err error + authManager.mu.util, err = NewLDAPUtil(ctx, authManager.mu.conf) + if err != nil { + log.Warningf(ctx, "LDAP auth manager: unable to initialize LDAP connection: %v", err) + return + } + + if !authManager.mu.enabled { + telemetry.Inc(enableUseCounter) + } + authManager.mu.enabled = true + log.Infof(ctx, "initialized LDAP authManager") +} + +// setLDAPConfigOptions extracts hba conf parameters required for connecting and +// querying the LDAP server from the hba conf entry and sets them for LDAP auth. 
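The setLDAPConfigOptions switch below consumes quoted key=value options from an hba.conf ldap entry. A minimal sketch, not part of the diff, of such an entry covering every accepted option, parsed with the same hba.ParseAndNormalize helper the tests use; the server, DNs, and filters are placeholder values.

// Sketch: an hba.conf line carrying every LDAP option accepted by the
// setLDAPConfigOptions switch below. All values are placeholders.
package ldapccl

import "github.com/cockroachdb/cockroach/pkg/sql/pgwire/hba"

func exampleLDAPEntry() (hba.Entry, error) {
	const line = `host all all all ldap ` +
		`"ldapserver=ldap.example.com" "ldapport=636" ` +
		`"ldapbasedn=dc=example,dc=com" ` +
		`"ldapbinddn=cn=readonly,dc=example,dc=com" ` +
		`"ldapbindpasswd=readonly_pwd" ` +
		`"ldapsearchattribute=uid" ` +
		`"ldapsearchfilter=(memberOf=cn=users,ou=groups,dc=example,dc=com)" ` +
		`"ldapgrouplistfilter=(objectCategory=group)"`
	conf, err := hba.ParseAndNormalize(line)
	if err != nil {
		return hba.Entry{}, err
	}
	// Each Options pair of the single parsed entry feeds one arm of the
	// switch in setLDAPConfigOptions.
	return conf.Entries[0], nil
}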
+func (authManager *ldapAuthManager) setLDAPConfigOptions(entry *hba.Entry) error { + conf := ldapConfig{ + domainCACert: authManager.mu.conf.domainCACert, + } + for _, opt := range entry.Options { + switch opt[0] { + case "ldapserver": + conf.ldapServer = opt[1] + case "ldapport": + conf.ldapPort = opt[1] + case "ldapbasedn": + conf.ldapBaseDN = opt[1] + case "ldapbinddn": + conf.ldapBindDN = opt[1] + case "ldapbindpasswd": + conf.ldapBindPassword = opt[1] + case "ldapsearchfilter": + conf.ldapSearchFilter = opt[1] + case "ldapsearchattribute": + conf.ldapSearchAttribute = opt[1] + case "ldapgrouplistfilter": + conf.ldapGroupListFilter = opt[1] + default: + return errors.Newf("invalid LDAP option provided in hba conf: %s", opt[0]) + } + } + authManager.mu.conf = conf + return nil +} + +// validateLDAPBaseOptions checks the mandatory ldap auth config values for validity. +func (authManager *ldapAuthManager) validateLDAPBaseOptions() error { + const ldapOptionsErrorMsg = "ldap params in HBA conf missing" + if authManager.mu.conf.ldapServer == "" { + return errors.New(ldapOptionsErrorMsg + " ldap server") + } + if authManager.mu.conf.ldapPort == "" { + return errors.New(ldapOptionsErrorMsg + " ldap port") + } + if authManager.mu.conf.ldapBaseDN == "" { + return errors.New(ldapOptionsErrorMsg + " base DN") + } + if authManager.mu.conf.ldapBindDN == "" { + return errors.New(ldapOptionsErrorMsg + " bind DN") + } + if authManager.mu.conf.ldapBindPassword == "" { + return errors.New(ldapOptionsErrorMsg + " bind password") + } + return nil +} + +// ConfigureLDAPAuth initializes and returns a ldapAuthManager. It also sets up listeners so +// that the ldapAuthManager's config is updated when the cluster settings values change. +var ConfigureLDAPAuth = func( + serverCtx context.Context, + ambientCtx log.AmbientContext, + st *cluster.Settings, + clusterUUID uuid.UUID, +) pgwire.LDAPManager { + authManager := ldapAuthManager{} + authManager.clusterUUID = clusterUUID + authManager.reloadConfig(serverCtx, st) + LDAPDomainCACertificate.SetOnChange(&st.SV, func(ctx context.Context) { + authManager.reloadConfig(ambientCtx.AnnotateCtx(ctx), st) + }) + return &authManager +} + +func init() { + pgwire.ConfigureLDAPAuth = ConfigureLDAPAuth +} diff --git a/pkg/ccl/ldapccl/ldap_test_util.go b/pkg/ccl/ldapccl/ldap_test_util.go new file mode 100644 index 000000000000..ea0b7e5d8b45 --- /dev/null +++ b/pkg/ccl/ldapccl/ldap_test_util.go @@ -0,0 +1,147 @@ +// Copyright 2024 The Cockroach Authors. +// +// Licensed as a CockroachDB Enterprise file under the Cockroach Community +// License (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt + +package ldapccl + +import ( + "context" + "crypto/tls" + "fmt" + "strings" + "testing" + + "github.com/cockroachdb/cockroach/pkg/sql/pgwire/hba" + "github.com/cockroachdb/errors" + "github.com/go-ldap/ldap/v3" +) + +const ( + emptyParam = "empty" + invalidParam = "invalid" +) + +type mockLDAPUtil struct { + conn *ldap.Conn + tlsConfig *tls.Config +} + +// MaybeInitLDAPsConn implements the ILDAPUtil interface. 
+func (lu *mockLDAPUtil) MaybeInitLDAPsConn(ctx context.Context, conf ldapConfig) error { + if strings.Contains(conf.ldapServer, invalidParam) { + return errors.Newf(ldapsFailureMessage + ": invalid ldap server provided") + } else if strings.Contains(conf.ldapPort, invalidParam) { + return errors.Newf(ldapsFailureMessage + ": invalid ldap port provided") + } + lu.conn = &ldap.Conn{} + return nil +} + +// Bind implements the ILDAPUtil interface. +func (lu *mockLDAPUtil) Bind(ctx context.Context, userDN string, ldapPwd string) error { + if strings.Contains(userDN, invalidParam) { + return errors.Newf(bindFailureMessage + ": invalid username provided") + } else if strings.Contains(ldapPwd, invalidParam) { + return errors.Newf(bindFailureMessage + ": invalid password provided") + } + + return nil +} + +// Search implements the ILDAPUtil interface. +func (lu *mockLDAPUtil) Search( + ctx context.Context, conf ldapConfig, username string, +) (userDN string, err error) { + if err := lu.Bind(ctx, conf.ldapBindDN, conf.ldapBindPassword); err != nil { + return "", errors.Wrap(err, searchFailureMessage) + } + if strings.Contains(conf.ldapBaseDN, invalidParam) { + return "", errors.Newf(searchFailureMessage+": invalid base DN %q provided", conf.ldapBaseDN) + } + if strings.Contains(conf.ldapSearchFilter, invalidParam) { + return "", errors.Newf(searchFailureMessage+": invalid search filter %q provided", conf.ldapSearchFilter) + } + if strings.Contains(conf.ldapSearchAttribute, invalidParam) { + return "", errors.Newf(searchFailureMessage+": invalid search attribute %q provided", conf.ldapSearchAttribute) + } + if strings.Contains(username, invalidParam) { + return "", errors.Newf(searchFailureMessage+": invalid search value %q provided", username) + } + commonNames := strings.Split(username, ",") + switch { + case len(username) == 0: + return "", errors.Newf(searchFailureMessage+": user %q does not exist", username) + case len(commonNames) > 1: + return "", errors.Newf(searchFailureMessage+": too many matching entries returned for user %q", username) + } + + distinguishedName := "CN=" + commonNames[0] + return distinguishedName, nil +} + +// ListGroups implements the ILDAPUtil interface. 
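The mock encodes failure modes as string markers: any value containing "invalid" yields an error, and commas in a name stand in for multiple directory matches; ListGroups, defined just below, reuses the comma convention to fan a user DN out into groups. A minimal sketch of the resulting behavior, assuming conf carries marker-free bind credentials, base DN, and filters:

// Sketch (would live in package ldapccl next to the mock): the mock
// conventions in action. Assumes conf contains no "invalid" marker.
package ldapccl

import (
	"context"
	"fmt"
)

func exampleMockConventions(ctx context.Context, conf ldapConfig) {
	lu := &mockLDAPUtil{}
	dn, _ := lu.Search(ctx, conf, "foo")       // dn == "CN=foo"
	_, err := lu.Search(ctx, conf, "foo,foo2") // err: too many matching entries
	groups, _ := lu.ListGroups(ctx, conf, "o=a,cn=b") // groups == ["o=a" "cn=b"]
	fmt.Println(dn, err, groups)
}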
+func (lu *mockLDAPUtil) ListGroups( + ctx context.Context, conf ldapConfig, userDN string, +) (ldapGroupsDN []string, err error) { + if err := lu.Bind(ctx, conf.ldapBindDN, conf.ldapBindPassword); err != nil { + return nil, errors.Wrap(err, groupListFailureMessage) + } + if strings.Contains(conf.ldapBaseDN, invalidParam) { + return nil, errors.Newf(groupListFailureMessage+": invalid base DN %q provided", conf.ldapBaseDN) + } + if strings.Contains(conf.ldapSearchFilter, invalidParam) { + return nil, errors.Newf(groupListFailureMessage+": invalid search filter %q provided", conf.ldapSearchFilter) + } + if strings.Contains(conf.ldapGroupListFilter, invalidParam) { + return nil, errors.Newf(groupListFailureMessage+": invalid group list filter %q provided", conf.ldapGroupListFilter) + } + if strings.Contains(userDN, invalidParam) { + return nil, errors.Newf(groupListFailureMessage+": invalid user DN %q provided", userDN) + } + + if len(userDN) == 0 { + return nil, errors.Newf(groupListFailureMessage+": user dn %q does not belong to any groups", userDN) + } + + ldapGroupsDN = strings.Split(userDN, ",") + return ldapGroupsDN, nil +} + +var _ ILDAPUtil = &mockLDAPUtil{} + +func constructHBAEntry( + t *testing.T, + hbaEntryBase string, + hbaConfLDAPDefaultOpts map[string]string, + hbaConfLDAPOpts map[string]string, +) hba.Entry { + hbaEntryLDAP := hbaEntryBase + // add options from default and override default options when provided with one + for opt, value := range hbaConfLDAPDefaultOpts { + setValue := value + if hbaConfLDAPOpts[opt] == emptyParam { + continue + } else if hbaConfLDAPOpts[opt] != "" { + setValue = hbaConfLDAPOpts[opt] + } + hbaEntryLDAP += fmt.Sprintf("\"%s=%s\" ", opt, setValue) + } + // add non default options + for additionalOpt, additionalOptValue := range hbaConfLDAPOpts { + if _, ok := hbaConfLDAPDefaultOpts[additionalOpt]; !ok { + hbaEntryLDAP += fmt.Sprintf("\"%s=%s\" ", additionalOpt, additionalOptValue) + } + } + hbaConf, err := hba.ParseAndNormalize(hbaEntryLDAP) + if err != nil { + t.Fatalf("error parsing hba conf: %v", err) + } + if len(hbaConf.Entries) != 1 { + t.Fatalf("hba conf value invalid: should contain only 1 entry") + } + return hbaConf.Entries[0] +} diff --git a/pkg/ccl/ldapccl/ldap_util.go b/pkg/ccl/ldapccl/ldap_util.go index 6cdef4d3a29c..2471a37c3e30 100644 --- a/pkg/ccl/ldapccl/ldap_util.go +++ b/pkg/ccl/ldapccl/ldap_util.go @@ -19,10 +19,11 @@ import ( ) const ( - invalidLDAPConfMessage = "LDAP configuration invalid" - ldapsFailureMessage = "LDAPs connection failed" - bindFailureMessage = "LDAP bind failed" - searchFailureMessage = "LDAP search failed" + invalidLDAPConfMessage = "LDAP configuration invalid" + ldapsFailureMessage = "LDAPs connection failed" + bindFailureMessage = "LDAP bind failed" + searchFailureMessage = "LDAP search failed" + groupListFailureMessage = "LDAP groups list failed" ) type ldapUtil struct { @@ -30,14 +31,14 @@ type ldapUtil struct { tlsConfig *tls.Config } -// InitLDAPsConn implements the ILDAPUtil interface. -func (lu *ldapUtil) InitLDAPsConn(ctx context.Context, conf ldapAuthenticatorConf) (err error) { +// MaybeInitLDAPsConn implements the ILDAPUtil interface. +func (lu *ldapUtil) MaybeInitLDAPsConn(ctx context.Context, conf ldapConfig) (err error) { // TODO(souravcrl): (Fix 1) DialTLS is slow if we do it for every authN // attempt. We should look into ways for caching connections and avoiding // connection timeouts in case LDAP server enforces that for idle connections. 
// We still should be able to validate a large number of authN requests // reusing the same connection(s). - // (Fix 2) Every authN attempt acquires a lock on ldapAuthenticator, so + // (Fix 2) Every authN attempt acquires a lock on ldapAuthManager, so // only 1 authN attempt is possible at a given time(for entire flow of // bind+search+bind). We should have a permanent bind connection to search for // entries and short-lived bind attempts for requested sql authNs. @@ -47,6 +48,10 @@ func (lu *ldapUtil) InitLDAPsConn(ctx context.Context, conf ldapAuthenticatorCon // connections crdb nodes can take up(either in total or on a per node basis) // // ldapAddress := "ldap://ldap.example.com:636" + // + if lu.conn != nil { + return nil + } ldapAddress := conf.ldapServer + ":" + conf.ldapPort if lu.conn, err = ldap.DialTLS("tcp", ldapAddress, lu.tlsConfig); err != nil { return errors.Wrap(err, ldapsFailureMessage) @@ -64,7 +69,7 @@ func (lu *ldapUtil) Bind(ctx context.Context, userDN string, ldapPwd string) (er // Search implements the ILDAPUtil interface. func (lu *ldapUtil) Search( - ctx context.Context, conf ldapAuthenticatorConf, username string, + ctx context.Context, conf ldapConfig, username string, ) (userDN string, err error) { if err := lu.Bind(ctx, conf.ldapBindDN, conf.ldapBindPassword); err != nil { return "", errors.Wrap(err, searchFailureMessage) @@ -94,19 +99,56 @@ func (lu *ldapUtil) Search( return sr.Entries[0].DN, nil } +// ListGroups implements the ILDAPUtil interface. +func (lu *ldapUtil) ListGroups( + ctx context.Context, conf ldapConfig, userDN string, +) (_ []string, err error) { + if err := lu.Bind(ctx, conf.ldapBindDN, conf.ldapBindPassword); err != nil { + return nil, errors.Wrap(err, groupListFailureMessage) + } + // TODO(souravcrl): Currently list groups can only be performed at subtree + // level but this should be configurable through HBA conf using any of the + // scopes provided: + // https://github.com/go-ldap/ldap/blob/master/search.go#L17-L24 + searchRequest := ldap.NewSearchRequest( + conf.ldapBaseDN, + ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false, + fmt.Sprintf("(&%s(member=%s))", conf.ldapGroupListFilter, ldap.EscapeFilter(userDN)), + []string{}, + nil, + ) + sr, err := lu.conn.Search(searchRequest) + if err != nil { + return nil, errors.Wrap(err, groupListFailureMessage) + } + if len(sr.Entries) == 0 { + return nil, errors.Newf(groupListFailureMessage+": user dn %s does not belong to any groups", userDN) + } + + ldapGroupsDN := make([]string, len(sr.Entries)) + for idx := range sr.Entries { + ldapGroupsDN[idx] = sr.Entries[idx].DN + } + return ldapGroupsDN, nil +} + // ILDAPUtil is an interface for the `ldapauthccl` library to wrap various LDAP // functionalities exposed by `go-ldap` library as part of CRDB modules for // authN and authZ. 
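A worked example, not part of the diff, of the group-membership filter constructed by the ListGroups implementation above, with placeholder values for the group list filter and the member DN; ldap.EscapeFilter escapes only the RFC 4515 special characters, so a typical ASCII DN passes through unchanged.

// Sketch: the filter string ListGroups builds, with placeholder values.
package main

import (
	"fmt"

	"github.com/go-ldap/ldap/v3"
)

func main() {
	groupListFilter := "(objectCategory=group)" // placeholder hba option value
	userDN := "cn=foo,dc=example,dc=com"        // placeholder member DN
	filter := fmt.Sprintf("(&%s(member=%s))", groupListFilter, ldap.EscapeFilter(userDN))
	fmt.Println(filter)
	// Output: (&(objectCategory=group)(member=cn=foo,dc=example,dc=com))
}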
type ILDAPUtil interface { - // InitLDAPsConn creates a mTLS connection with the LDAP server taking - // arguments for domain CA, ldap client key and cert, ldap server & port - InitLDAPsConn(ctx context.Context, conf ldapAuthenticatorConf) error + // MaybeInitLDAPsConn optionally creates an mTLS connection with the LDAP + // server if one does not already exist, taking arguments for the domain CA, + // ldap client key and cert, and ldap server & port + MaybeInitLDAPsConn(ctx context.Context, conf ldapConfig) error // Bind performs a bind given a valid DN and LDAP password Bind(ctx context.Context, userDN string, ldapPwd string) error // Search performs search on LDAP server binding with bindDN and bindpwd // expecting search arguments from HBA conf and crdb database connection // string and returns the ldap userDN. - Search(ctx context.Context, conf ldapAuthenticatorConf, username string) (userDN string, err error) + Search(ctx context.Context, conf ldapConfig, username string) (userDN string, err error) + // ListGroups performs a search on the AD subtree starting from baseDN filtered by + // groupListFilter and lists the groups which have the provided userDN as a member + ListGroups(ctx context.Context, conf ldapConfig, userDN string) (ldapGroupsDN []string, err error) } var _ ILDAPUtil = &ldapUtil{} @@ -115,9 +157,9 @@ var _ ILDAPUtil = &ldapUtil{} // client interface provided by `go-ldap`. This is needed for testing (to // intercept the call to NewLDAPUtil and return the mocked NewLDAPUtil which has // mock implementations for ILDAPUtil interface). -var NewLDAPUtil func(context.Context, ldapAuthenticatorConf) (ILDAPUtil, error) = func( +var NewLDAPUtil func(context.Context, ldapConfig) (ILDAPUtil, error) = func( ctx context.Context, - conf ldapAuthenticatorConf, + conf ldapConfig, ) (ILDAPUtil, error) { util := ldapUtil{tlsConfig: &tls.Config{}} diff --git a/pkg/ccl/logictestccl/testdata/logic_test/crdb_internal_tenant b/pkg/ccl/logictestccl/testdata/logic_test/crdb_internal_tenant index 4871a3b224eb..9ac3663ef769 100644 --- a/pkg/ccl/logictestccl/testdata/logic_test/crdb_internal_tenant +++ b/pkg/ccl/logictestccl/testdata/logic_test/crdb_internal_tenant @@ -55,6 +55,7 @@ crdb_internal databases table node NULL N crdb_internal default_privileges table node NULL NULL crdb_internal feature_usage table node NULL NULL crdb_internal forward_dependencies table node NULL NULL +crdb_internal fully_qualified_names view node NULL NULL crdb_internal gossip_alerts table node NULL NULL crdb_internal gossip_liveness table node NULL NULL crdb_internal gossip_network table node NULL NULL @@ -357,12 +358,12 @@ txn_id txn_fingerprint_id query implicit_txn session_id start_time end_tim query ITTI SELECT range_id, start_pretty, end_pretty, lease_holder FROM crdb_internal.ranges ---- -69 /Tenant/10 /Tenant/11 1 +70 /Tenant/10 /Tenant/11 1 query ITT SELECT range_id, start_pretty, end_pretty FROM crdb_internal.ranges_no_leases ---- -69 /Tenant/10 /Tenant/11 +70 /Tenant/10 /Tenant/11 query IT SELECT zone_id, target FROM crdb_internal.zones ORDER BY 1 diff --git a/pkg/ccl/logictestccl/testdata/logic_test/multi_region b/pkg/ccl/logictestccl/testdata/logic_test/multi_region index dd9ce16cdd49..408ff7127cf9 100644 --- a/pkg/ccl/logictestccl/testdata/logic_test/multi_region +++ b/pkg/ccl/logictestccl/testdata/logic_test/multi_region @@ -1614,4 +1614,3 @@ statement ok ALTER DATABASE drop_region_126549 DROP REGION "us-east-1"; subtest end - diff --git a/pkg/ccl/logictestccl/testdata/logic_test/multi_region_remote_access_error 
b/pkg/ccl/logictestccl/testdata/logic_test/multi_region_remote_access_error index 9acc9c61e1a0..a14fe6b8557b 100644 --- a/pkg/ccl/logictestccl/testdata/logic_test/multi_region_remote_access_error +++ b/pkg/ccl/logictestccl/testdata/logic_test/multi_region_remote_access_error @@ -348,8 +348,10 @@ sleep 5s statement ok SET enforce_home_region = true -statement ok +query T noticetrace SET enforce_home_region_follower_reads_enabled = true +---- +NOTICE: enforce_home_region_follower_reads_enabled is deprecated and will be removed in a future release # An insert with uniqueness checks which access all regions should error out. retry @@ -463,7 +465,7 @@ skipif config multiregion-9node-3region-3azs-vec-off query I retry SELECT DISTINCT range_id FROM [SHOW RANGES FROM TABLE messages_rbr] ---- -74 +75 # Update does not fail when accessing all rows in messages_rbr because lookup # join does not error out the lookup table in phase 1. diff --git a/pkg/ccl/logictestccl/testdata/logic_test/multi_region_zone_configs b/pkg/ccl/logictestccl/testdata/logic_test/multi_region_zone_configs index 07eec48ac7b9..2f5efacb9af4 100644 --- a/pkg/ccl/logictestccl/testdata/logic_test/multi_region_zone_configs +++ b/pkg/ccl/logictestccl/testdata/logic_test/multi_region_zone_configs @@ -158,7 +158,11 @@ DATABASE "mr-zone-configs" ALTER DATABASE "mr-zone-configs" CONFIGURE ZONE USIN statement ok SET override_multi_region_zone_config = true; + +statement ok ALTER DATABASE "mr-zone-configs" CONFIGURE ZONE USING num_voters = 5; + +statement ok SET override_multi_region_zone_config = false statement error zone configuration for database "mr-zone-configs" contains incorrectly configured field "num_voters" @@ -199,12 +203,20 @@ ALTER DATABASE "mr-zone-configs" DROP REGION "ap-southeast-2" statement ok SET override_multi_region_zone_config = true; + +statement ok ALTER DATABASE "mr-zone-configs" DROP REGION "ap-southeast-2"; + +statement ok SET override_multi_region_zone_config = false statement ok SET override_multi_region_zone_config = true; + +statement ok ALTER DATABASE "mr-zone-configs" CONFIGURE ZONE USING global_reads = true; + +statement ok SET override_multi_region_zone_config = false statement error attempting to update zone configuration for database "mr-zone-configs" which contains modified field "global_reads" @@ -212,7 +224,11 @@ ALTER DATABASE "mr-zone-configs" ADD REGION "ap-southeast-2" statement ok SET override_multi_region_zone_config = true; + +statement ok ALTER DATABASE "mr-zone-configs" ADD REGION "ap-southeast-2"; + +statement ok SET override_multi_region_zone_config = false # Zone config is unmodified now. We don't need to override. 
@@ -221,12 +237,20 @@ ALTER DATABASE "mr-zone-configs" DROP REGION "ap-southeast-2" statement ok SET override_multi_region_zone_config = true; + +statement ok ALTER DATABASE "mr-zone-configs" ADD REGION "ap-southeast-2"; + +statement ok SET override_multi_region_zone_config = false statement ok SET override_multi_region_zone_config = true; + +statement ok ALTER DATABASE "mr-zone-configs" CONFIGURE ZONE USING num_replicas = 7; + +statement ok SET override_multi_region_zone_config = false statement error attempting to update zone configuration for database "mr-zone-configs" which contains modified field "num_replicas" @@ -234,7 +258,11 @@ ALTER DATABASE "mr-zone-configs" SET PRIMARY REGION "us-east-1" statement ok SET override_multi_region_zone_config = true; + +statement ok ALTER DATABASE "mr-zone-configs" SET PRIMARY REGION "us-east-1"; + +statement ok SET override_multi_region_zone_config = false query TT @@ -256,7 +284,11 @@ ALTER DATABASE "mr-zone-configs" CONFIGURE ZONE USING num_replicas = 7, gc.ttlse statement ok SET override_multi_region_zone_config = true; + +statement ok ALTER DATABASE "mr-zone-configs" CONFIGURE ZONE USING num_replicas = 7, gc.ttlseconds = 100000; + +statement ok SET override_multi_region_zone_config = false statement error attempting to update zone configuration for database "mr-zone-configs" which contains modified field "num_replicas" @@ -264,12 +296,20 @@ ALTER DATABASE "mr-zone-configs" SURVIVE REGION FAILURE statement ok SET override_multi_region_zone_config = true; + +statement ok ALTER DATABASE "mr-zone-configs" SURVIVE REGION FAILURE; + +statement ok SET override_multi_region_zone_config = false statement ok SET override_multi_region_zone_config = true; + +statement ok ALTER DATABASE "mr-zone-configs" SURVIVE ZONE FAILURE; + +statement ok SET override_multi_region_zone_config = false statement error attempting to modify protected field "constraints" of a multi-region zone configuration @@ -277,7 +317,11 @@ ALTER DATABASE "mr-zone-configs" CONFIGURE ZONE USING constraints = '{+region=us statement ok SET override_multi_region_zone_config = true; + +statement ok ALTER DATABASE "mr-zone-configs" CONFIGURE ZONE USING constraints = '{+region=us-east-1: 3}'; + +statement ok SET override_multi_region_zone_config = false statement error attempting to update zone configuration for database "mr-zone-configs" which contains modified field "constraints" @@ -285,7 +329,11 @@ ALTER DATABASE "mr-zone-configs" DROP REGION "ap-southeast-2" statement ok SET override_multi_region_zone_config = true; + +statement ok ALTER DATABASE "mr-zone-configs" DROP REGION "ap-southeast-2"; + +statement ok SET override_multi_region_zone_config = false statement error attempting to modify protected field "voter_constraints" of a multi-region zone configuration @@ -293,7 +341,11 @@ ALTER DATABASE "mr-zone-configs" CONFIGURE ZONE USING voter_constraints = '[+reg statement ok SET override_multi_region_zone_config = true; + +statement ok ALTER DATABASE "mr-zone-configs" CONFIGURE ZONE USING voter_constraints = '[+region=ap-southeast-2]'; + +statement ok SET override_multi_region_zone_config = false query TT @@ -314,7 +366,11 @@ ALTER DATABASE "mr-zone-configs" DROP REGION "ca-central-1" statement ok SET override_multi_region_zone_config = true; + +statement ok ALTER DATABASE "mr-zone-configs" DROP REGION "ca-central-1"; + +statement ok SET override_multi_region_zone_config = false query TT @@ -335,7 +391,11 @@ ALTER DATABASE "mr-zone-configs" CONFIGURE ZONE USING lease_preferences = '[[+re 
statement ok SET override_multi_region_zone_config = true; + +statement ok ALTER DATABASE "mr-zone-configs" CONFIGURE ZONE USING lease_preferences = '[[+region=ap-southeast-2]]'; + +statement ok SET override_multi_region_zone_config = false query TT @@ -356,7 +416,11 @@ ALTER DATABASE "mr-zone-configs" DROP REGION "us-east-1" statement ok SET override_multi_region_zone_config = true; + +statement ok ALTER DATABASE "mr-zone-configs" DROP REGION "us-east-1"; + +statement ok SET override_multi_region_zone_config = false query TT @@ -431,7 +495,11 @@ ALTER table regional_by_row CONFIGURE ZONE USING num_replicas = 10 statement ok SET override_multi_region_zone_config = true; + +statement ok ALTER table regional_by_row CONFIGURE ZONE USING num_replicas = 10; + +statement ok SET override_multi_region_zone_config = false statement error zone configuration for table regional_by_row contains incorrectly configured field "num_replicas" @@ -455,7 +523,11 @@ ALTER partition "us-east-1" of index regional_by_row@regional_by_row_pkey CONFIG statement ok SET override_multi_region_zone_config = true; + +statement ok ALTER partition "us-east-1" of index regional_by_row@regional_by_row_pkey CONFIGURE ZONE USING num_replicas = 10; + +statement ok SET override_multi_region_zone_config = false query TTT @@ -474,7 +546,11 @@ ALTER TABLE regional_by_row SET LOCALITY REGIONAL BY TABLE statement ok SET override_multi_region_zone_config = true; + +statement ok ALTER TABLE regional_by_row SET LOCALITY REGIONAL BY TABLE; + +statement ok SET override_multi_region_zone_config = false statement error attempting to modify protected field "num_replicas" of a multi-region zone configuration @@ -544,7 +620,11 @@ SELECT crdb_internal.validate_multi_region_zone_configs() statement ok SET override_multi_region_zone_config = true; + +statement ok ALTER PARTITION "ap-southeast-2" OF INDEX regional_by_row@idx CONFIGURE ZONE USING num_replicas = 10; + +statement ok SET override_multi_region_zone_config = false statement error zone configuration for partition "ap-southeast-2" of regional_by_row@idx contains incorrectly configured field "num_replicas" @@ -555,7 +635,11 @@ ALTER TABLE regional_by_row SET LOCALITY GLOBAL statement ok SET override_multi_region_zone_config = true; + +statement ok ALTER PARTITION "ap-southeast-2" OF INDEX regional_by_row@idx CONFIGURE ZONE DISCARD; + +statement ok SET override_multi_region_zone_config = false statement error missing zone configuration for partition "ap-southeast-2" of regional_by_row@idx @@ -566,7 +650,11 @@ ALTER TABLE regional_by_row SET LOCALITY GLOBAL statement ok SET override_multi_region_zone_config = true; + +statement ok ALTER TABLE regional_by_row SET LOCALITY GLOBAL; + +statement ok SET override_multi_region_zone_config = false statement ok @@ -576,7 +664,11 @@ SELECT crdb_internal.validate_multi_region_zone_configs() # to GLOBAL will reset it to true. 
statement ok SET override_multi_region_zone_config = true; + +statement ok ALTER table regional_by_row CONFIGURE ZONE USING global_reads = false; + +statement ok SET override_multi_region_zone_config = false query TT @@ -595,7 +687,11 @@ TABLE regional_by_row ALTER TABLE regional_by_row CONFIGURE ZONE USING statement ok SET override_multi_region_zone_config = true; + +statement ok ALTER TABLE regional_by_row SET LOCALITY GLOBAL; + +statement ok SET override_multi_region_zone_config = false query TT @@ -617,7 +713,11 @@ SELECT crdb_internal.validate_multi_region_zone_configs() statement ok SET override_multi_region_zone_config = true; + +statement ok ALTER index regional_by_row@regional_by_row_pkey CONFIGURE ZONE USING num_replicas = 10; + +statement ok SET override_multi_region_zone_config = false statement error attempting to update zone config which contains an extra zone configuration for index regional_by_row@regional_by_row_pkey with field num_replicas populated @@ -631,7 +731,11 @@ ALTER TABLE regional_by_row SET LOCALITY REGIONAL BY ROW statement ok SET override_multi_region_zone_config = true; + +statement ok ALTER TABLE regional_by_row SET LOCALITY REGIONAL BY ROW; + +statement ok SET override_multi_region_zone_config = false statement ok @@ -645,7 +749,11 @@ CREATE TABLE regional_by_row_as ( statement ok SET override_multi_region_zone_config = true; + +statement ok ALTER index regional_by_row_as@regional_by_row_as_pkey CONFIGURE ZONE USING num_replicas = 10; + +statement ok SET override_multi_region_zone_config = false query TT @@ -666,7 +774,11 @@ ALTER TABLE regional_by_row_as SET LOCALITY REGIONAL BY ROW statement ok SET override_multi_region_zone_config = true; + +statement ok ALTER TABLE regional_by_row_as SET LOCALITY REGIONAL BY ROW; + +statement ok SET override_multi_region_zone_config = false query TT @@ -724,7 +836,11 @@ ALTER DATABASE "mr-zone-configs" CONFIGURE ZONE DISCARD statement ok SET override_multi_region_zone_config = true; + +statement ok ALTER DATABASE "mr-zone-configs" CONFIGURE ZONE DISCARD; + +statement ok SET override_multi_region_zone_config = false query TT @@ -746,7 +862,11 @@ ALTER PARTITION "ca-central-1" OF INDEX regional_by_row@regional_by_row_pkey CON statement ok SET override_multi_region_zone_config = true; + +statement ok ALTER PARTITION "ca-central-1" OF INDEX regional_by_row@regional_by_row_pkey CONFIGURE ZONE DISCARD; + +statement ok SET override_multi_region_zone_config = false # Drop invalid zone config database so validation passes. @@ -860,7 +980,11 @@ CREATE TABLE tbl1 ( statement ok SET override_multi_region_zone_config = true; + +statement ok ALTER INDEX tbl1@tbl1_i_idx CONFIGURE ZONE USING num_replicas=10; + +statement ok SET override_multi_region_zone_config = false statement error attempting to update zone config which contains an extra zone configuration for index tbl1@tbl1_i_idx with field num_replicas populated @@ -870,7 +994,11 @@ ALTER TABLE tbl1 SET LOCALITY GLOBAL # index above. 
statement ok SET override_multi_region_zone_config = true; + +statement ok ALTER TABLE tbl1 SET LOCALITY GLOBAL; + +statement ok SET override_multi_region_zone_config = false # Validate that the zone configuration is gone @@ -899,7 +1027,11 @@ ALTER TABLE tbl1 SET LOCALITY GLOBAL statement ok SET override_multi_region_zone_config = true; + +statement ok ALTER INDEX tbl1@tbl1_i_idx CONFIGURE ZONE USING num_replicas=10; + +statement ok SET override_multi_region_zone_config = false statement error attempting to update zone config which contains an extra zone configuration for index tbl1@tbl1_i_idx with field num_replicas populated @@ -907,7 +1039,11 @@ ALTER TABLE tbl1 SET LOCALITY REGIONAL BY TABLE IN "us-east-1" statement ok SET override_multi_region_zone_config = true; + +statement ok ALTER TABLE tbl1 SET LOCALITY REGIONAL BY TABLE IN "us-east-1"; + +statement ok SET override_multi_region_zone_config = false statement ok @@ -915,7 +1051,11 @@ ALTER TABLE tbl1 SET LOCALITY GLOBAL statement ok SET override_multi_region_zone_config = true; + +statement ok ALTER INDEX tbl1@tbl1_i_idx CONFIGURE ZONE USING num_replicas=10; + +statement ok SET override_multi_region_zone_config = false statement error attempting to update zone config which contains an extra zone configuration for index tbl1@tbl1_i_idx with field num_replicas populated @@ -923,7 +1063,11 @@ ALTER TABLE tbl1 SET LOCALITY REGIONAL BY TABLE IN PRIMARY REGION statement ok SET override_multi_region_zone_config = true; + +statement ok ALTER TABLE tbl1 SET LOCALITY REGIONAL BY TABLE IN PRIMARY REGION; + +statement ok SET override_multi_region_zone_config = false statement ok @@ -931,7 +1075,11 @@ ALTER TABLE tbl1 SET LOCALITY REGIONAL BY ROW statement ok SET override_multi_region_zone_config = true; + +statement ok ALTER INDEX tbl1@tbl1_i_idx CONFIGURE ZONE USING num_replicas=10; + +statement ok SET override_multi_region_zone_config = false statement error attempting to update zone config which contains an extra zone configuration for index tbl1@tbl1_i_idx with field num_replicas populated @@ -939,7 +1087,11 @@ ALTER TABLE tbl1 SET LOCALITY REGIONAL BY TABLE IN PRIMARY REGION statement ok SET override_multi_region_zone_config = true; + +statement ok ALTER TABLE tbl1 SET LOCALITY REGIONAL BY TABLE IN PRIMARY REGION; + +statement ok SET override_multi_region_zone_config = false # Now test to ensure that the same test holds true if we're transitioning from @@ -953,8 +1105,15 @@ CREATE TABLE tbl2 ( statement ok SET override_multi_region_zone_config = true; + +statement ok +SET use_declarative_schema_changer = off; + +statement ok ALTER INDEX tbl2@tbl2_i_idx CONFIGURE ZONE USING num_replicas=10; -SET override_multi_region_zone_config = false + +statement ok +SET override_multi_region_zone_config = false; statement error attempting to update zone config which contains an extra zone configuration for index tbl2@tbl2_i_idx with field num_replicas populated ALTER TABLE tbl2 SET LOCALITY GLOBAL @@ -977,7 +1136,11 @@ INDEX tbl2@tbl2_i_idx ALTER INDEX tbl2@tbl2_i_idx CONFIGURE ZONE USING statement ok SET override_multi_region_zone_config = true; + +statement ok ALTER TABLE tbl2 SET LOCALITY GLOBAL; + +statement ok SET override_multi_region_zone_config = false # Validate that we don't overwrite gc.ttlseconds @@ -1000,7 +1163,11 @@ ALTER TABLE tbl2 SET LOCALITY REGIONAL BY ROW statement ok SET override_multi_region_zone_config = true; + +statement ok ALTER TABLE tbl2 CONFIGURE ZONE USING num_replicas=10; + +statement ok SET 
override_multi_region_zone_config = false statement error attempting to update zone configuration for table tbl2 which contains modified field "num_replicas" @@ -1008,7 +1175,11 @@ ALTER TABLE tbl2 SET LOCALITY GLOBAL statement ok SET override_multi_region_zone_config = true; + +statement ok ALTER TABLE tbl2 SET LOCALITY GLOBAL; + +statement ok SET override_multi_region_zone_config = false statement ok @@ -1036,7 +1207,11 @@ ALTER INDEX tbl3@tbl3_i_idx CONFIGURE ZONE USING num_replicas=10 statement ok SET override_multi_region_zone_config = true; + +statement ok ALTER INDEX tbl3@tbl3_i_idx CONFIGURE ZONE USING num_replicas=10; + +statement ok SET override_multi_region_zone_config = false query TT @@ -1076,7 +1251,11 @@ ALTER partition "us-east-1" of index tbl3@tbl3_i_idx CONFIGURE ZONE DISCARD # ...but we should be able to override. statement ok SET override_multi_region_zone_config = true; + +statement ok ALTER partition "us-east-1" of index tbl3@tbl3_i_idx CONFIGURE ZONE DISCARD; + +statement ok SET override_multi_region_zone_config = false # Try a similar test but on the table zone configuration. @@ -1088,7 +1267,11 @@ ALTER TABLE tbl3 CONFIGURE ZONE USING num_replicas=10 statement ok SET override_multi_region_zone_config = true; + +statement ok ALTER TABLE tbl3 CONFIGURE ZONE USING num_replicas=10; + +statement ok SET override_multi_region_zone_config = false query TT @@ -1143,7 +1326,11 @@ ALTER INDEX tbl4@tbl4_i_idx CONFIGURE ZONE USING num_replicas=10 statement ok SET override_multi_region_zone_config = true; + +statement ok ALTER INDEX tbl4@tbl4_i_idx CONFIGURE ZONE USING num_replicas=10; + +statement ok SET override_multi_region_zone_config = false query TT @@ -1184,7 +1371,11 @@ ALTER TABLE tbl4 CONFIGURE ZONE USING num_replicas=10 statement ok SET override_multi_region_zone_config = true; + +statement ok ALTER TABLE tbl4 CONFIGURE ZONE USING num_replicas=10; + +statement ok SET override_multi_region_zone_config = false query TT @@ -1236,7 +1427,11 @@ ALTER INDEX tbl5@tbl5_i_idx CONFIGURE ZONE USING num_replicas=10 statement ok SET override_multi_region_zone_config = true; + +statement ok ALTER INDEX tbl5@tbl5_i_idx CONFIGURE ZONE USING num_replicas=10; + +statement ok SET override_multi_region_zone_config = false query TT @@ -1277,7 +1472,11 @@ ALTER TABLE tbl5 CONFIGURE ZONE USING num_replicas=10 statement ok SET override_multi_region_zone_config = true; + +statement ok ALTER TABLE tbl5 CONFIGURE ZONE USING num_replicas=10; + +statement ok SET override_multi_region_zone_config = false query TT @@ -1299,7 +1498,11 @@ ALTER TABLE tbl5 CONFIGURE ZONE DISCARD statement ok SET override_multi_region_zone_config = true; + +statement ok ALTER TABLE tbl5 CONFIGURE ZONE DISCARD; + +statement ok SET override_multi_region_zone_config = false query TT @@ -1337,7 +1540,11 @@ ALTER INDEX tbl6@tbl6_i_idx CONFIGURE ZONE USING num_replicas=10 statement ok SET override_multi_region_zone_config = true; + +statement ok ALTER INDEX tbl6@tbl6_i_idx CONFIGURE ZONE USING num_replicas=10; + +statement ok SET override_multi_region_zone_config = false query TT @@ -1381,7 +1588,11 @@ ALTER TABLE tbl6 CONFIGURE ZONE USING num_replicas=10 statement ok SET override_multi_region_zone_config = true; + +statement ok ALTER TABLE tbl6 CONFIGURE ZONE USING num_replicas=10; + +statement ok SET override_multi_region_zone_config = false query TT @@ -1405,7 +1616,11 @@ ALTER TABLE tbl6 CONFIGURE ZONE DISCARD statement ok SET override_multi_region_zone_config = true; + +statement ok ALTER TABLE tbl6 CONFIGURE ZONE 
DISCARD; + +statement ok SET override_multi_region_zone_config = false query TT @@ -1468,7 +1683,11 @@ PARTITION "us-east-1" OF INDEX tbl8@tbl8_pkey ALTER PARTITION "us-east-1" OF IN statement ok SET override_multi_region_zone_config = true; + +statement ok ALTER PARTITION "us-east-1" OF INDEX tbl8@tbl8_pkey CONFIGURE ZONE DISCARD; + +statement ok SET override_multi_region_zone_config = false query TT @@ -1561,7 +1780,11 @@ SELECT crdb_internal.validate_multi_region_zone_configs() statement ok SET override_multi_region_zone_config = true; + +statement ok ALTER TABLE tbl9 CONFIGURE ZONE USING num_replicas = 5; + +statement ok SET override_multi_region_zone_config = false query TT @@ -1602,7 +1825,11 @@ CREATE TABLE tbl10 ( statement ok SET override_multi_region_zone_config = true; + +statement ok ALTER table tbl10 CONFIGURE ZONE DISCARD; + +statement ok SET override_multi_region_zone_config = false query TT @@ -1645,7 +1872,11 @@ SELECT crdb_internal.validate_multi_region_zone_configs() statement ok SET override_multi_region_zone_config = true; + +statement ok ALTER TABLE tbl10 CONFIGURE ZONE USING num_replicas = 5; + +statement ok SET override_multi_region_zone_config = false query TT @@ -1749,7 +1980,11 @@ DATABASE rebuild_zc_db ALTER DATABASE rebuild_zc_db CONFIGURE ZONE USING statement ok SET override_multi_region_zone_config = true; + +statement ok ALTER DATABASE "rebuild_zc_db" CONFIGURE ZONE USING num_replicas = 10; + +statement ok SET override_multi_region_zone_config = false query TT @@ -1789,7 +2024,11 @@ SELECT crdb_internal.validate_multi_region_zone_configs() statement ok SET override_multi_region_zone_config = true; + +statement ok ALTER DATABASE "rebuild_zc_db" CONFIGURE ZONE DISCARD; + +statement ok SET override_multi_region_zone_config = false query TT diff --git a/pkg/ccl/logictestccl/testdata/logic_test/plpgsql_txn b/pkg/ccl/logictestccl/testdata/logic_test/plpgsql_txn index e9ff1287beec..8c06b5fc4d3f 100644 --- a/pkg/ccl/logictestccl/testdata/logic_test/plpgsql_txn +++ b/pkg/ccl/logictestccl/testdata/logic_test/plpgsql_txn @@ -873,7 +873,7 @@ CREATE PROCEDURE p() LANGUAGE PLpgSQL AS $$ END $$; -skipif config local-read-committed +skipif config weak-iso-level-configs query T noticetrace CALL p(); ---- @@ -895,7 +895,7 @@ NOTICE: read committed NOTICE: ROLLBACK; NOTICE: serializable -skipif config local-read-committed +skipif config weak-iso-level-configs query T SHOW TRANSACTION ISOLATION LEVEL; ---- diff --git a/pkg/ccl/logictestccl/testdata/logic_test/procedure_params b/pkg/ccl/logictestccl/testdata/logic_test/procedure_params index 0c77f4cd0e36..1bb26675ee8b 100644 --- a/pkg/ccl/logictestccl/testdata/logic_test/procedure_params +++ b/pkg/ccl/logictestccl/testdata/logic_test/procedure_params @@ -347,6 +347,7 @@ SELECT create_statement FROM [SHOW CREATE PROCEDURE p_param_types]; ---- CREATE PROCEDURE public.p_param_types(IN p1 INT8, INOUT p2 INT8, INOUT p3 INT8, OUT p4 INT8) LANGUAGE plpgsql + SECURITY INVOKER AS $$ BEGIN SELECT p2, p3, p1; @@ -368,6 +369,7 @@ SELECT create_statement FROM [SHOW CREATE PROCEDURE p_param_types]; ---- CREATE PROCEDURE public.p_param_types(OUT param INT8) LANGUAGE plpgsql + SECURITY INVOKER AS $$ BEGIN SELECT 1; @@ -480,6 +482,7 @@ SELECT create_statement FROM [SHOW CREATE PROCEDURE p_default_names]; ---- CREATE PROCEDURE public.p_default_names(OUT INT8, OUT param2 INT8, IN INT8, OUT column3 INT8) LANGUAGE plpgsql + SECURITY INVOKER AS $$ BEGIN SELECT 3 INTO column3; @@ -505,6 +508,7 @@ SELECT create_statement FROM [SHOW CREATE PROCEDURE 
p_default_names]; ---- CREATE PROCEDURE public.p_default_names(OUT INT8, OUT param2 INT8, IN INT8, OUT INT8) LANGUAGE plpgsql + SECURITY INVOKER AS $$ BEGIN param2 := 2; @@ -526,6 +530,7 @@ SELECT create_statement FROM [SHOW CREATE PROCEDURE p_default_names]; ---- CREATE PROCEDURE public.p_default_names(OUT INT8, OUT param2 INT8, IN in_param INT8, OUT INT8) LANGUAGE plpgsql + SECURITY INVOKER AS $$ BEGIN SELECT in_param INTO param2; diff --git a/pkg/ccl/logictestccl/testdata/logic_test/read_committed b/pkg/ccl/logictestccl/testdata/logic_test/read_committed index 2f34bc9931b4..b6e411a8594d 100644 --- a/pkg/ccl/logictestccl/testdata/logic_test/read_committed +++ b/pkg/ccl/logictestccl/testdata/logic_test/read_committed @@ -84,12 +84,12 @@ SELECT aisle FROM supermarket WHERE person = 'matilda' FOR UPDATE statement ok ROLLBACK -# Use SELECT FOR UPDATE under snapshot isolation. +# Use SELECT FOR UPDATE under repeatable read isolation. statement ok -SET CLUSTER SETTING sql.txn.snapshot_isolation.enabled = true +SET CLUSTER SETTING sql.txn.repeatable_read_isolation.enabled = true statement ok -BEGIN TRANSACTION ISOLATION LEVEL SNAPSHOT +BEGIN TRANSACTION ISOLATION LEVEL REPEATABLE READ query I SELECT aisle FROM supermarket WHERE person = 'matilda' FOR UPDATE @@ -100,7 +100,7 @@ statement ok ROLLBACK statement ok -RESET CLUSTER SETTING sql.txn.snapshot_isolation.enabled +RESET CLUSTER SETTING sql.txn.repeatable_read_isolation.enabled # Use SELECT FOR UPDATE in a subquery under read committed isolation. statement ok diff --git a/pkg/ccl/logictestccl/testdata/logic_test/regional_by_row_insert_fast_path b/pkg/ccl/logictestccl/testdata/logic_test/regional_by_row_insert_fast_path index 82e471282428..8f25d5c8ce72 100644 --- a/pkg/ccl/logictestccl/testdata/logic_test/regional_by_row_insert_fast_path +++ b/pkg/ccl/logictestccl/testdata/logic_test/regional_by_row_insert_fast_path @@ -200,7 +200,7 @@ query T rowsort SELECT message FROM [SHOW KV TRACE FOR SESSION] WHERE message LIKE '%batch%' AND message LIKE '%Scan%' ---- -r68: sending batch 4 Scan to (n1,s1):1 +r69: sending batch 4 Scan to (n1,s1):1 # Regression test for #115377. 
statement ok diff --git a/pkg/ccl/logictestccl/testdata/logic_test/regional_by_row_query_behavior b/pkg/ccl/logictestccl/testdata/logic_test/regional_by_row_query_behavior index a3111dca5d35..e19558343f50 100644 --- a/pkg/ccl/logictestccl/testdata/logic_test/regional_by_row_query_behavior +++ b/pkg/ccl/logictestccl/testdata/logic_test/regional_by_row_query_behavior @@ -264,7 +264,7 @@ ap-southeast-2 23 query TT SELECT start_key, end_key FROM [SHOW RANGE FROM TABLE regional_by_row_table FOR ROW ('ap-southeast-2', 1)] ---- - … + … query TIIII SELECT crdb_region, pk, pk2, a, b FROM regional_by_row_table @@ -402,7 +402,7 @@ SELECT start_key, end_key, replicas, lease_holder FROM [SHOW RANGES FROM INDEX r ORDER BY 1 ---- start_key end_key replicas lease_holder - …/"\x80"/0 {1} 1 + …/"\x80"/0 {1} 1 …/"\x80"/0 …/"\xc0"/0 {4} 4 …/"\xc0"/0 {7} 7 diff --git a/pkg/ccl/logictestccl/testdata/logic_test/show_create b/pkg/ccl/logictestccl/testdata/logic_test/show_create index 7db494238417..b6b7156f2fd5 100644 --- a/pkg/ccl/logictestccl/testdata/logic_test/show_create +++ b/pkg/ccl/logictestccl/testdata/logic_test/show_create @@ -76,6 +76,7 @@ r1 CREATE FUNCTION public.r1() NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 1; $$ @@ -85,6 +86,7 @@ r1 CREATE FUNCTION public.r1(i INT8) NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 1; $$ @@ -95,11 +97,13 @@ SELECT * FROM [SHOW CREATE PROCEDURE r1] ORDER BY 2 ---- r1 CREATE PROCEDURE public.r1(s STRING) LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 1; $$ r1 CREATE PROCEDURE public.r1(s STRING, i INT8) LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 1; $$ @@ -131,6 +135,7 @@ r2 CREATE FUNCTION sc.r2() NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 1; $$ @@ -140,6 +145,7 @@ SHOW CREATE PROCEDURE r2 ---- r2 CREATE PROCEDURE sc.r2(s STRING) LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 1; $$ @@ -169,6 +175,7 @@ f112134 CREATE FUNCTION sc.f112134() NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE plpgsql + SECURITY INVOKER AS $$ DECLARE x INT8 := 0; diff --git a/pkg/ccl/logictestccl/testdata/logic_test/udf_params b/pkg/ccl/logictestccl/testdata/logic_test/udf_params index 14797533673d..1f385c3faee4 100644 --- a/pkg/ccl/logictestccl/testdata/logic_test/udf_params +++ b/pkg/ccl/logictestccl/testdata/logic_test/udf_params @@ -224,6 +224,7 @@ CREATE FUNCTION public.f_param_types(IN p1 INT8, INOUT p2 INT8, INOUT p3 INT8, O NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE plpgsql + SECURITY INVOKER AS $$ BEGIN SELECT p2, p3, p1; @@ -249,6 +250,7 @@ CREATE FUNCTION public.f_param_types(OUT param INT8) NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE plpgsql + SECURITY INVOKER AS $$ BEGIN SELECT 1; @@ -323,6 +325,7 @@ CREATE FUNCTION public.f_out_int(OUT param_new INT8) NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE plpgsql + SECURITY INVOKER AS $$ BEGIN param_new := 2; @@ -378,6 +381,7 @@ CREATE FUNCTION public.f_int(INOUT param INT8) NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE plpgsql + SECURITY INVOKER AS $$ BEGIN param := 2; @@ -408,6 +412,7 @@ CREATE FUNCTION public.f_int(IN param INT8, OUT param_out INT8) NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE plpgsql + SECURITY INVOKER AS $$ BEGIN SELECT param INTO param_out; @@ -432,6 +437,7 @@ CREATE FUNCTION public.f_int(OUT param_out INT8, IN param INT8) NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE plpgsql + SECURITY INVOKER AS $$ BEGIN SELECT param INTO param_out; @@ -456,6 +462,7 @@ CREATE FUNCTION public.f_int(INOUT param INT8) NOT LEAKPROOF CALLED 
ON NULL INPUT LANGUAGE plpgsql + SECURITY INVOKER AS $$ BEGIN param := param; @@ -488,6 +495,7 @@ CREATE FUNCTION public.f_default_names(OUT INT8, OUT param2 INT8, IN INT8, OUT I NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE plpgsql + SECURITY INVOKER AS $$ BEGIN param2 := 2; @@ -546,6 +554,7 @@ CREATE FUNCTION public.f_default_names(OUT INT8, OUT param2 INT8, IN INT8, OUT c NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE plpgsql + SECURITY INVOKER AS $$ BEGIN SELECT 3 INTO column3; @@ -575,6 +584,7 @@ CREATE FUNCTION public.f_default_names(OUT INT8, OUT param2 INT8, IN INT8, OUT I NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE plpgsql + SECURITY INVOKER AS $$ BEGIN param2 := 2; @@ -594,6 +604,7 @@ CREATE FUNCTION public.f_default_names(OUT INT8, OUT param2 INT8, IN in_param IN NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE plpgsql + SECURITY INVOKER AS $$ BEGIN SELECT in_param INTO param2; diff --git a/pkg/ccl/logictestccl/testdata/logic_test/udf_rewrite b/pkg/ccl/logictestccl/testdata/logic_test/udf_rewrite index e5a93d563f05..3f908d4faeca 100644 --- a/pkg/ccl/logictestccl/testdata/logic_test/udf_rewrite +++ b/pkg/ccl/logictestccl/testdata/logic_test/udf_rewrite @@ -94,6 +94,7 @@ f_rewrite CREATE FUNCTION public.f_rewrite() NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE plpgsql + SECURITY INVOKER AS $$ DECLARE i INT8 := nextval('public.seq'::REGCLASS); @@ -157,6 +158,7 @@ f_rewrite CREATE FUNCTION public.f_rewrite() NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE plpgsql + SECURITY INVOKER AS $$ DECLARE i INT8 := nextval('public.renamed'::REGCLASS); @@ -289,6 +291,7 @@ f_rewrite CREATE FUNCTION public.f_rewrite() NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE plpgsql + SECURITY INVOKER AS $$ DECLARE day public.weekday := 'wednesday':::public.weekday; @@ -355,6 +358,7 @@ f_rewrite CREATE FUNCTION public.f_rewrite() NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE plpgsql + SECURITY INVOKER AS $$ DECLARE day public.workday := 'humpday':::public.workday; @@ -434,6 +438,7 @@ f_rewrite CREATE FUNCTION public.f_rewrite(INOUT param1 public.weekday, OUT par NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE plpgsql + SECURITY INVOKER AS $$ BEGIN param2 := param1; @@ -461,6 +466,7 @@ f_rewrite CREATE FUNCTION public.f_rewrite(INOUT param1 public.workday, OUT par NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE plpgsql + SECURITY INVOKER AS $$ BEGIN param2 := param1; @@ -533,6 +539,7 @@ SHOW CREATE PROCEDURE p_rewrite; ---- p_rewrite CREATE PROCEDURE public.p_rewrite(INOUT param1 public.weekday, OUT param2 public.weekday) LANGUAGE plpgsql + SECURITY INVOKER AS $$ BEGIN param2 := param1; @@ -556,6 +563,7 @@ SHOW CREATE PROCEDURE p_rewrite; ---- p_rewrite CREATE PROCEDURE public.p_rewrite(INOUT param1 public.workday, OUT param2 public.workday) LANGUAGE plpgsql + SECURITY INVOKER AS $$ BEGIN param2 := param1; diff --git a/pkg/ccl/logictestccl/testdata/logic_test/zone b/pkg/ccl/logictestccl/testdata/logic_test/zone index e1ed1bbfa546..16b669f41bd5 100644 --- a/pkg/ccl/logictestccl/testdata/logic_test/zone +++ b/pkg/ccl/logictestccl/testdata/logic_test/zone @@ -644,6 +644,8 @@ TABLE system.public.tenant_usage TABLE system.public.transaction_activity NULL system transaction_activity NULL NULL TABLE system.public.transaction_statistics NULL system transaction_statistics NULL NULL TABLE test.public.t NULL test t NULL NULL +TABLE test.public.t36642 NULL test t36642 NULL NULL +TABLE test.public.t36644 NULL test t36644 NULL NULL # Test the zone information being displayed in SHOW CREATE statement ok diff --git 
a/pkg/ccl/logictestccl/tests/3node-tenant-multiregion/generated_test.go b/pkg/ccl/logictestccl/tests/3node-tenant-multiregion/generated_test.go index 1acf9394ce10..d1351a04999f 100644 --- a/pkg/ccl/logictestccl/tests/3node-tenant-multiregion/generated_test.go +++ b/pkg/ccl/logictestccl/tests/3node-tenant-multiregion/generated_test.go @@ -30,7 +30,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/randutil" ) -const configIdx = 10 +const configIdx = 11 var logicTestDir string var cclLogicTestDir string diff --git a/pkg/ccl/logictestccl/tests/3node-tenant/generated_test.go b/pkg/ccl/logictestccl/tests/3node-tenant/generated_test.go index 5260dacaaf74..3d3b1a5888aa 100644 --- a/pkg/ccl/logictestccl/tests/3node-tenant/generated_test.go +++ b/pkg/ccl/logictestccl/tests/3node-tenant/generated_test.go @@ -30,7 +30,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/randutil" ) -const configIdx = 9 +const configIdx = 10 var logicTestDir string var cclLogicTestDir string @@ -620,6 +620,13 @@ func TestTenantLogic_distinct_on( runLogicTest(t, "distinct_on") } +func TestTenantLogic_distsql_automatic_partial_stats( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "distsql_automatic_partial_stats") +} + func TestTenantLogic_distsql_automatic_stats( t *testing.T, ) { diff --git a/pkg/ccl/logictestccl/tests/5node/generated_test.go b/pkg/ccl/logictestccl/tests/5node/generated_test.go index ba6cbea3b94c..a68c43d0c8df 100644 --- a/pkg/ccl/logictestccl/tests/5node/generated_test.go +++ b/pkg/ccl/logictestccl/tests/5node/generated_test.go @@ -29,7 +29,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/randutil" ) -const configIdx = 7 +const configIdx = 8 var cclLogicTestDir string diff --git a/pkg/ccl/logictestccl/tests/cockroach-go-testserver-24.1/generated_test.go b/pkg/ccl/logictestccl/tests/cockroach-go-testserver-24.1/generated_test.go index f7b618a4f1ec..090cc4a52fe9 100644 --- a/pkg/ccl/logictestccl/tests/cockroach-go-testserver-24.1/generated_test.go +++ b/pkg/ccl/logictestccl/tests/cockroach-go-testserver-24.1/generated_test.go @@ -29,7 +29,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/randutil" ) -const configIdx = 20 +const configIdx = 21 var cclLogicTestDir string diff --git a/pkg/ccl/logictestccl/tests/fakedist-disk/generated_test.go b/pkg/ccl/logictestccl/tests/fakedist-disk/generated_test.go index 9e86a3af1b08..9f5c937b1b42 100644 --- a/pkg/ccl/logictestccl/tests/fakedist-disk/generated_test.go +++ b/pkg/ccl/logictestccl/tests/fakedist-disk/generated_test.go @@ -29,7 +29,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/randutil" ) -const configIdx = 6 +const configIdx = 7 var cclLogicTestDir string diff --git a/pkg/ccl/logictestccl/tests/fakedist-vec-off/generated_test.go b/pkg/ccl/logictestccl/tests/fakedist-vec-off/generated_test.go index d76c084448ad..b2f4c60049f2 100644 --- a/pkg/ccl/logictestccl/tests/fakedist-vec-off/generated_test.go +++ b/pkg/ccl/logictestccl/tests/fakedist-vec-off/generated_test.go @@ -29,7 +29,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/randutil" ) -const configIdx = 5 +const configIdx = 6 var cclLogicTestDir string diff --git a/pkg/ccl/logictestccl/tests/fakedist/generated_test.go b/pkg/ccl/logictestccl/tests/fakedist/generated_test.go index 687b1bad73b8..6c9e1001b93a 100644 --- a/pkg/ccl/logictestccl/tests/fakedist/generated_test.go +++ b/pkg/ccl/logictestccl/tests/fakedist/generated_test.go @@ -29,7 +29,7 @@ import ( 
"github.com/cockroachdb/cockroach/pkg/util/randutil" ) -const configIdx = 4 +const configIdx = 5 var cclLogicTestDir string diff --git a/pkg/ccl/logictestccl/tests/local-mixed-24.1/generated_test.go b/pkg/ccl/logictestccl/tests/local-mixed-24.1/generated_test.go index 7a70085bf7f8..ed400e92c132 100644 --- a/pkg/ccl/logictestccl/tests/local-mixed-24.1/generated_test.go +++ b/pkg/ccl/logictestccl/tests/local-mixed-24.1/generated_test.go @@ -29,7 +29,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/randutil" ) -const configIdx = 18 +const configIdx = 19 var cclLogicTestDir string diff --git a/pkg/ccl/logictestccl/tests/local-mixed-24.2/generated_test.go b/pkg/ccl/logictestccl/tests/local-mixed-24.2/generated_test.go index 85e5d2cebb5c..c34b24dabe47 100644 --- a/pkg/ccl/logictestccl/tests/local-mixed-24.2/generated_test.go +++ b/pkg/ccl/logictestccl/tests/local-mixed-24.2/generated_test.go @@ -29,7 +29,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/randutil" ) -const configIdx = 19 +const configIdx = 20 var cclLogicTestDir string diff --git a/pkg/ccl/logictestccl/tests/local-read-committed/BUILD.bazel b/pkg/ccl/logictestccl/tests/local-read-committed/BUILD.bazel index e2e8bb3d5a71..5ecf61957e5c 100644 --- a/pkg/ccl/logictestccl/tests/local-read-committed/BUILD.bazel +++ b/pkg/ccl/logictestccl/tests/local-read-committed/BUILD.bazel @@ -7,10 +7,11 @@ go_test( data = [ "//c-deps:libgeos", # keep "//pkg/ccl/logictestccl:testdata", # keep + "//pkg/sql/logictest:testdata", # keep "//pkg/sql/opt/exec/execbuilder:testdata", # keep ], exec_properties = {"test.Pool": "large"}, - shard_count = 37, + shard_count = 48, tags = ["cpu:1"], deps = [ "//pkg/base", diff --git a/pkg/ccl/logictestccl/tests/local-read-committed/generated_test.go b/pkg/ccl/logictestccl/tests/local-read-committed/generated_test.go index 1c01d0fc3a7a..694eb3c25c38 100644 --- a/pkg/ccl/logictestccl/tests/local-read-committed/generated_test.go +++ b/pkg/ccl/logictestccl/tests/local-read-committed/generated_test.go @@ -32,10 +32,20 @@ import ( const configIdx = 3 +var logicTestDir string var cclLogicTestDir string var execBuildLogicTestDir string func init() { + if bazel.BuiltWithBazel() { + var err error + logicTestDir, err = bazel.Runfile("pkg/sql/logictest/testdata/logic_test") + if err != nil { + panic(err) + } + } else { + logicTestDir = "../../../../sql/logictest/testdata/logic_test" + } if bazel.BuiltWithBazel() { var err error cclLogicTestDir, err = bazel.Runfile("pkg/ccl/logictestccl/testdata/logic_test") @@ -70,6 +80,10 @@ func TestMain(m *testing.M) { os.Exit(m.Run()) } +func runLogicTest(t *testing.T, file string) { + skip.UnderDeadlock(t, "times out and/or hangs") + logictest.RunLogicTest(t, logictest.TestServerArgs{}, configIdx, filepath.Join(logicTestDir, file)) +} func runCCLLogicTest(t *testing.T, file string) { skip.UnderDeadlock(t, "times out and/or hangs") logictest.RunLogicTest(t, logictest.TestServerArgs{}, configIdx, filepath.Join(cclLogicTestDir, file)) @@ -96,6 +110,8 @@ func runExecBuildLogicTest(t *testing.T, file string) { func TestLogic_tmp(t *testing.T) { defer leaktest.AfterTest(t)() var glob string + glob = filepath.Join(logicTestDir, "_*") + logictest.RunLogicTests(t, logictest.TestServerArgs{}, configIdx, glob) glob = filepath.Join(cclLogicTestDir, "_*") logictest.RunLogicTests(t, logictest.TestServerArgs{}, configIdx, glob) glob = filepath.Join(execBuildLogicTestDir, "_*") @@ -105,6 +121,2456 @@ func TestLogic_tmp(t *testing.T) { logictest.RunLogicTests(t, 
serverArgs, configIdx, glob) } +func TestReadCommittedLogic_aggregate( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "aggregate") +} + +func TestReadCommittedLogic_alias_types( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "alias_types") +} + +func TestReadCommittedLogic_alter_column_type( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "alter_column_type") +} + +func TestReadCommittedLogic_alter_database_convert_to_schema( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "alter_database_convert_to_schema") +} + +func TestReadCommittedLogic_alter_database_owner( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "alter_database_owner") +} + +func TestReadCommittedLogic_alter_default_privileges_for_all_roles( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "alter_default_privileges_for_all_roles") +} + +func TestReadCommittedLogic_alter_default_privileges_for_schema( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "alter_default_privileges_for_schema") +} + +func TestReadCommittedLogic_alter_default_privileges_for_sequence( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "alter_default_privileges_for_sequence") +} + +func TestReadCommittedLogic_alter_default_privileges_for_table( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "alter_default_privileges_for_table") +} + +func TestReadCommittedLogic_alter_default_privileges_for_type( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "alter_default_privileges_for_type") +} + +func TestReadCommittedLogic_alter_default_privileges_in_schema( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "alter_default_privileges_in_schema") +} + +func TestReadCommittedLogic_alter_default_privileges_with_grant_option( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "alter_default_privileges_with_grant_option") +} + +func TestReadCommittedLogic_alter_primary_key( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "alter_primary_key") +} + +func TestReadCommittedLogic_alter_role( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "alter_role") +} + +func TestReadCommittedLogic_alter_role_set( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "alter_role_set") +} + +func TestReadCommittedLogic_alter_schema_owner( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "alter_schema_owner") +} + +func TestReadCommittedLogic_alter_sequence( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "alter_sequence") +} + +func TestReadCommittedLogic_alter_sequence_owner( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "alter_sequence_owner") +} + +func TestReadCommittedLogic_alter_table( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "alter_table") +} + +func TestReadCommittedLogic_alter_table_owner( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "alter_table_owner") +} + +func TestReadCommittedLogic_alter_type( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "alter_type") +} + +func TestReadCommittedLogic_alter_type_owner( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "alter_type_owner") +} + +func TestReadCommittedLogic_alter_view_owner( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + 
runLogicTest(t, "alter_view_owner") +} + +func TestReadCommittedLogic_and_or( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "and_or") +} + +func TestReadCommittedLogic_apply_join( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "apply_join") +} + +func TestReadCommittedLogic_array( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "array") +} + +func TestReadCommittedLogic_as_of( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "as_of") +} + +func TestReadCommittedLogic_asyncpg( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "asyncpg") +} + +func TestReadCommittedLogic_auto_span_config_reconciliation_job( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "auto_span_config_reconciliation_job") +} + +func TestReadCommittedLogic_bit( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "bit") +} + +func TestReadCommittedLogic_builtin_function( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "builtin_function") +} + +func TestReadCommittedLogic_builtin_function_notenant( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "builtin_function_notenant") +} + +func TestReadCommittedLogic_bytes( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "bytes") +} + +func TestReadCommittedLogic_cascade( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "cascade") +} + +func TestReadCommittedLogic_case_sensitive_names( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "case_sensitive_names") +} + +func TestReadCommittedLogic_cast( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "cast") +} + +func TestReadCommittedLogic_check_constraints( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "check_constraints") +} + +func TestReadCommittedLogic_cluster_locks( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "cluster_locks") +} + +func TestReadCommittedLogic_cluster_settings( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "cluster_settings") +} + +func TestReadCommittedLogic_collatedstring( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "collatedstring") +} + +func TestReadCommittedLogic_collatedstring_constraint( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "collatedstring_constraint") +} + +func TestReadCommittedLogic_collatedstring_index1( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "collatedstring_index1") +} + +func TestReadCommittedLogic_collatedstring_index2( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "collatedstring_index2") +} + +func TestReadCommittedLogic_collatedstring_normalization( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "collatedstring_normalization") +} + +func TestReadCommittedLogic_collatedstring_nullinindex( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "collatedstring_nullinindex") +} + +func TestReadCommittedLogic_collatedstring_uniqueindex1( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "collatedstring_uniqueindex1") +} + +func TestReadCommittedLogic_collatedstring_uniqueindex2( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "collatedstring_uniqueindex2") +} + +func TestReadCommittedLogic_comment_on( + t *testing.T, +) { + defer 
leaktest.AfterTest(t)() + runLogicTest(t, "comment_on") +} + +func TestReadCommittedLogic_composite_types( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "composite_types") +} + +func TestReadCommittedLogic_computed( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "computed") +} + +func TestReadCommittedLogic_conditional( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "conditional") +} + +func TestReadCommittedLogic_connect_privilege( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "connect_privilege") +} + +func TestReadCommittedLogic_crdb_internal( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "crdb_internal") +} + +func TestReadCommittedLogic_crdb_internal_catalog( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "crdb_internal_catalog") +} + +func TestReadCommittedLogic_crdb_internal_default_privileges( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "crdb_internal_default_privileges") +} + +func TestReadCommittedLogic_create_as( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "create_as") +} + +func TestReadCommittedLogic_create_index( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "create_index") +} + +func TestReadCommittedLogic_create_table( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "create_table") +} + +func TestReadCommittedLogic_cross_join( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "cross_join") +} + +func TestReadCommittedLogic_cursor( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "cursor") +} + +func TestReadCommittedLogic_custom_escape_character( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "custom_escape_character") +} + +func TestReadCommittedLogic_dangerous_statements( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "dangerous_statements") +} + +func TestReadCommittedLogic_database( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "database") +} + +func TestReadCommittedLogic_datetime( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "datetime") +} + +func TestReadCommittedLogic_decimal( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "decimal") +} + +func TestReadCommittedLogic_default( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "default") +} + +func TestReadCommittedLogic_delete( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "delete") +} + +func TestReadCommittedLogic_delete_batch( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "delete_batch") +} + +func TestReadCommittedLogic_dependencies( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "dependencies") +} + +func TestReadCommittedLogic_discard( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "discard") +} + +func TestReadCommittedLogic_disjunction_in_join( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "disjunction_in_join") +} + +func TestReadCommittedLogic_distinct( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "distinct") +} + +func TestReadCommittedLogic_distinct_on( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "distinct_on") +} + +func TestReadCommittedLogic_distsql_automatic_partial_stats( + t *testing.T, +) { + defer 
leaktest.AfterTest(t)() + runLogicTest(t, "distsql_automatic_partial_stats") +} + +func TestReadCommittedLogic_distsql_automatic_stats( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "distsql_automatic_stats") +} + +func TestReadCommittedLogic_distsql_event_log( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "distsql_event_log") +} + +func TestReadCommittedLogic_distsql_expr( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "distsql_expr") +} + +func TestReadCommittedLogic_distsql_join( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "distsql_join") +} + +func TestReadCommittedLogic_distsql_srfs( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "distsql_srfs") +} + +func TestReadCommittedLogic_drop_database( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "drop_database") +} + +func TestReadCommittedLogic_drop_function( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "drop_function") +} + +func TestReadCommittedLogic_drop_index( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "drop_index") +} + +func TestReadCommittedLogic_drop_owned_by( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "drop_owned_by") +} + +func TestReadCommittedLogic_drop_procedure( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "drop_procedure") +} + +func TestReadCommittedLogic_drop_role_with_default_privileges( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "drop_role_with_default_privileges") +} + +func TestReadCommittedLogic_drop_role_with_default_privileges_in_schema( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "drop_role_with_default_privileges_in_schema") +} + +func TestReadCommittedLogic_drop_schema( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "drop_schema") +} + +func TestReadCommittedLogic_drop_sequence( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "drop_sequence") +} + +func TestReadCommittedLogic_drop_table( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "drop_table") +} + +func TestReadCommittedLogic_drop_temp( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "drop_temp") +} + +func TestReadCommittedLogic_drop_type( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "drop_type") +} + +func TestReadCommittedLogic_drop_user( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "drop_user") +} + +func TestReadCommittedLogic_drop_view( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "drop_view") +} + +func TestReadCommittedLogic_edge( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "edge") +} + +func TestReadCommittedLogic_enums( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "enums") +} + +func TestReadCommittedLogic_errors( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "errors") +} + +func TestReadCommittedLogic_event_log( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "event_log") +} + +func TestReadCommittedLogic_exclude_data_from_backup( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "exclude_data_from_backup") +} + +func TestReadCommittedLogic_experimental_distsql_planning( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, 
"experimental_distsql_planning") +} + +func TestReadCommittedLogic_explain_analyze( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "explain_analyze") +} + +func TestReadCommittedLogic_export( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "export") +} + +func TestReadCommittedLogic_expression_index( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "expression_index") +} + +func TestReadCommittedLogic_external_connection_privileges( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "external_connection_privileges") +} + +func TestReadCommittedLogic_family( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "family") +} + +func TestReadCommittedLogic_fk( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "fk") +} + +func TestReadCommittedLogic_float( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "float") +} + +func TestReadCommittedLogic_format( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "format") +} + +func TestReadCommittedLogic_function_lookup( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "function_lookup") +} + +func TestReadCommittedLogic_fuzzystrmatch( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "fuzzystrmatch") +} + +func TestReadCommittedLogic_geospatial( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "geospatial") +} + +func TestReadCommittedLogic_geospatial_index( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "geospatial_index") +} + +func TestReadCommittedLogic_geospatial_meta( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "geospatial_meta") +} + +func TestReadCommittedLogic_geospatial_regression( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "geospatial_regression") +} + +func TestReadCommittedLogic_geospatial_zm( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "geospatial_zm") +} + +func TestReadCommittedLogic_grant_database( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "grant_database") +} + +func TestReadCommittedLogic_grant_in_txn( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "grant_in_txn") +} + +func TestReadCommittedLogic_grant_on_all_sequences_in_schema( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "grant_on_all_sequences_in_schema") +} + +func TestReadCommittedLogic_grant_on_all_tables_in_schema( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "grant_on_all_tables_in_schema") +} + +func TestReadCommittedLogic_grant_revoke_with_grant_option( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "grant_revoke_with_grant_option") +} + +func TestReadCommittedLogic_grant_role( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "grant_role") +} + +func TestReadCommittedLogic_grant_schema( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "grant_schema") +} + +func TestReadCommittedLogic_group_join( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "group_join") +} + +func TestReadCommittedLogic_hash_join( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "hash_join") +} + +func TestReadCommittedLogic_hash_sharded_index( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "hash_sharded_index") +} + +func 
TestReadCommittedLogic_hidden_columns( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "hidden_columns") +} + +func TestReadCommittedLogic_impure( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "impure") +} + +func TestReadCommittedLogic_index_join( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "index_join") +} + +func TestReadCommittedLogic_inet( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "inet") +} + +func TestReadCommittedLogic_inflight_trace_spans( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "inflight_trace_spans") +} + +func TestReadCommittedLogic_information_schema( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "information_schema") +} + +func TestReadCommittedLogic_inner_join( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "inner-join") +} + +func TestReadCommittedLogic_insert( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "insert") +} + +func TestReadCommittedLogic_int_size( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "int_size") +} + +func TestReadCommittedLogic_internal_executor( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "internal_executor") +} + +func TestReadCommittedLogic_interval( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "interval") +} + +func TestReadCommittedLogic_inverted_filter_geospatial( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "inverted_filter_geospatial") +} + +func TestReadCommittedLogic_inverted_filter_json_array( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "inverted_filter_json_array") +} + +func TestReadCommittedLogic_inverted_index( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "inverted_index") +} + +func TestReadCommittedLogic_inverted_index_multi_column( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "inverted_index_multi_column") +} + +func TestReadCommittedLogic_inverted_join_geospatial( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "inverted_join_geospatial") +} + +func TestReadCommittedLogic_inverted_join_json_array( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "inverted_join_json_array") +} + +func TestReadCommittedLogic_inverted_join_multi_column( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "inverted_join_multi_column") +} + +func TestReadCommittedLogic_jobs( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "jobs") +} + +func TestReadCommittedLogic_join( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "join") +} + +func TestReadCommittedLogic_json( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "json") +} + +func TestReadCommittedLogic_json_builtins( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "json_builtins") +} + +func TestReadCommittedLogic_json_index( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "json_index") +} + +func TestReadCommittedLogic_kv_builtin_functions( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "kv_builtin_functions") +} + +func TestReadCommittedLogic_limit( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "limit") +} + +func TestReadCommittedLogic_locality( + t *testing.T, +) { + defer 
leaktest.AfterTest(t)() + runLogicTest(t, "locality") +} + +func TestReadCommittedLogic_lock_timeout( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "lock_timeout") +} + +func TestReadCommittedLogic_lookup_join( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "lookup_join") +} + +func TestReadCommittedLogic_lookup_join_spans( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "lookup_join_spans") +} + +func TestReadCommittedLogic_manual_retry( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "manual_retry") +} + +func TestReadCommittedLogic_materialized_view( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "materialized_view") +} + +func TestReadCommittedLogic_merge_join( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "merge_join") +} + +func TestReadCommittedLogic_multi_statement( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "multi_statement") +} + +func TestReadCommittedLogic_name_escapes( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "name_escapes") +} + +func TestReadCommittedLogic_namespace( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "namespace") +} + +func TestReadCommittedLogic_new_schema_changer( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "new_schema_changer") +} + +func TestReadCommittedLogic_no_primary_key( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "no_primary_key") +} + +func TestReadCommittedLogic_notice( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "notice") +} + +func TestReadCommittedLogic_numeric_references( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "numeric_references") +} + +func TestReadCommittedLogic_on_update( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "on_update") +} + +func TestReadCommittedLogic_operator( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "operator") +} + +func TestReadCommittedLogic_optimizer_timeout( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "optimizer_timeout") +} + +func TestReadCommittedLogic_order_by( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "order_by") +} + +func TestReadCommittedLogic_ordinal_references( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "ordinal_references") +} + +func TestReadCommittedLogic_ordinality( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "ordinality") +} + +func TestReadCommittedLogic_overflow( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "overflow") +} + +func TestReadCommittedLogic_overlaps( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "overlaps") +} + +func TestReadCommittedLogic_owner( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "owner") +} + +func TestReadCommittedLogic_parallel_stmts_compat( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "parallel_stmts_compat") +} + +func TestReadCommittedLogic_partial_index( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "partial_index") +} + +func TestReadCommittedLogic_partial_txn_commit( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "partial_txn_commit") +} + +func TestReadCommittedLogic_pg_builtins( + t *testing.T, +) { + defer 
leaktest.AfterTest(t)() + runLogicTest(t, "pg_builtins") +} + +func TestReadCommittedLogic_pg_catalog_pg_default_acl( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "pg_catalog_pg_default_acl") +} + +func TestReadCommittedLogic_pg_catalog_pg_default_acl_with_grant_option( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "pg_catalog_pg_default_acl_with_grant_option") +} + +func TestReadCommittedLogic_pg_extension( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "pg_extension") +} + +func TestReadCommittedLogic_pg_lsn( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "pg_lsn") +} + +func TestReadCommittedLogic_pgcrypto_builtins( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "pgcrypto_builtins") +} + +func TestReadCommittedLogic_pgoidtype( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "pgoidtype") +} + +func TestReadCommittedLogic_plpgsql_builtins( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "plpgsql_builtins") +} + +func TestReadCommittedLogic_poison_after_push( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "poison_after_push") +} + +func TestReadCommittedLogic_postgres_jsonb( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "postgres_jsonb") +} + +func TestReadCommittedLogic_postgresjoin( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "postgresjoin") +} + +func TestReadCommittedLogic_privilege_builtins( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "privilege_builtins") +} + +func TestReadCommittedLogic_privileges_comments( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "privileges_comments") +} + +func TestReadCommittedLogic_privileges_table( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "privileges_table") +} + +func TestReadCommittedLogic_proc_invokes_proc( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "proc_invokes_proc") +} + +func TestReadCommittedLogic_procedure( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "procedure") +} + +func TestReadCommittedLogic_procedure_deps( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "procedure_deps") +} + +func TestReadCommittedLogic_procedure_params( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "procedure_params") +} + +func TestReadCommittedLogic_procedure_polymorphic( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "procedure_polymorphic") +} + +func TestReadCommittedLogic_procedure_privileges( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "procedure_privileges") +} + +func TestReadCommittedLogic_procedure_schema_change( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "procedure_schema_change") +} + +func TestReadCommittedLogic_propagate_input_ordering( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "propagate_input_ordering") +} + +func TestReadCommittedLogic_reassign_owned_by( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "reassign_owned_by") +} + +func TestReadCommittedLogic_record( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "record") +} + +func TestReadCommittedLogic_redact_descriptor( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "redact_descriptor") +} + +func 
TestReadCommittedLogic_rename_atomic( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "rename_atomic") +} + +func TestReadCommittedLogic_rename_column( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "rename_column") +} + +func TestReadCommittedLogic_rename_constraint( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "rename_constraint") +} + +func TestReadCommittedLogic_rename_database( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "rename_database") +} + +func TestReadCommittedLogic_rename_index( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "rename_index") +} + +func TestReadCommittedLogic_rename_sequence( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "rename_sequence") +} + +func TestReadCommittedLogic_rename_table( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "rename_table") +} + +func TestReadCommittedLogic_rename_view( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "rename_view") +} + +func TestReadCommittedLogic_reset( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "reset") +} + +func TestReadCommittedLogic_retry( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "retry") +} + +func TestReadCommittedLogic_returning( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "returning") +} + +func TestReadCommittedLogic_routine_schema_change( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "routine_schema_change") +} + +func TestReadCommittedLogic_row_level_ttl( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "row_level_ttl") +} + +func TestReadCommittedLogic_rows_from( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "rows_from") +} + +func TestReadCommittedLogic_run_control( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "run_control") +} + +func TestReadCommittedLogic_save_table( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "save_table") +} + +func TestReadCommittedLogic_savepoints( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "savepoints") +} + +func TestReadCommittedLogic_scale( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "scale") +} + +func TestReadCommittedLogic_scatter( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "scatter") +} + +func TestReadCommittedLogic_schema( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "schema") +} + +func TestReadCommittedLogic_schema_change_feature_flags( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "schema_change_feature_flags") +} + +func TestReadCommittedLogic_schema_change_in_txn( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "schema_change_in_txn") +} + +func TestReadCommittedLogic_schema_change_retry( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "schema_change_retry") +} + +func TestReadCommittedLogic_schema_locked( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "schema_locked") +} + +func TestReadCommittedLogic_schema_repair( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "schema_repair") +} + +func TestReadCommittedLogic_scrub( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "scrub") +} + +func 
TestReadCommittedLogic_secondary_index_column_families( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "secondary_index_column_families") +} + +func TestReadCommittedLogic_select( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "select") +} + +func TestReadCommittedLogic_select_for_share( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "select_for_share") +} + +func TestReadCommittedLogic_select_index( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "select_index") +} + +func TestReadCommittedLogic_select_index_flags( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "select_index_flags") +} + +func TestReadCommittedLogic_select_search_path( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "select_search_path") +} + +func TestReadCommittedLogic_select_table_alias( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "select_table_alias") +} + +func TestReadCommittedLogic_sequences( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "sequences") +} + +func TestReadCommittedLogic_sequences_distsql( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "sequences_distsql") +} + +func TestReadCommittedLogic_sequences_regclass( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "sequences_regclass") +} + +func TestReadCommittedLogic_serial( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "serial") +} + +func TestReadCommittedLogic_serializable_eager_restart( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "serializable_eager_restart") +} + +func TestReadCommittedLogic_set_local( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "set_local") +} + +func TestReadCommittedLogic_set_role( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "set_role") +} + +func TestReadCommittedLogic_set_schema( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "set_schema") +} + +func TestReadCommittedLogic_set_time_zone( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "set_time_zone") +} + +func TestReadCommittedLogic_shift( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "shift") +} + +func TestReadCommittedLogic_show_commit_timestamp( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "show_commit_timestamp") +} + +func TestReadCommittedLogic_show_completions( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "show_completions") +} + +func TestReadCommittedLogic_show_create( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "show_create") +} + +func TestReadCommittedLogic_show_create_all_schemas( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "show_create_all_schemas") +} + +func TestReadCommittedLogic_show_create_all_tables( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "show_create_all_tables") +} + +func TestReadCommittedLogic_show_create_all_tables_builtin( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "show_create_all_tables_builtin") +} + +func TestReadCommittedLogic_show_create_all_types( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "show_create_all_types") +} + +func TestReadCommittedLogic_show_create_redact( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, 
"show_create_redact") +} + +func TestReadCommittedLogic_show_default_privileges( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "show_default_privileges") +} + +func TestReadCommittedLogic_show_external_connections( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "show_external_connections") +} + +func TestReadCommittedLogic_show_fingerprints( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "show_fingerprints") +} + +func TestReadCommittedLogic_show_grants_on_virtual_table( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "show_grants_on_virtual_table") +} + +func TestReadCommittedLogic_show_grants_synthetic( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "show_grants_synthetic") +} + +func TestReadCommittedLogic_show_indexes( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "show_indexes") +} + +func TestReadCommittedLogic_show_transfer_state( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "show_transfer_state") +} + +func TestReadCommittedLogic_show_var( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "show_var") +} + +func TestReadCommittedLogic_span_builtins( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "span_builtins") +} + +func TestReadCommittedLogic_split_at( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "split_at") +} + +func TestReadCommittedLogic_sqllite( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "sqllite") +} + +func TestReadCommittedLogic_sqlliveness( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "sqlliveness") +} + +func TestReadCommittedLogic_sqlsmith( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "sqlsmith") +} + +func TestReadCommittedLogic_srfs( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "srfs") +} + +func TestReadCommittedLogic_statement_source( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "statement_source") +} + +func TestReadCommittedLogic_statement_statistics_errors( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "statement_statistics_errors") +} + +func TestReadCommittedLogic_statement_statistics_errors_redacted( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "statement_statistics_errors_redacted") +} + +func TestReadCommittedLogic_stats( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "stats") +} + +func TestReadCommittedLogic_storing( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "storing") +} + +func TestReadCommittedLogic_strict_ddl_atomicity( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "strict_ddl_atomicity") +} + +func TestReadCommittedLogic_suboperators( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "suboperators") +} + +func TestReadCommittedLogic_subquery( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "subquery") +} + +func TestReadCommittedLogic_subquery_correlated( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "subquery_correlated") +} + +func TestReadCommittedLogic_synthetic_privileges( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "synthetic_privileges") +} + +func TestReadCommittedLogic_system( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, 
"system") +} + +func TestReadCommittedLogic_system_columns( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "system_columns") +} + +func TestReadCommittedLogic_system_namespace( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "system_namespace") +} + +func TestReadCommittedLogic_table( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "table") +} + +func TestReadCommittedLogic_target_names( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "target_names") +} + +func TestReadCommittedLogic_temp_table( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "temp_table") +} + +func TestReadCommittedLogic_temp_table_txn( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "temp_table_txn") +} + +func TestReadCommittedLogic_tenant( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "tenant") +} + +func TestReadCommittedLogic_time( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "time") +} + +func TestReadCommittedLogic_timestamp( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "timestamp") +} + +func TestReadCommittedLogic_timetz( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "timetz") +} + +func TestReadCommittedLogic_trigram_builtins( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "trigram_builtins") +} + +func TestReadCommittedLogic_trigram_indexes( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "trigram_indexes") +} + +func TestReadCommittedLogic_truncate( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "truncate") +} + +func TestReadCommittedLogic_truncate_with_concurrent_mutation( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "truncate_with_concurrent_mutation") +} + +func TestReadCommittedLogic_tsvector( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "tsvector") +} + +func TestReadCommittedLogic_tuple( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "tuple") +} + +func TestReadCommittedLogic_tuple_local( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "tuple_local") +} + +func TestReadCommittedLogic_txn( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "txn") +} + +func TestReadCommittedLogic_txn_as_of( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "txn_as_of") +} + +func TestReadCommittedLogic_txn_retry( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "txn_retry") +} + +func TestReadCommittedLogic_txn_stats( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "txn_stats") +} + +func TestReadCommittedLogic_type_privileges( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "type_privileges") +} + +func TestReadCommittedLogic_typing( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "typing") +} + +func TestReadCommittedLogic_udf( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "udf") +} + +func TestReadCommittedLogic_udf_calling_udf( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "udf_calling_udf") +} + +func TestReadCommittedLogic_udf_delete( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "udf_delete") +} + +func TestReadCommittedLogic_udf_deps( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, 
"udf_deps") +} + +func TestReadCommittedLogic_udf_fk( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "udf_fk") +} + +func TestReadCommittedLogic_udf_in_column_defaults( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "udf_in_column_defaults") +} + +func TestReadCommittedLogic_udf_in_constraints( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "udf_in_constraints") +} + +func TestReadCommittedLogic_udf_insert( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "udf_insert") +} + +func TestReadCommittedLogic_udf_observability( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "udf_observability") +} + +func TestReadCommittedLogic_udf_oid_ref( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "udf_oid_ref") +} + +func TestReadCommittedLogic_udf_options( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "udf_options") +} + +func TestReadCommittedLogic_udf_params( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "udf_params") +} + +func TestReadCommittedLogic_udf_polymorphic( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "udf_polymorphic") +} + +func TestReadCommittedLogic_udf_prepare( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "udf_prepare") +} + +func TestReadCommittedLogic_udf_privileges( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "udf_privileges") +} + +func TestReadCommittedLogic_udf_privileges_mutations( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "udf_privileges_mutations") +} + +func TestReadCommittedLogic_udf_procedure_mix( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "udf_procedure_mix") +} + +func TestReadCommittedLogic_udf_record( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "udf_record") +} + +func TestReadCommittedLogic_udf_regressions( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "udf_regressions") +} + +func TestReadCommittedLogic_udf_rewrite( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "udf_rewrite") +} + +func TestReadCommittedLogic_udf_schema_change( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "udf_schema_change") +} + +func TestReadCommittedLogic_udf_setof( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "udf_setof") +} + +func TestReadCommittedLogic_udf_star( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "udf_star") +} + +func TestReadCommittedLogic_udf_subquery( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "udf_subquery") +} + +func TestReadCommittedLogic_udf_unsupported( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "udf_unsupported") +} + +func TestReadCommittedLogic_udf_update( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "udf_update") +} + +func TestReadCommittedLogic_udf_upsert( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "udf_upsert") +} + +func TestReadCommittedLogic_union( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "union") +} + +func TestReadCommittedLogic_update( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "update") +} + +func TestReadCommittedLogic_update_from( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "update_from") +} + 
+func TestReadCommittedLogic_upsert( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "upsert") +} + +func TestReadCommittedLogic_upsert_non_metamorphic( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "upsert_non_metamorphic") +} + +func TestReadCommittedLogic_uuid( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "uuid") +} + +func TestReadCommittedLogic_values( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "values") +} + +func TestReadCommittedLogic_vectorize_agg( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "vectorize_agg") +} + +func TestReadCommittedLogic_vectorize_overloads( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "vectorize_overloads") +} + +func TestReadCommittedLogic_vectorize_shutdown( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "vectorize_shutdown") +} + +func TestReadCommittedLogic_vectorize_types( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "vectorize_types") +} + +func TestReadCommittedLogic_vectorize_unsupported( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "vectorize_unsupported") +} + +func TestReadCommittedLogic_views( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "views") +} + +func TestReadCommittedLogic_virtual_columns( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "virtual_columns") +} + +func TestReadCommittedLogic_virtual_table_privileges( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "virtual_table_privileges") +} + +func TestReadCommittedLogic_void( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "void") +} + +func TestReadCommittedLogic_where( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "where") +} + +func TestReadCommittedLogic_window( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "window") +} + +func TestReadCommittedLogic_with( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "with") +} + +func TestReadCommittedLogic_workload_indexrecs( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "workload_indexrecs") +} + +func TestReadCommittedLogic_zero( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "zero") +} + +func TestReadCommittedLogic_zigzag_join( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "zigzag_join") +} + +func TestReadCommittedLogic_zone_config( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "zone_config") +} + +func TestReadCommittedLogic_zone_config_system_tenant( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "zone_config_system_tenant") +} + func TestReadCommittedLogicCCL_fips_ready( t *testing.T, ) { diff --git a/pkg/ccl/logictestccl/tests/local-repeatable-read/BUILD.bazel b/pkg/ccl/logictestccl/tests/local-repeatable-read/BUILD.bazel new file mode 100644 index 000000000000..aa4980d9e687 --- /dev/null +++ b/pkg/ccl/logictestccl/tests/local-repeatable-read/BUILD.bazel @@ -0,0 +1,31 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_test") + +go_test( + name = "local-repeatable-read_test", + size = "enormous", + srcs = ["generated_test.go"], + data = [ + "//c-deps:libgeos", # keep + "//pkg/ccl/logictestccl:testdata", # keep + "//pkg/sql/logictest:testdata", # keep + "//pkg/sql/opt/exec/execbuilder:testdata", # keep + ], + exec_properties = 
{"test.Pool": "large"}, + shard_count = 48, + tags = ["cpu:1"], + deps = [ + "//pkg/base", + "//pkg/build/bazel", + "//pkg/ccl", + "//pkg/security/securityassets", + "//pkg/security/securitytest", + "//pkg/server", + "//pkg/sql", + "//pkg/sql/logictest", + "//pkg/testutils/serverutils", + "//pkg/testutils/skip", + "//pkg/testutils/testcluster", + "//pkg/util/leaktest", + "//pkg/util/randutil", + ], +) diff --git a/pkg/ccl/logictestccl/tests/local-repeatable-read/generated_test.go b/pkg/ccl/logictestccl/tests/local-repeatable-read/generated_test.go new file mode 100644 index 000000000000..d0f9c3a80dfa --- /dev/null +++ b/pkg/ccl/logictestccl/tests/local-repeatable-read/generated_test.go @@ -0,0 +1,2782 @@ +// Copyright 2022 The Cockroach Authors. +// +// Licensed as a CockroachDB Enterprise file under the Cockroach Community +// License (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt + +// Code generated by generate-logictest, DO NOT EDIT. + +package testlocal_repeatable_read + +import ( + "os" + "path/filepath" + "testing" + + "github.com/cockroachdb/cockroach/pkg/base" + "github.com/cockroachdb/cockroach/pkg/build/bazel" + "github.com/cockroachdb/cockroach/pkg/ccl" + "github.com/cockroachdb/cockroach/pkg/security/securityassets" + "github.com/cockroachdb/cockroach/pkg/security/securitytest" + "github.com/cockroachdb/cockroach/pkg/server" + "github.com/cockroachdb/cockroach/pkg/sql" + "github.com/cockroachdb/cockroach/pkg/sql/logictest" + "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" + "github.com/cockroachdb/cockroach/pkg/testutils/skip" + "github.com/cockroachdb/cockroach/pkg/testutils/testcluster" + "github.com/cockroachdb/cockroach/pkg/util/leaktest" + "github.com/cockroachdb/cockroach/pkg/util/randutil" +) + +const configIdx = 4 + +var logicTestDir string +var cclLogicTestDir string +var execBuildLogicTestDir string + +func init() { + if bazel.BuiltWithBazel() { + var err error + logicTestDir, err = bazel.Runfile("pkg/sql/logictest/testdata/logic_test") + if err != nil { + panic(err) + } + } else { + logicTestDir = "../../../../sql/logictest/testdata/logic_test" + } + if bazel.BuiltWithBazel() { + var err error + cclLogicTestDir, err = bazel.Runfile("pkg/ccl/logictestccl/testdata/logic_test") + if err != nil { + panic(err) + } + } else { + cclLogicTestDir = "../../../../ccl/logictestccl/testdata/logic_test" + } + if bazel.BuiltWithBazel() { + var err error + execBuildLogicTestDir, err = bazel.Runfile("pkg/sql/opt/exec/execbuilder/testdata") + if err != nil { + panic(err) + } + } else { + execBuildLogicTestDir = "../../../../sql/opt/exec/execbuilder/testdata" + } +} + +func TestMain(m *testing.M) { + defer ccl.TestingEnableEnterprise()() + securityassets.SetLoader(securitytest.EmbeddedAssets) + randutil.SeedForTests() + serverutils.InitTestServerFactory(server.TestServerFactory) + serverutils.InitTestClusterFactory(testcluster.TestClusterFactory) + + defer serverutils.TestingSetDefaultTenantSelectionOverride( + base.TestIsForStuffThatShouldWorkWithSecondaryTenantsButDoesntYet(76378), + )() + + os.Exit(m.Run()) +} + +func runLogicTest(t *testing.T, file string) { + skip.UnderDeadlock(t, "times out and/or hangs") + logictest.RunLogicTest(t, logictest.TestServerArgs{}, configIdx, filepath.Join(logicTestDir, 
file)) +} +func runCCLLogicTest(t *testing.T, file string) { + skip.UnderDeadlock(t, "times out and/or hangs") + logictest.RunLogicTest(t, logictest.TestServerArgs{}, configIdx, filepath.Join(cclLogicTestDir, file)) +} +func runExecBuildLogicTest(t *testing.T, file string) { + defer sql.TestingOverrideExplainEnvVersion("CockroachDB execbuilder test version")() + skip.UnderDeadlock(t, "times out and/or hangs") + serverArgs := logictest.TestServerArgs{ + DisableWorkmemRandomization: true, + // Disable the direct scans in order to keep the output of EXPLAIN (VEC) + // deterministic. + DisableDirectColumnarScans: true, + } + logictest.RunLogicTest(t, serverArgs, configIdx, filepath.Join(execBuildLogicTestDir, file)) +} + +// TestLogic_tmp runs any tests that are prefixed with "_", for which a +// dedicated test is not generated. This allows developers to create and run +// temporary test files that are not checked into the repository, without +// repeatedly regenerating and reverting changes to this file, generated_test.go. +// +// TODO(mgartner): Add file filtering so that individual files can be run, +// instead of all files with the "_" prefix. +func TestLogic_tmp(t *testing.T) { + defer leaktest.AfterTest(t)() + var glob string + glob = filepath.Join(logicTestDir, "_*") + logictest.RunLogicTests(t, logictest.TestServerArgs{}, configIdx, glob) + glob = filepath.Join(cclLogicTestDir, "_*") + logictest.RunLogicTests(t, logictest.TestServerArgs{}, configIdx, glob) + glob = filepath.Join(execBuildLogicTestDir, "_*") + serverArgs := logictest.TestServerArgs{ + DisableWorkmemRandomization: true, + } + logictest.RunLogicTests(t, serverArgs, configIdx, glob) +} + +func TestRepeatableReadLogic_aggregate( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "aggregate") +} + +func TestRepeatableReadLogic_alias_types( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "alias_types") +} + +func TestRepeatableReadLogic_alter_column_type( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "alter_column_type") +} + +func TestRepeatableReadLogic_alter_database_convert_to_schema( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "alter_database_convert_to_schema") +} + +func TestRepeatableReadLogic_alter_database_owner( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "alter_database_owner") +} + +func TestRepeatableReadLogic_alter_default_privileges_for_all_roles( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "alter_default_privileges_for_all_roles") +} + +func TestRepeatableReadLogic_alter_default_privileges_for_schema( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "alter_default_privileges_for_schema") +} + +func TestRepeatableReadLogic_alter_default_privileges_for_sequence( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "alter_default_privileges_for_sequence") +} + +func TestRepeatableReadLogic_alter_default_privileges_for_table( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "alter_default_privileges_for_table") +} + +func TestRepeatableReadLogic_alter_default_privileges_for_type( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "alter_default_privileges_for_type") +} + +func TestRepeatableReadLogic_alter_default_privileges_in_schema( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "alter_default_privileges_in_schema") +} + +func
TestRepeatableReadLogic_alter_default_privileges_with_grant_option( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "alter_default_privileges_with_grant_option") +} + +func TestRepeatableReadLogic_alter_primary_key( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "alter_primary_key") +} + +func TestRepeatableReadLogic_alter_role( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "alter_role") +} + +func TestRepeatableReadLogic_alter_role_set( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "alter_role_set") +} + +func TestRepeatableReadLogic_alter_schema_owner( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "alter_schema_owner") +} + +func TestRepeatableReadLogic_alter_sequence( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "alter_sequence") +} + +func TestRepeatableReadLogic_alter_sequence_owner( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "alter_sequence_owner") +} + +func TestRepeatableReadLogic_alter_table( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "alter_table") +} + +func TestRepeatableReadLogic_alter_table_owner( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "alter_table_owner") +} + +func TestRepeatableReadLogic_alter_type( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "alter_type") +} + +func TestRepeatableReadLogic_alter_type_owner( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "alter_type_owner") +} + +func TestRepeatableReadLogic_alter_view_owner( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "alter_view_owner") +} + +func TestRepeatableReadLogic_and_or( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "and_or") +} + +func TestRepeatableReadLogic_apply_join( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "apply_join") +} + +func TestRepeatableReadLogic_array( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "array") +} + +func TestRepeatableReadLogic_as_of( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "as_of") +} + +func TestRepeatableReadLogic_asyncpg( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "asyncpg") +} + +func TestRepeatableReadLogic_auto_span_config_reconciliation_job( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "auto_span_config_reconciliation_job") +} + +func TestRepeatableReadLogic_bit( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "bit") +} + +func TestRepeatableReadLogic_builtin_function( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "builtin_function") +} + +func TestRepeatableReadLogic_builtin_function_notenant( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "builtin_function_notenant") +} + +func TestRepeatableReadLogic_bytes( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "bytes") +} + +func TestRepeatableReadLogic_cascade( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "cascade") +} + +func TestRepeatableReadLogic_case_sensitive_names( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "case_sensitive_names") +} + +func TestRepeatableReadLogic_cast( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "cast") +} + +func TestRepeatableReadLogic_check_constraints( + t 
*testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "check_constraints") +} + +func TestRepeatableReadLogic_cluster_settings( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "cluster_settings") +} + +func TestRepeatableReadLogic_collatedstring( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "collatedstring") +} + +func TestRepeatableReadLogic_collatedstring_constraint( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "collatedstring_constraint") +} + +func TestRepeatableReadLogic_collatedstring_index1( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "collatedstring_index1") +} + +func TestRepeatableReadLogic_collatedstring_index2( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "collatedstring_index2") +} + +func TestRepeatableReadLogic_collatedstring_normalization( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "collatedstring_normalization") +} + +func TestRepeatableReadLogic_collatedstring_nullinindex( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "collatedstring_nullinindex") +} + +func TestRepeatableReadLogic_collatedstring_uniqueindex1( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "collatedstring_uniqueindex1") +} + +func TestRepeatableReadLogic_collatedstring_uniqueindex2( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "collatedstring_uniqueindex2") +} + +func TestRepeatableReadLogic_comment_on( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "comment_on") +} + +func TestRepeatableReadLogic_composite_types( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "composite_types") +} + +func TestRepeatableReadLogic_computed( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "computed") +} + +func TestRepeatableReadLogic_conditional( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "conditional") +} + +func TestRepeatableReadLogic_connect_privilege( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "connect_privilege") +} + +func TestRepeatableReadLogic_crdb_internal( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "crdb_internal") +} + +func TestRepeatableReadLogic_crdb_internal_catalog( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "crdb_internal_catalog") +} + +func TestRepeatableReadLogic_crdb_internal_default_privileges( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "crdb_internal_default_privileges") +} + +func TestRepeatableReadLogic_create_as( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "create_as") +} + +func TestRepeatableReadLogic_create_index( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "create_index") +} + +func TestRepeatableReadLogic_create_table( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "create_table") +} + +func TestRepeatableReadLogic_cross_join( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "cross_join") +} + +func TestRepeatableReadLogic_cursor( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "cursor") +} + +func TestRepeatableReadLogic_custom_escape_character( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "custom_escape_character") +} + +func TestRepeatableReadLogic_dangerous_statements( + t *testing.T, +) { + 
defer leaktest.AfterTest(t)() + runLogicTest(t, "dangerous_statements") +} + +func TestRepeatableReadLogic_database( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "database") +} + +func TestRepeatableReadLogic_datetime( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "datetime") +} + +func TestRepeatableReadLogic_decimal( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "decimal") +} + +func TestRepeatableReadLogic_default( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "default") +} + +func TestRepeatableReadLogic_delete( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "delete") +} + +func TestRepeatableReadLogic_delete_batch( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "delete_batch") +} + +func TestRepeatableReadLogic_dependencies( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "dependencies") +} + +func TestRepeatableReadLogic_discard( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "discard") +} + +func TestRepeatableReadLogic_disjunction_in_join( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "disjunction_in_join") +} + +func TestRepeatableReadLogic_distinct( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "distinct") +} + +func TestRepeatableReadLogic_distinct_on( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "distinct_on") +} + +func TestRepeatableReadLogic_distsql_automatic_partial_stats( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "distsql_automatic_partial_stats") +} + +func TestRepeatableReadLogic_distsql_automatic_stats( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "distsql_automatic_stats") +} + +func TestRepeatableReadLogic_distsql_event_log( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "distsql_event_log") +} + +func TestRepeatableReadLogic_distsql_expr( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "distsql_expr") +} + +func TestRepeatableReadLogic_distsql_join( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "distsql_join") +} + +func TestRepeatableReadLogic_distsql_srfs( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "distsql_srfs") +} + +func TestRepeatableReadLogic_drop_database( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "drop_database") +} + +func TestRepeatableReadLogic_drop_function( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "drop_function") +} + +func TestRepeatableReadLogic_drop_index( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "drop_index") +} + +func TestRepeatableReadLogic_drop_owned_by( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "drop_owned_by") +} + +func TestRepeatableReadLogic_drop_procedure( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "drop_procedure") +} + +func TestRepeatableReadLogic_drop_role_with_default_privileges( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "drop_role_with_default_privileges") +} + +func TestRepeatableReadLogic_drop_role_with_default_privileges_in_schema( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "drop_role_with_default_privileges_in_schema") +} + +func TestRepeatableReadLogic_drop_schema( + t *testing.T, +) { + defer 
leaktest.AfterTest(t)() + runLogicTest(t, "drop_schema") +} + +func TestRepeatableReadLogic_drop_sequence( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "drop_sequence") +} + +func TestRepeatableReadLogic_drop_table( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "drop_table") +} + +func TestRepeatableReadLogic_drop_temp( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "drop_temp") +} + +func TestRepeatableReadLogic_drop_type( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "drop_type") +} + +func TestRepeatableReadLogic_drop_user( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "drop_user") +} + +func TestRepeatableReadLogic_drop_view( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "drop_view") +} + +func TestRepeatableReadLogic_edge( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "edge") +} + +func TestRepeatableReadLogic_enums( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "enums") +} + +func TestRepeatableReadLogic_errors( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "errors") +} + +func TestRepeatableReadLogic_event_log( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "event_log") +} + +func TestRepeatableReadLogic_exclude_data_from_backup( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "exclude_data_from_backup") +} + +func TestRepeatableReadLogic_experimental_distsql_planning( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "experimental_distsql_planning") +} + +func TestRepeatableReadLogic_explain_analyze( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "explain_analyze") +} + +func TestRepeatableReadLogic_export( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "export") +} + +func TestRepeatableReadLogic_expression_index( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "expression_index") +} + +func TestRepeatableReadLogic_external_connection_privileges( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "external_connection_privileges") +} + +func TestRepeatableReadLogic_family( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "family") +} + +func TestRepeatableReadLogic_fk( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "fk") +} + +func TestRepeatableReadLogic_float( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "float") +} + +func TestRepeatableReadLogic_format( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "format") +} + +func TestRepeatableReadLogic_function_lookup( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "function_lookup") +} + +func TestRepeatableReadLogic_fuzzystrmatch( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "fuzzystrmatch") +} + +func TestRepeatableReadLogic_geospatial( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "geospatial") +} + +func TestRepeatableReadLogic_geospatial_index( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "geospatial_index") +} + +func TestRepeatableReadLogic_geospatial_meta( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "geospatial_meta") +} + +func TestRepeatableReadLogic_geospatial_regression( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + 
runLogicTest(t, "geospatial_regression") +} + +func TestRepeatableReadLogic_geospatial_zm( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "geospatial_zm") +} + +func TestRepeatableReadLogic_grant_database( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "grant_database") +} + +func TestRepeatableReadLogic_grant_in_txn( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "grant_in_txn") +} + +func TestRepeatableReadLogic_grant_on_all_sequences_in_schema( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "grant_on_all_sequences_in_schema") +} + +func TestRepeatableReadLogic_grant_on_all_tables_in_schema( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "grant_on_all_tables_in_schema") +} + +func TestRepeatableReadLogic_grant_revoke_with_grant_option( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "grant_revoke_with_grant_option") +} + +func TestRepeatableReadLogic_grant_role( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "grant_role") +} + +func TestRepeatableReadLogic_grant_schema( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "grant_schema") +} + +func TestRepeatableReadLogic_group_join( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "group_join") +} + +func TestRepeatableReadLogic_hash_join( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "hash_join") +} + +func TestRepeatableReadLogic_hash_sharded_index( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "hash_sharded_index") +} + +func TestRepeatableReadLogic_hidden_columns( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "hidden_columns") +} + +func TestRepeatableReadLogic_impure( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "impure") +} + +func TestRepeatableReadLogic_index_join( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "index_join") +} + +func TestRepeatableReadLogic_inet( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "inet") +} + +func TestRepeatableReadLogic_inflight_trace_spans( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "inflight_trace_spans") +} + +func TestRepeatableReadLogic_information_schema( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "information_schema") +} + +func TestRepeatableReadLogic_inner_join( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "inner-join") +} + +func TestRepeatableReadLogic_insert( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "insert") +} + +func TestRepeatableReadLogic_int_size( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "int_size") +} + +func TestRepeatableReadLogic_internal_executor( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "internal_executor") +} + +func TestRepeatableReadLogic_interval( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "interval") +} + +func TestRepeatableReadLogic_inverted_filter_geospatial( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "inverted_filter_geospatial") +} + +func TestRepeatableReadLogic_inverted_filter_json_array( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "inverted_filter_json_array") +} + +func TestRepeatableReadLogic_inverted_index( + t *testing.T, +) { + defer 
leaktest.AfterTest(t)() + runLogicTest(t, "inverted_index") +} + +func TestRepeatableReadLogic_inverted_index_multi_column( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "inverted_index_multi_column") +} + +func TestRepeatableReadLogic_inverted_join_geospatial( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "inverted_join_geospatial") +} + +func TestRepeatableReadLogic_inverted_join_json_array( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "inverted_join_json_array") +} + +func TestRepeatableReadLogic_inverted_join_multi_column( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "inverted_join_multi_column") +} + +func TestRepeatableReadLogic_jobs( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "jobs") +} + +func TestRepeatableReadLogic_join( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "join") +} + +func TestRepeatableReadLogic_json( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "json") +} + +func TestRepeatableReadLogic_json_builtins( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "json_builtins") +} + +func TestRepeatableReadLogic_json_index( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "json_index") +} + +func TestRepeatableReadLogic_kv_builtin_functions( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "kv_builtin_functions") +} + +func TestRepeatableReadLogic_limit( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "limit") +} + +func TestRepeatableReadLogic_locality( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "locality") +} + +func TestRepeatableReadLogic_lock_timeout( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "lock_timeout") +} + +func TestRepeatableReadLogic_lookup_join( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "lookup_join") +} + +func TestRepeatableReadLogic_lookup_join_spans( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "lookup_join_spans") +} + +func TestRepeatableReadLogic_manual_retry( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "manual_retry") +} + +func TestRepeatableReadLogic_materialized_view( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "materialized_view") +} + +func TestRepeatableReadLogic_merge_join( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "merge_join") +} + +func TestRepeatableReadLogic_multi_statement( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "multi_statement") +} + +func TestRepeatableReadLogic_name_escapes( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "name_escapes") +} + +func TestRepeatableReadLogic_namespace( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "namespace") +} + +func TestRepeatableReadLogic_new_schema_changer( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "new_schema_changer") +} + +func TestRepeatableReadLogic_no_primary_key( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "no_primary_key") +} + +func TestRepeatableReadLogic_notice( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "notice") +} + +func TestRepeatableReadLogic_numeric_references( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "numeric_references") +} + +func 
TestRepeatableReadLogic_on_update( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "on_update") +} + +func TestRepeatableReadLogic_operator( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "operator") +} + +func TestRepeatableReadLogic_optimizer_timeout( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "optimizer_timeout") +} + +func TestRepeatableReadLogic_order_by( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "order_by") +} + +func TestRepeatableReadLogic_ordinal_references( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "ordinal_references") +} + +func TestRepeatableReadLogic_ordinality( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "ordinality") +} + +func TestRepeatableReadLogic_overflow( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "overflow") +} + +func TestRepeatableReadLogic_overlaps( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "overlaps") +} + +func TestRepeatableReadLogic_owner( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "owner") +} + +func TestRepeatableReadLogic_parallel_stmts_compat( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "parallel_stmts_compat") +} + +func TestRepeatableReadLogic_partial_index( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "partial_index") +} + +func TestRepeatableReadLogic_partial_txn_commit( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "partial_txn_commit") +} + +func TestRepeatableReadLogic_pg_builtins( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "pg_builtins") +} + +func TestRepeatableReadLogic_pg_catalog_pg_default_acl( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "pg_catalog_pg_default_acl") +} + +func TestRepeatableReadLogic_pg_catalog_pg_default_acl_with_grant_option( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "pg_catalog_pg_default_acl_with_grant_option") +} + +func TestRepeatableReadLogic_pg_extension( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "pg_extension") +} + +func TestRepeatableReadLogic_pg_lsn( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "pg_lsn") +} + +func TestRepeatableReadLogic_pgcrypto_builtins( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "pgcrypto_builtins") +} + +func TestRepeatableReadLogic_pgoidtype( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "pgoidtype") +} + +func TestRepeatableReadLogic_plpgsql_builtins( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "plpgsql_builtins") +} + +func TestRepeatableReadLogic_poison_after_push( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "poison_after_push") +} + +func TestRepeatableReadLogic_postgres_jsonb( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "postgres_jsonb") +} + +func TestRepeatableReadLogic_postgresjoin( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "postgresjoin") +} + +func TestRepeatableReadLogic_privilege_builtins( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "privilege_builtins") +} + +func TestRepeatableReadLogic_privileges_comments( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "privileges_comments") +} + +func 
TestRepeatableReadLogic_privileges_table( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "privileges_table") +} + +func TestRepeatableReadLogic_proc_invokes_proc( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "proc_invokes_proc") +} + +func TestRepeatableReadLogic_procedure( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "procedure") +} + +func TestRepeatableReadLogic_procedure_deps( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "procedure_deps") +} + +func TestRepeatableReadLogic_procedure_params( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "procedure_params") +} + +func TestRepeatableReadLogic_procedure_polymorphic( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "procedure_polymorphic") +} + +func TestRepeatableReadLogic_procedure_privileges( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "procedure_privileges") +} + +func TestRepeatableReadLogic_procedure_schema_change( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "procedure_schema_change") +} + +func TestRepeatableReadLogic_propagate_input_ordering( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "propagate_input_ordering") +} + +func TestRepeatableReadLogic_reassign_owned_by( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "reassign_owned_by") +} + +func TestRepeatableReadLogic_record( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "record") +} + +func TestRepeatableReadLogic_redact_descriptor( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "redact_descriptor") +} + +func TestRepeatableReadLogic_rename_atomic( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "rename_atomic") +} + +func TestRepeatableReadLogic_rename_column( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "rename_column") +} + +func TestRepeatableReadLogic_rename_constraint( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "rename_constraint") +} + +func TestRepeatableReadLogic_rename_database( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "rename_database") +} + +func TestRepeatableReadLogic_rename_index( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "rename_index") +} + +func TestRepeatableReadLogic_rename_sequence( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "rename_sequence") +} + +func TestRepeatableReadLogic_rename_table( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "rename_table") +} + +func TestRepeatableReadLogic_rename_view( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "rename_view") +} + +func TestRepeatableReadLogic_reset( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "reset") +} + +func TestRepeatableReadLogic_retry( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "retry") +} + +func TestRepeatableReadLogic_returning( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "returning") +} + +func TestRepeatableReadLogic_routine_schema_change( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "routine_schema_change") +} + +func TestRepeatableReadLogic_row_level_ttl( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "row_level_ttl") +} + +func 
TestRepeatableReadLogic_rows_from( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "rows_from") +} + +func TestRepeatableReadLogic_run_control( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "run_control") +} + +func TestRepeatableReadLogic_save_table( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "save_table") +} + +func TestRepeatableReadLogic_savepoints( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "savepoints") +} + +func TestRepeatableReadLogic_scale( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "scale") +} + +func TestRepeatableReadLogic_scatter( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "scatter") +} + +func TestRepeatableReadLogic_schema( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "schema") +} + +func TestRepeatableReadLogic_schema_change_feature_flags( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "schema_change_feature_flags") +} + +func TestRepeatableReadLogic_schema_change_in_txn( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "schema_change_in_txn") +} + +func TestRepeatableReadLogic_schema_change_retry( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "schema_change_retry") +} + +func TestRepeatableReadLogic_schema_locked( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "schema_locked") +} + +func TestRepeatableReadLogic_schema_repair( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "schema_repair") +} + +func TestRepeatableReadLogic_scrub( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "scrub") +} + +func TestRepeatableReadLogic_secondary_index_column_families( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "secondary_index_column_families") +} + +func TestRepeatableReadLogic_select( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "select") +} + +func TestRepeatableReadLogic_select_for_share( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "select_for_share") +} + +func TestRepeatableReadLogic_select_index( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "select_index") +} + +func TestRepeatableReadLogic_select_index_flags( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "select_index_flags") +} + +func TestRepeatableReadLogic_select_search_path( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "select_search_path") +} + +func TestRepeatableReadLogic_select_table_alias( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "select_table_alias") +} + +func TestRepeatableReadLogic_sequences( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "sequences") +} + +func TestRepeatableReadLogic_sequences_distsql( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "sequences_distsql") +} + +func TestRepeatableReadLogic_sequences_regclass( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "sequences_regclass") +} + +func TestRepeatableReadLogic_serial( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "serial") +} + +func TestRepeatableReadLogic_serializable_eager_restart( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "serializable_eager_restart") +} + +func TestRepeatableReadLogic_set_local( + t *testing.T, +) 
{ + defer leaktest.AfterTest(t)() + runLogicTest(t, "set_local") +} + +func TestRepeatableReadLogic_set_role( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "set_role") +} + +func TestRepeatableReadLogic_set_schema( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "set_schema") +} + +func TestRepeatableReadLogic_set_time_zone( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "set_time_zone") +} + +func TestRepeatableReadLogic_shift( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "shift") +} + +func TestRepeatableReadLogic_show_commit_timestamp( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "show_commit_timestamp") +} + +func TestRepeatableReadLogic_show_completions( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "show_completions") +} + +func TestRepeatableReadLogic_show_create( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "show_create") +} + +func TestRepeatableReadLogic_show_create_all_schemas( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "show_create_all_schemas") +} + +func TestRepeatableReadLogic_show_create_all_tables( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "show_create_all_tables") +} + +func TestRepeatableReadLogic_show_create_all_tables_builtin( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "show_create_all_tables_builtin") +} + +func TestRepeatableReadLogic_show_create_all_types( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "show_create_all_types") +} + +func TestRepeatableReadLogic_show_create_redact( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "show_create_redact") +} + +func TestRepeatableReadLogic_show_default_privileges( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "show_default_privileges") +} + +func TestRepeatableReadLogic_show_external_connections( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "show_external_connections") +} + +func TestRepeatableReadLogic_show_fingerprints( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "show_fingerprints") +} + +func TestRepeatableReadLogic_show_grants_on_virtual_table( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "show_grants_on_virtual_table") +} + +func TestRepeatableReadLogic_show_grants_synthetic( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "show_grants_synthetic") +} + +func TestRepeatableReadLogic_show_indexes( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "show_indexes") +} + +func TestRepeatableReadLogic_show_transfer_state( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "show_transfer_state") +} + +func TestRepeatableReadLogic_show_var( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "show_var") +} + +func TestRepeatableReadLogic_span_builtins( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "span_builtins") +} + +func TestRepeatableReadLogic_split_at( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "split_at") +} + +func TestRepeatableReadLogic_sqllite( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "sqllite") +} + +func TestRepeatableReadLogic_sqlliveness( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "sqlliveness") +} + +func 
TestRepeatableReadLogic_sqlsmith( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "sqlsmith") +} + +func TestRepeatableReadLogic_srfs( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "srfs") +} + +func TestRepeatableReadLogic_statement_source( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "statement_source") +} + +func TestRepeatableReadLogic_statement_statistics_errors( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "statement_statistics_errors") +} + +func TestRepeatableReadLogic_statement_statistics_errors_redacted( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "statement_statistics_errors_redacted") +} + +func TestRepeatableReadLogic_stats( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "stats") +} + +func TestRepeatableReadLogic_storing( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "storing") +} + +func TestRepeatableReadLogic_strict_ddl_atomicity( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "strict_ddl_atomicity") +} + +func TestRepeatableReadLogic_suboperators( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "suboperators") +} + +func TestRepeatableReadLogic_subquery( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "subquery") +} + +func TestRepeatableReadLogic_subquery_correlated( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "subquery_correlated") +} + +func TestRepeatableReadLogic_synthetic_privileges( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "synthetic_privileges") +} + +func TestRepeatableReadLogic_system( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "system") +} + +func TestRepeatableReadLogic_system_columns( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "system_columns") +} + +func TestRepeatableReadLogic_system_namespace( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "system_namespace") +} + +func TestRepeatableReadLogic_table( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "table") +} + +func TestRepeatableReadLogic_target_names( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "target_names") +} + +func TestRepeatableReadLogic_temp_table( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "temp_table") +} + +func TestRepeatableReadLogic_temp_table_txn( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "temp_table_txn") +} + +func TestRepeatableReadLogic_tenant( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "tenant") +} + +func TestRepeatableReadLogic_time( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "time") +} + +func TestRepeatableReadLogic_timestamp( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "timestamp") +} + +func TestRepeatableReadLogic_timetz( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "timetz") +} + +func TestRepeatableReadLogic_trigram_builtins( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "trigram_builtins") +} + +func TestRepeatableReadLogic_trigram_indexes( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "trigram_indexes") +} + +func TestRepeatableReadLogic_truncate( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "truncate") +} + 
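Every generated function above and below instantiates the same template: one Go test per logic-test data file, differing only in the function-name suffix and the file name handed to the runner. A minimal sketch of the shape each case takes; "example" is a hypothetical data-file name, and runLogicTest (like its runCCLLogicTest counterpart) is a helper defined earlier in this generated file, so the sketch reuses that file's imports rather than standing alone:

func TestRepeatableReadLogic_example(t *testing.T) {
	// Verify no goroutines are leaked once the test finishes.
	defer leaktest.AfterTest(t)()
	// Run the logic-test data file named "example" under this package's
	// test configuration (repeatable-read isolation, per the name prefix).
	runLogicTest(t, "example")
}

Emitting one function per data file keeps every case individually selectable, e.g. via go test's -run filter.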
+func TestRepeatableReadLogic_truncate_with_concurrent_mutation( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "truncate_with_concurrent_mutation") +} + +func TestRepeatableReadLogic_tsvector( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "tsvector") +} + +func TestRepeatableReadLogic_tuple( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "tuple") +} + +func TestRepeatableReadLogic_tuple_local( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "tuple_local") +} + +func TestRepeatableReadLogic_txn( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "txn") +} + +func TestRepeatableReadLogic_txn_as_of( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "txn_as_of") +} + +func TestRepeatableReadLogic_txn_retry( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "txn_retry") +} + +func TestRepeatableReadLogic_txn_stats( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "txn_stats") +} + +func TestRepeatableReadLogic_type_privileges( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "type_privileges") +} + +func TestRepeatableReadLogic_typing( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "typing") +} + +func TestRepeatableReadLogic_udf( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "udf") +} + +func TestRepeatableReadLogic_udf_calling_udf( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "udf_calling_udf") +} + +func TestRepeatableReadLogic_udf_delete( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "udf_delete") +} + +func TestRepeatableReadLogic_udf_deps( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "udf_deps") +} + +func TestRepeatableReadLogic_udf_fk( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "udf_fk") +} + +func TestRepeatableReadLogic_udf_in_column_defaults( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "udf_in_column_defaults") +} + +func TestRepeatableReadLogic_udf_in_constraints( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "udf_in_constraints") +} + +func TestRepeatableReadLogic_udf_insert( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "udf_insert") +} + +func TestRepeatableReadLogic_udf_observability( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "udf_observability") +} + +func TestRepeatableReadLogic_udf_oid_ref( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "udf_oid_ref") +} + +func TestRepeatableReadLogic_udf_options( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "udf_options") +} + +func TestRepeatableReadLogic_udf_params( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "udf_params") +} + +func TestRepeatableReadLogic_udf_polymorphic( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "udf_polymorphic") +} + +func TestRepeatableReadLogic_udf_prepare( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "udf_prepare") +} + +func TestRepeatableReadLogic_udf_privileges( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "udf_privileges") +} + +func TestRepeatableReadLogic_udf_privileges_mutations( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "udf_privileges_mutations") +} + +func 
TestRepeatableReadLogic_udf_procedure_mix( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "udf_procedure_mix") +} + +func TestRepeatableReadLogic_udf_record( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "udf_record") +} + +func TestRepeatableReadLogic_udf_regressions( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "udf_regressions") +} + +func TestRepeatableReadLogic_udf_rewrite( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "udf_rewrite") +} + +func TestRepeatableReadLogic_udf_schema_change( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "udf_schema_change") +} + +func TestRepeatableReadLogic_udf_setof( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "udf_setof") +} + +func TestRepeatableReadLogic_udf_star( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "udf_star") +} + +func TestRepeatableReadLogic_udf_subquery( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "udf_subquery") +} + +func TestRepeatableReadLogic_udf_unsupported( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "udf_unsupported") +} + +func TestRepeatableReadLogic_udf_update( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "udf_update") +} + +func TestRepeatableReadLogic_udf_upsert( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "udf_upsert") +} + +func TestRepeatableReadLogic_union( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "union") +} + +func TestRepeatableReadLogic_update( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "update") +} + +func TestRepeatableReadLogic_update_from( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "update_from") +} + +func TestRepeatableReadLogic_upsert( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "upsert") +} + +func TestRepeatableReadLogic_upsert_non_metamorphic( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "upsert_non_metamorphic") +} + +func TestRepeatableReadLogic_uuid( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "uuid") +} + +func TestRepeatableReadLogic_values( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "values") +} + +func TestRepeatableReadLogic_vectorize_agg( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "vectorize_agg") +} + +func TestRepeatableReadLogic_vectorize_overloads( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "vectorize_overloads") +} + +func TestRepeatableReadLogic_vectorize_shutdown( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "vectorize_shutdown") +} + +func TestRepeatableReadLogic_vectorize_types( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "vectorize_types") +} + +func TestRepeatableReadLogic_vectorize_unsupported( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "vectorize_unsupported") +} + +func TestRepeatableReadLogic_views( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "views") +} + +func TestRepeatableReadLogic_virtual_columns( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "virtual_columns") +} + +func TestRepeatableReadLogic_virtual_table_privileges( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "virtual_table_privileges") +} + 
+func TestRepeatableReadLogic_void( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "void") +} + +func TestRepeatableReadLogic_where( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "where") +} + +func TestRepeatableReadLogic_window( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "window") +} + +func TestRepeatableReadLogic_with( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "with") +} + +func TestRepeatableReadLogic_workload_indexrecs( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "workload_indexrecs") +} + +func TestRepeatableReadLogic_zero( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "zero") +} + +func TestRepeatableReadLogic_zigzag_join( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "zigzag_join") +} + +func TestRepeatableReadLogic_zone_config( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "zone_config") +} + +func TestRepeatableReadLogic_zone_config_system_tenant( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "zone_config_system_tenant") +} + +func TestRepeatableReadLogicCCL_fips_ready( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runCCLLogicTest(t, "fips_ready") +} + +func TestRepeatableReadLogicCCL_fk_read_committed( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runCCLLogicTest(t, "fk_read_committed") +} + +func TestRepeatableReadLogicCCL_hash_sharded_index_read_committed( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runCCLLogicTest(t, "hash_sharded_index_read_committed") +} + +func TestRepeatableReadLogicCCL_nested_routines( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runCCLLogicTest(t, "nested_routines") +} + +func TestRepeatableReadLogicCCL_new_schema_changer( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runCCLLogicTest(t, "new_schema_changer") +} + +func TestRepeatableReadLogicCCL_partitioning_enum( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runCCLLogicTest(t, "partitioning_enum") +} + +func TestRepeatableReadLogicCCL_pgcrypto_builtins( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runCCLLogicTest(t, "pgcrypto_builtins") +} + +func TestRepeatableReadLogicCCL_plpgsql_block( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runCCLLogicTest(t, "plpgsql_block") +} + +func TestRepeatableReadLogicCCL_plpgsql_call( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runCCLLogicTest(t, "plpgsql_call") +} + +func TestRepeatableReadLogicCCL_plpgsql_cursor( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runCCLLogicTest(t, "plpgsql_cursor") +} + +func TestRepeatableReadLogicCCL_plpgsql_into( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runCCLLogicTest(t, "plpgsql_into") +} + +func TestRepeatableReadLogicCCL_plpgsql_record( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runCCLLogicTest(t, "plpgsql_record") +} + +func TestRepeatableReadLogicCCL_plpgsql_txn( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runCCLLogicTest(t, "plpgsql_txn") +} + +func TestRepeatableReadLogicCCL_plpgsql_unsupported( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runCCLLogicTest(t, "plpgsql_unsupported") +} + +func TestRepeatableReadLogicCCL_procedure_params( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runCCLLogicTest(t, "procedure_params") +} + +func TestRepeatableReadLogicCCL_procedure_plpgsql( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + 
runCCLLogicTest(t, "procedure_plpgsql") +} + +func TestRepeatableReadLogicCCL_read_committed( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runCCLLogicTest(t, "read_committed") +} + +func TestRepeatableReadLogicCCL_redact_descriptor( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runCCLLogicTest(t, "redact_descriptor") +} + +func TestRepeatableReadLogicCCL_refcursor( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runCCLLogicTest(t, "refcursor") +} + +func TestRepeatableReadLogicCCL_schema_change_in_txn( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runCCLLogicTest(t, "schema_change_in_txn") +} + +func TestRepeatableReadLogicCCL_select_for_update_read_committed( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runCCLLogicTest(t, "select_for_update_read_committed") +} + +func TestRepeatableReadLogicCCL_show_create( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runCCLLogicTest(t, "show_create") +} + +func TestRepeatableReadLogicCCL_subject( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runCCLLogicTest(t, "subject") +} + +func TestRepeatableReadLogicCCL_triggers( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runCCLLogicTest(t, "triggers") +} + +func TestRepeatableReadLogicCCL_udf_params( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runCCLLogicTest(t, "udf_params") +} + +func TestRepeatableReadLogicCCL_udf_plpgsql( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runCCLLogicTest(t, "udf_plpgsql") +} + +func TestRepeatableReadLogicCCL_udf_rewrite( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runCCLLogicTest(t, "udf_rewrite") +} + +func TestRepeatableReadLogicCCL_udf_volatility_check( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runCCLLogicTest(t, "udf_volatility_check") +} + +func TestRepeatableReadLogicCCL_unique_read_committed( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runCCLLogicTest(t, "unique_read_committed") +} + +func TestRepeatableReadLogicCCL_vector( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runCCLLogicTest(t, "vector") +} + +func TestRepeatableReadExecBuild_geospatial( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runExecBuildLogicTest(t, "geospatial") +} diff --git a/pkg/ccl/logictestccl/tests/multiregion-15node-5region-3azs/generated_test.go b/pkg/ccl/logictestccl/tests/multiregion-15node-5region-3azs/generated_test.go index f51c3bcaf5a8..d50a3d1d16cf 100644 --- a/pkg/ccl/logictestccl/tests/multiregion-15node-5region-3azs/generated_test.go +++ b/pkg/ccl/logictestccl/tests/multiregion-15node-5region-3azs/generated_test.go @@ -29,7 +29,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/randutil" ) -const configIdx = 17 +const configIdx = 18 var cclLogicTestDir string diff --git a/pkg/ccl/logictestccl/tests/multiregion-3node-3superlongregions/generated_test.go b/pkg/ccl/logictestccl/tests/multiregion-3node-3superlongregions/generated_test.go index 7781e4461100..024f323bec61 100644 --- a/pkg/ccl/logictestccl/tests/multiregion-3node-3superlongregions/generated_test.go +++ b/pkg/ccl/logictestccl/tests/multiregion-3node-3superlongregions/generated_test.go @@ -29,7 +29,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/randutil" ) -const configIdx = 12 +const configIdx = 13 var cclLogicTestDir string diff --git a/pkg/ccl/logictestccl/tests/multiregion-9node-3region-3azs-no-los/generated_test.go b/pkg/ccl/logictestccl/tests/multiregion-9node-3region-3azs-no-los/generated_test.go index a7d213e742cf..0d7556c6e0a7 100644 --- 
a/pkg/ccl/logictestccl/tests/multiregion-9node-3region-3azs-no-los/generated_test.go +++ b/pkg/ccl/logictestccl/tests/multiregion-9node-3region-3azs-no-los/generated_test.go @@ -29,7 +29,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/randutil" ) -const configIdx = 16 +const configIdx = 17 var cclLogicTestDir string diff --git a/pkg/ccl/logictestccl/tests/multiregion-9node-3region-3azs-tenant/generated_test.go b/pkg/ccl/logictestccl/tests/multiregion-9node-3region-3azs-tenant/generated_test.go index 284d320a3cb2..553b2b90911f 100644 --- a/pkg/ccl/logictestccl/tests/multiregion-9node-3region-3azs-tenant/generated_test.go +++ b/pkg/ccl/logictestccl/tests/multiregion-9node-3region-3azs-tenant/generated_test.go @@ -29,7 +29,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/randutil" ) -const configIdx = 14 +const configIdx = 15 var cclLogicTestDir string diff --git a/pkg/ccl/logictestccl/tests/multiregion-9node-3region-3azs-vec-off/generated_test.go b/pkg/ccl/logictestccl/tests/multiregion-9node-3region-3azs-vec-off/generated_test.go index 8d43a5dd6223..5fe4e0cee0b4 100644 --- a/pkg/ccl/logictestccl/tests/multiregion-9node-3region-3azs-vec-off/generated_test.go +++ b/pkg/ccl/logictestccl/tests/multiregion-9node-3region-3azs-vec-off/generated_test.go @@ -29,7 +29,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/randutil" ) -const configIdx = 15 +const configIdx = 16 var cclLogicTestDir string diff --git a/pkg/ccl/logictestccl/tests/multiregion-9node-3region-3azs/generated_test.go b/pkg/ccl/logictestccl/tests/multiregion-9node-3region-3azs/generated_test.go index 875c32d30464..b2217436d367 100644 --- a/pkg/ccl/logictestccl/tests/multiregion-9node-3region-3azs/generated_test.go +++ b/pkg/ccl/logictestccl/tests/multiregion-9node-3region-3azs/generated_test.go @@ -29,7 +29,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/randutil" ) -const configIdx = 13 +const configIdx = 14 var cclLogicTestDir string diff --git a/pkg/ccl/multiregionccl/multiregion_system_table_test.go b/pkg/ccl/multiregionccl/multiregion_system_table_test.go index 3c9b92de27e8..69fd019a32c4 100644 --- a/pkg/ccl/multiregionccl/multiregion_system_table_test.go +++ b/pkg/ccl/multiregionccl/multiregion_system_table_test.go @@ -300,6 +300,7 @@ func TestMrSystemDatabase(t *testing.T) { {"TABLE system.public.statement_diagnostics_requests"}, {"TABLE system.public.statement_execution_insights"}, {"TABLE system.public.statement_statistics"}, + {"TABLE system.public.table_metadata"}, {"TABLE system.public.task_payloads"}, {"TABLE system.public.tenant_settings"}, {"TABLE system.public.tenant_tasks"}, @@ -341,6 +342,7 @@ func TestMrSystemDatabase(t *testing.T) { {"TABLE system.public.statement_diagnostics_requests"}, {"TABLE system.public.statement_execution_insights"}, {"TABLE system.public.statement_statistics"}, + {"TABLE system.public.table_metadata"}, {"TABLE system.public.task_payloads"}, {"TABLE system.public.tenant_settings"}, {"TABLE system.public.tenant_tasks"}, diff --git a/pkg/ccl/multiregionccl/roundtrips_test.go b/pkg/ccl/multiregionccl/roundtrips_test.go index 6a9ab713845f..c39c46778959 100644 --- a/pkg/ccl/multiregionccl/roundtrips_test.go +++ b/pkg/ccl/multiregionccl/roundtrips_test.go @@ -125,7 +125,7 @@ func TestEnsureLocalReadsOnGlobalTables(t *testing.T) { cache := tc.Server(i).DistSenderI().(*kvcoord.DistSender).RangeDescriptorCache() entry, err := cache.TestingGetCached(context.Background(), tablePrefix, false /* inverted */) require.NoError(t, err) - 
require.NotNil(t, entry.Lease.Empty()) + require.False(t, entry.Lease.Empty()) if expected, got := roachpb.LEAD_FOR_GLOBAL_READS, entry.ClosedTimestampPolicy; got != expected { return errors.Newf("expected closedts policy %s, got %s", expected, got) diff --git a/pkg/ccl/multitenantccl/tenantcostclient/metrics.go b/pkg/ccl/multitenantccl/tenantcostclient/metrics.go index 4e2fed2d5292..fe89c8397c4f 100644 --- a/pkg/ccl/multitenantccl/tenantcostclient/metrics.go +++ b/pkg/ccl/multitenantccl/tenantcostclient/metrics.go @@ -207,13 +207,13 @@ func (m *metrics) Init(locality roachpb.Locality) { m.ProvisionedVcpus = metric.NewGauge(metaProvisionedVcpus) // Metric labels for KV replication traffic will be derived from the SQL - // server's locality. e.g. {"from_region", "from_az", "to_region", "to_az"}. + // server's locality. e.g. {"source_region", "source_az", "destination_region", "destination_az"}. var labels []string for _, t := range locality.Tiers { - labels = append(labels, fmt.Sprintf("from_%s", t.Key)) + labels = append(labels, fmt.Sprintf("source_%s", t.Key)) } for _, t := range locality.Tiers { - labels = append(labels, fmt.Sprintf("to_%s", t.Key)) + labels = append(labels, fmt.Sprintf("destination_%s", t.Key)) } m.EstimatedReplicationBytes = aggmetric.NewCounter(metaTotalEstimatedReplicationBytes, labels...) m.mu.pathMetrics = make(map[string]*networkPathMetrics) diff --git a/pkg/ccl/multitenantccl/tenantcostclient/tenant_side_test.go b/pkg/ccl/multitenantccl/tenantcostclient/tenant_side_test.go index e4e6af5cde96..8c25cb0315d8 100644 --- a/pkg/ccl/multitenantccl/tenantcostclient/tenant_side_test.go +++ b/pkg/ccl/multitenantccl/tenantcostclient/tenant_side_test.go @@ -223,7 +223,7 @@ func parseArgs(t *testing.T, d *datadriven.TestData) cmdArgs { case "bytes": v, err := parseBytesVal(args) if err != nil { - d.Fatalf(t, err.Error()) + d.Fatalf(t, "%s", err) } res.bytes = v diff --git a/pkg/ccl/multitenantccl/tenantcostclient/testdata/estimated-cpu b/pkg/ccl/multitenantccl/tenantcostclient/testdata/estimated-cpu index 827e31fc6718..0e64008812fd 100644 --- a/pkg/ccl/multitenantccl/tenantcostclient/testdata/estimated-cpu +++ b/pkg/ccl/multitenantccl/tenantcostclient/testdata/estimated-cpu @@ -42,7 +42,7 @@ tenant.sql_usage.estimated_kv_cpu_seconds: 0.24 tenant.sql_usage.estimated_cpu_seconds: 0.24 tenant.sql_usage.estimated_replication_bytes: 145460 tenant.sql_usage.provisioned_vcpus: 12 -tenant.sql_usage.estimated_replication_bytes{from_region="us-central1",from_zone="az1",to_region="us-central1",to_zone="az1"}: 145460 +tenant.sql_usage.estimated_replication_bytes{source_region="us-central1",source_zone="az1",destination_region="us-central1",destination_zone="az1"}: 145460 # Wait for the token bucket response triggered by low tokens. Not doing this # causes a race condition, since in some cases this response arrives after the @@ -81,7 +81,7 @@ tenant.sql_usage.estimated_kv_cpu_seconds: 0.24 tenant.sql_usage.estimated_cpu_seconds: 0.31 tenant.sql_usage.estimated_replication_bytes: 145460 tenant.sql_usage.provisioned_vcpus: 12 -tenant.sql_usage.estimated_replication_bytes{from_region="us-central1",from_zone="az1",to_region="us-central1",to_zone="az1"}: 145460 +tenant.sql_usage.estimated_replication_bytes{source_region="us-central1",source_zone="az1",destination_region="us-central1",destination_zone="az1"}: 145460 # Do same writes, but with a different write batch rate. This time, the # estimated CPU consumption should be less. 
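The metrics.go hunk above renames the per-path replication label keys from from_*/to_* to source_*/destination_*. The keys are derived from the SQL server's locality tiers, which is why the testdata in this file switches to source_region, source_zone, destination_region, and destination_zone. A self-contained sketch of that derivation; the tier type here is a simplified stand-in for roachpb.Locality's tiers:

package main

import "fmt"

// tier is a stand-in for one roachpb.Locality tier (key=value).
type tier struct{ Key, Value string }

// replicationLabels mirrors the label-building loops in metrics.Init:
// one "source_<key>" label per locality tier, then one
// "destination_<key>" label per tier, in tier order.
func replicationLabels(tiers []tier) []string {
	var labels []string
	for _, t := range tiers {
		labels = append(labels, fmt.Sprintf("source_%s", t.Key))
	}
	for _, t := range tiers {
		labels = append(labels, fmt.Sprintf("destination_%s", t.Key))
	}
	return labels
}

func main() {
	// A SQL server in region=us-central1,zone=az1 yields exactly the label
	// keys that appear in the updated metric lines in this testdata file.
	fmt.Println(replicationLabels([]tier{{"region", "us-central1"}, {"zone", "az1"}}))
	// Output: [source_region source_zone destination_region destination_zone]
}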
@@ -135,8 +135,8 @@ tenant.sql_usage.estimated_kv_cpu_seconds: 0.42 tenant.sql_usage.estimated_cpu_seconds: 0.56 tenant.sql_usage.estimated_replication_bytes: 290920 tenant.sql_usage.provisioned_vcpus: 12 -tenant.sql_usage.estimated_replication_bytes{from_region="us-central1",from_zone="az1",to_region="us-central1",to_zone="az1"}: 218190 -tenant.sql_usage.estimated_replication_bytes{from_region="us-central1",from_zone="az1",to_region="us-central1",to_zone="az2"}: 72730 +tenant.sql_usage.estimated_replication_bytes{source_region="us-central1",source_zone="az1",destination_region="us-central1",destination_zone="az1"}: 218190 +tenant.sql_usage.estimated_replication_bytes{source_region="us-central1",source_zone="az1",destination_region="us-central1",destination_zone="az2"}: 72730 # Advance time to next period and do same writes, with the same write batch # rate, but with a global estimated CPU rate. The estimated CPU rate should not @@ -190,8 +190,8 @@ tenant.sql_usage.estimated_kv_cpu_seconds: 0.61 tenant.sql_usage.estimated_cpu_seconds: 0.81 tenant.sql_usage.estimated_replication_bytes: 436380 tenant.sql_usage.provisioned_vcpus: 12 -tenant.sql_usage.estimated_replication_bytes{from_region="us-central1",from_zone="az1",to_region="us-central1",to_zone="az1"}: 290920 -tenant.sql_usage.estimated_replication_bytes{from_region="us-central1",from_zone="az1",to_region="us-central1",to_zone="az2"}: 145460 +tenant.sql_usage.estimated_replication_bytes{source_region="us-central1",source_zone="az1",destination_region="us-central1",destination_zone="az1"}: 290920 +tenant.sql_usage.estimated_replication_bytes{source_region="us-central1",source_zone="az1",destination_region="us-central1",destination_zone="az2"}: 145460 # Update provisioned vCPUs. provisioned-vcpus count=48 @@ -228,8 +228,8 @@ tenant.sql_usage.estimated_kv_cpu_seconds: 0.62 tenant.sql_usage.estimated_cpu_seconds: 0.82 tenant.sql_usage.estimated_replication_bytes: 456480 tenant.sql_usage.provisioned_vcpus: 48 -tenant.sql_usage.estimated_replication_bytes{from_region="us-central1",from_zone="az1",to_region="us-central1",to_zone="az1"}: 311020 -tenant.sql_usage.estimated_replication_bytes{from_region="us-central1",from_zone="az1",to_region="us-central1",to_zone="az2"}: 145460 +tenant.sql_usage.estimated_replication_bytes{source_region="us-central1",source_zone="az1",destination_region="us-central1",destination_zone="az1"}: 311020 +tenant.sql_usage.estimated_replication_bytes{source_region="us-central1",source_zone="az1",destination_region="us-central1",destination_zone="az2"}: 145460 # Now perform some read operations. @@ -264,8 +264,8 @@ tenant.sql_usage.estimated_kv_cpu_seconds: 2.24 tenant.sql_usage.estimated_cpu_seconds: 2.97 tenant.sql_usage.estimated_replication_bytes: 456480 tenant.sql_usage.provisioned_vcpus: 48 -tenant.sql_usage.estimated_replication_bytes{from_region="us-central1",from_zone="az1",to_region="us-central1",to_zone="az1"}: 311020 -tenant.sql_usage.estimated_replication_bytes{from_region="us-central1",from_zone="az1",to_region="us-central1",to_zone="az2"}: 145460 +tenant.sql_usage.estimated_replication_bytes{source_region="us-central1",source_zone="az1",destination_region="us-central1",destination_zone="az1"}: 311020 +tenant.sql_usage.estimated_replication_bytes{source_region="us-central1",source_zone="az1",destination_region="us-central1",destination_zone="az2"}: 145460 # KV CPU seconds should not change, only total CPU seconds. Background CPU usage # should be accounted for. 
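As a consistency check on these expected values, the per-path series sum to the aggregate counter: in the hunk above, 218190 + 72730 = 290920, the value reported for tenant.sql_usage.estimated_replication_bytes, and in the following hunk 290920 + 145460 = 436380. The labeled lines break the same traffic down by (source, destination) locality pair; they are not additional bytes.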
@@ -301,8 +301,8 @@ tenant.sql_usage.estimated_kv_cpu_seconds: 2.24 tenant.sql_usage.estimated_cpu_seconds: 4.28 tenant.sql_usage.estimated_replication_bytes: 456480 tenant.sql_usage.provisioned_vcpus: 48 -tenant.sql_usage.estimated_replication_bytes{from_region="us-central1",from_zone="az1",to_region="us-central1",to_zone="az1"}: 311020 -tenant.sql_usage.estimated_replication_bytes{from_region="us-central1",from_zone="az1",to_region="us-central1",to_zone="az2"}: 145460 +tenant.sql_usage.estimated_replication_bytes{source_region="us-central1",source_zone="az1",destination_region="us-central1",destination_zone="az1"}: 311020 +tenant.sql_usage.estimated_replication_bytes{source_region="us-central1",source_zone="az1",destination_region="us-central1",destination_zone="az2"}: 145460 # External I/O should not block or consume tokens. external-egress bytes=1024000 @@ -339,8 +339,8 @@ tenant.sql_usage.estimated_kv_cpu_seconds: 2.24 tenant.sql_usage.estimated_cpu_seconds: 4.28 tenant.sql_usage.estimated_replication_bytes: 456480 tenant.sql_usage.provisioned_vcpus: 48 -tenant.sql_usage.estimated_replication_bytes{from_region="us-central1",from_zone="az1",to_region="us-central1",to_zone="az1"}: 311020 -tenant.sql_usage.estimated_replication_bytes{from_region="us-central1",from_zone="az1",to_region="us-central1",to_zone="az2"}: 145460 +tenant.sql_usage.estimated_replication_bytes{source_region="us-central1",source_zone="az1",destination_region="us-central1",destination_zone="az1"}: 311020 +tenant.sql_usage.estimated_replication_bytes{source_region="us-central1",source_zone="az1",destination_region="us-central1",destination_zone="az2"}: 145460 # PGWire egress should not block or consume tokens. pgwire-egress @@ -375,8 +375,8 @@ tenant.sql_usage.estimated_kv_cpu_seconds: 2.24 tenant.sql_usage.estimated_cpu_seconds: 4.28 tenant.sql_usage.estimated_replication_bytes: 456480 tenant.sql_usage.provisioned_vcpus: 48 -tenant.sql_usage.estimated_replication_bytes{from_region="us-central1",from_zone="az1",to_region="us-central1",to_zone="az1"}: 311020 -tenant.sql_usage.estimated_replication_bytes{from_region="us-central1",from_zone="az1",to_region="us-central1",to_zone="az2"}: 145460 +tenant.sql_usage.estimated_replication_bytes{source_region="us-central1",source_zone="az1",destination_region="us-central1",destination_zone="az1"}: 311020 +tenant.sql_usage.estimated_replication_bytes{source_region="us-central1",source_zone="az1",destination_region="us-central1",destination_zone="az2"}: 145460 # Ensure that token bucket request is made after 10 seconds (though it returns # no tokens). 
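Two small test fixes ride along with the metric rename. In roundtrips_test.go, require.NotNil(t, entry.Lease.Empty()) was vacuous, since Lease.Empty() returns a bool and a bool value is never nil; require.False(t, entry.Lease.Empty()) actually fails when no lease is cached. In tenant_side_test.go, d.Fatalf(t, err.Error()) feeds the error text to a printf-style function as its format string, so any literal % verb in the message is misparsed (the pattern go vet's printf check warns about); the fix passes the error as data under a fixed "%s" format. A self-contained illustration of the hazard using only the standard library:

package main

import (
	"errors"
	"fmt"
)

func main() {
	err := errors.New(`unexpected token "%s"`)
	// Unsafe: the message becomes the format string, the embedded %s has no
	// matching argument, and the output is garbled.
	fmt.Println(fmt.Sprintf(err.Error())) // unexpected token "%!s(MISSING)"
	// Safe: a fixed format string, with the error passed as an argument.
	fmt.Println(fmt.Sprintf("%s", err)) // unexpected token "%s"
}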
@@ -426,9 +426,9 @@ tenant.sql_usage.estimated_kv_cpu_seconds: 2.27 tenant.sql_usage.estimated_cpu_seconds: 4.32 tenant.sql_usage.estimated_replication_bytes: 462080 tenant.sql_usage.provisioned_vcpus: 48 -tenant.sql_usage.estimated_replication_bytes{from_region="us-central1",from_zone="",to_region="europe-west1",to_zone=""}: 2800 -tenant.sql_usage.estimated_replication_bytes{from_region="us-central1",from_zone="az1",to_region="us-central1",to_zone="az1"}: 312420 -tenant.sql_usage.estimated_replication_bytes{from_region="us-central1",from_zone="az1",to_region="us-central1",to_zone="az2"}: 146860 +tenant.sql_usage.estimated_replication_bytes{source_region="us-central1",source_zone="",destination_region="europe-west1",destination_zone=""}: 2800 +tenant.sql_usage.estimated_replication_bytes{source_region="us-central1",source_zone="az1",destination_region="us-central1",destination_zone="az1"}: 312420 +tenant.sql_usage.estimated_replication_bytes{source_region="us-central1",source_zone="az1",destination_region="us-central1",destination_zone="az2"}: 146860 # Allow the provider to grant tokens again. configure diff --git a/pkg/ccl/partitionccl/zone_test.go b/pkg/ccl/partitionccl/zone_test.go index d999ff13fe52..458c81aa6f81 100644 --- a/pkg/ccl/partitionccl/zone_test.go +++ b/pkg/ccl/partitionccl/zone_test.go @@ -273,6 +273,9 @@ func TestInvalidIndexPartitionSetShowZones(t *testing.T) { "SHOW ZONE CONFIGURATION FOR INDEX foo", `index "foo" does not exist`, }, + // N.B. The following will always fall back to the legacy schema changer + // because multi-statement txns are not yet supported by our declarative + // schema changer. { "USE system; ALTER INDEX foo CONFIGURE ZONE USING DEFAULT", `index "foo" does not exist`, diff --git a/pkg/ccl/schemachangerccl/backup_base_generated_test.go b/pkg/ccl/schemachangerccl/backup_base_generated_test.go index 4e5e35c7ea38..e7e01bc8dc67 100644 --- a/pkg/ccl/schemachangerccl/backup_base_generated_test.go +++ b/pkg/ccl/schemachangerccl/backup_base_generated_test.go @@ -53,6 +53,13 @@ func TestBackupRollbacks_base_add_column_no_default(t *testing.T) { sctest.BackupRollbacks(t, path, sctest.SingleNodeTestClusterFactory{}) } +func TestBackupRollbacks_base_add_column_serial(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + const path = "pkg/sql/schemachanger/testdata/end_to_end/add_column_serial" + sctest.BackupRollbacks(t, path, sctest.SingleNodeTestClusterFactory{}) +} + func TestBackupRollbacks_base_add_column_virtual_not_null(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) @@ -557,6 +564,13 @@ func TestBackupRollbacksMixedVersion_base_add_column_no_default(t *testing.T) { sctest.BackupRollbacksMixedVersion(t, path, sctest.SingleNodeTestClusterFactory{}) } +func TestBackupRollbacksMixedVersion_base_add_column_serial(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + const path = "pkg/sql/schemachanger/testdata/end_to_end/add_column_serial" + sctest.BackupRollbacksMixedVersion(t, path, sctest.SingleNodeTestClusterFactory{}) +} + func TestBackupRollbacksMixedVersion_base_add_column_virtual_not_null(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) @@ -1061,6 +1075,13 @@ func TestBackupSuccess_base_add_column_no_default(t *testing.T) { sctest.BackupSuccess(t, path, sctest.SingleNodeTestClusterFactory{}) } +func TestBackupSuccess_base_add_column_serial(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + const path =
"pkg/sql/schemachanger/testdata/end_to_end/add_column_serial" + sctest.BackupSuccess(t, path, sctest.SingleNodeTestClusterFactory{}) +} + func TestBackupSuccess_base_add_column_virtual_not_null(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) @@ -1565,6 +1586,13 @@ func TestBackupSuccessMixedVersion_base_add_column_no_default(t *testing.T) { sctest.BackupSuccessMixedVersion(t, path, sctest.SingleNodeTestClusterFactory{}) } +func TestBackupSuccessMixedVersion_base_add_column_serial(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + const path = "pkg/sql/schemachanger/testdata/end_to_end/add_column_serial" + sctest.BackupSuccessMixedVersion(t, path, sctest.SingleNodeTestClusterFactory{}) +} + func TestBackupSuccessMixedVersion_base_add_column_virtual_not_null(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) diff --git a/pkg/ccl/schemachangerccl/ccl_generated_test.go b/pkg/ccl/schemachangerccl/ccl_generated_test.go index 27a92aba5e65..cb91f20fc128 100644 --- a/pkg/ccl/schemachangerccl/ccl_generated_test.go +++ b/pkg/ccl/schemachangerccl/ccl_generated_test.go @@ -18,6 +18,20 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/log" ) +func TestBackupRollbacks_ccl_alter_index_configure_zone(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + const path = "pkg/ccl/schemachangerccl/testdata/end_to_end/alter_index_configure_zone" + sctest.BackupRollbacks(t, path, MultiRegionTestClusterFactory{}) +} + +func TestBackupRollbacks_ccl_alter_index_configure_zone_multiple(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + const path = "pkg/ccl/schemachangerccl/testdata/end_to_end/alter_index_configure_zone_multiple" + sctest.BackupRollbacks(t, path, MultiRegionTestClusterFactory{}) +} + func TestBackupRollbacks_ccl_create_index(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) @@ -46,6 +60,20 @@ func TestBackupRollbacks_ccl_drop_table_multiregion_primary_region(t *testing.T) sctest.BackupRollbacks(t, path, MultiRegionTestClusterFactory{}) } +func TestBackupRollbacksMixedVersion_ccl_alter_index_configure_zone(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + const path = "pkg/ccl/schemachangerccl/testdata/end_to_end/alter_index_configure_zone" + sctest.BackupRollbacksMixedVersion(t, path, MultiRegionTestClusterFactory{}) +} + +func TestBackupRollbacksMixedVersion_ccl_alter_index_configure_zone_multiple(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + const path = "pkg/ccl/schemachangerccl/testdata/end_to_end/alter_index_configure_zone_multiple" + sctest.BackupRollbacksMixedVersion(t, path, MultiRegionTestClusterFactory{}) +} + func TestBackupRollbacksMixedVersion_ccl_create_index(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) @@ -74,6 +102,20 @@ func TestBackupRollbacksMixedVersion_ccl_drop_table_multiregion_primary_region(t sctest.BackupRollbacksMixedVersion(t, path, MultiRegionTestClusterFactory{}) } +func TestBackupSuccess_ccl_alter_index_configure_zone(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + const path = "pkg/ccl/schemachangerccl/testdata/end_to_end/alter_index_configure_zone" + sctest.BackupSuccess(t, path, MultiRegionTestClusterFactory{}) +} + +func TestBackupSuccess_ccl_alter_index_configure_zone_multiple(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + const path = 
"pkg/ccl/schemachangerccl/testdata/end_to_end/alter_index_configure_zone_multiple" + sctest.BackupSuccess(t, path, MultiRegionTestClusterFactory{}) +} + func TestBackupSuccess_ccl_create_index(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) @@ -102,6 +144,20 @@ func TestBackupSuccess_ccl_drop_table_multiregion_primary_region(t *testing.T) { sctest.BackupSuccess(t, path, MultiRegionTestClusterFactory{}) } +func TestBackupSuccessMixedVersion_ccl_alter_index_configure_zone(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + const path = "pkg/ccl/schemachangerccl/testdata/end_to_end/alter_index_configure_zone" + sctest.BackupSuccessMixedVersion(t, path, MultiRegionTestClusterFactory{}) +} + +func TestBackupSuccessMixedVersion_ccl_alter_index_configure_zone_multiple(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + const path = "pkg/ccl/schemachangerccl/testdata/end_to_end/alter_index_configure_zone_multiple" + sctest.BackupSuccessMixedVersion(t, path, MultiRegionTestClusterFactory{}) +} + func TestBackupSuccessMixedVersion_ccl_create_index(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) @@ -130,6 +186,20 @@ func TestBackupSuccessMixedVersion_ccl_drop_table_multiregion_primary_region(t * sctest.BackupSuccessMixedVersion(t, path, MultiRegionTestClusterFactory{}) } +func TestEndToEndSideEffects_ccl_alter_index_configure_zone(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + const path = "pkg/ccl/schemachangerccl/testdata/end_to_end/alter_index_configure_zone" + sctest.EndToEndSideEffects(t, path, MultiRegionTestClusterFactory{}) +} + +func TestEndToEndSideEffects_ccl_alter_index_configure_zone_multiple(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + const path = "pkg/ccl/schemachangerccl/testdata/end_to_end/alter_index_configure_zone_multiple" + sctest.EndToEndSideEffects(t, path, MultiRegionTestClusterFactory{}) +} + func TestEndToEndSideEffects_ccl_create_index(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) @@ -158,6 +228,20 @@ func TestEndToEndSideEffects_ccl_drop_table_multiregion_primary_region(t *testin sctest.EndToEndSideEffects(t, path, MultiRegionTestClusterFactory{}) } +func TestExecuteWithDMLInjection_ccl_alter_index_configure_zone(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + const path = "pkg/ccl/schemachangerccl/testdata/end_to_end/alter_index_configure_zone" + sctest.ExecuteWithDMLInjection(t, path, MultiRegionTestClusterFactory{}) +} + +func TestExecuteWithDMLInjection_ccl_alter_index_configure_zone_multiple(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + const path = "pkg/ccl/schemachangerccl/testdata/end_to_end/alter_index_configure_zone_multiple" + sctest.ExecuteWithDMLInjection(t, path, MultiRegionTestClusterFactory{}) +} + func TestExecuteWithDMLInjection_ccl_create_index(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) @@ -186,6 +270,20 @@ func TestExecuteWithDMLInjection_ccl_drop_table_multiregion_primary_region(t *te sctest.ExecuteWithDMLInjection(t, path, MultiRegionTestClusterFactory{}) } +func TestGenerateSchemaChangeCorpus_ccl_alter_index_configure_zone(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + const path = "pkg/ccl/schemachangerccl/testdata/end_to_end/alter_index_configure_zone" + sctest.GenerateSchemaChangeCorpus(t, path, 
MultiRegionTestClusterFactory{}) +} + +func TestGenerateSchemaChangeCorpus_ccl_alter_index_configure_zone_multiple(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + const path = "pkg/ccl/schemachangerccl/testdata/end_to_end/alter_index_configure_zone_multiple" + sctest.GenerateSchemaChangeCorpus(t, path, MultiRegionTestClusterFactory{}) +} + func TestGenerateSchemaChangeCorpus_ccl_create_index(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) @@ -214,6 +312,20 @@ func TestGenerateSchemaChangeCorpus_ccl_drop_table_multiregion_primary_region(t sctest.GenerateSchemaChangeCorpus(t, path, MultiRegionTestClusterFactory{}) } +func TestPause_ccl_alter_index_configure_zone(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + const path = "pkg/ccl/schemachangerccl/testdata/end_to_end/alter_index_configure_zone" + sctest.Pause(t, path, MultiRegionTestClusterFactory{}) +} + +func TestPause_ccl_alter_index_configure_zone_multiple(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + const path = "pkg/ccl/schemachangerccl/testdata/end_to_end/alter_index_configure_zone_multiple" + sctest.Pause(t, path, MultiRegionTestClusterFactory{}) +} + func TestPause_ccl_create_index(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) @@ -242,6 +354,20 @@ func TestPause_ccl_drop_table_multiregion_primary_region(t *testing.T) { sctest.Pause(t, path, MultiRegionTestClusterFactory{}) } +func TestPauseMixedVersion_ccl_alter_index_configure_zone(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + const path = "pkg/ccl/schemachangerccl/testdata/end_to_end/alter_index_configure_zone" + sctest.PauseMixedVersion(t, path, MultiRegionTestClusterFactory{}) +} + +func TestPauseMixedVersion_ccl_alter_index_configure_zone_multiple(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + const path = "pkg/ccl/schemachangerccl/testdata/end_to_end/alter_index_configure_zone_multiple" + sctest.PauseMixedVersion(t, path, MultiRegionTestClusterFactory{}) +} + func TestPauseMixedVersion_ccl_create_index(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) @@ -270,6 +396,20 @@ func TestPauseMixedVersion_ccl_drop_table_multiregion_primary_region(t *testing. 
sctest.PauseMixedVersion(t, path, MultiRegionTestClusterFactory{}) } +func TestRollback_ccl_alter_index_configure_zone(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + const path = "pkg/ccl/schemachangerccl/testdata/end_to_end/alter_index_configure_zone" + sctest.Rollback(t, path, MultiRegionTestClusterFactory{}) +} + +func TestRollback_ccl_alter_index_configure_zone_multiple(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + const path = "pkg/ccl/schemachangerccl/testdata/end_to_end/alter_index_configure_zone_multiple" + sctest.Rollback(t, path, MultiRegionTestClusterFactory{}) +} + func TestRollback_ccl_create_index(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) diff --git a/pkg/ccl/schemachangerccl/testdata/decomp/multiregion b/pkg/ccl/schemachangerccl/testdata/decomp/multiregion index ea3db6791fa5..93fcdefea301 100644 --- a/pkg/ccl/schemachangerccl/testdata/decomp/multiregion +++ b/pkg/ccl/schemachangerccl/testdata/decomp/multiregion @@ -1063,16 +1063,103 @@ ElementState: - IndexZoneConfig: indexId: 1 partitionName: us-east1 + seqNum: 0 + subzone: + config: + constraints: [] + gc: null + globalReads: null + inheritedConstraints: true + inheritedLeasePreferences: false + leasePreferences: + - constraints: + - key: region + type: REQUIRED + value: us-east1 + nullVoterConstraintsIsEmpty: true + numReplicas: null + numVoters: 5 + rangeMaxBytes: null + rangeMinBytes: null + subzoneSpans: [] + subzones: [] + voterConstraints: + - constraints: + - key: region + type: REQUIRED + value: us-east1 + numReplicas: 2 + indexId: 1 + partitionName: us-east1 + subzoneSpans: [] tableId: 108 Status: PUBLIC - IndexZoneConfig: indexId: 1 partitionName: us-east2 + seqNum: 0 + subzone: + config: + constraints: [] + gc: null + globalReads: null + inheritedConstraints: true + inheritedLeasePreferences: false + leasePreferences: + - constraints: + - key: region + type: REQUIRED + value: us-east2 + nullVoterConstraintsIsEmpty: true + numReplicas: null + numVoters: 5 + rangeMaxBytes: null + rangeMinBytes: null + subzoneSpans: [] + subzones: [] + voterConstraints: + - constraints: + - key: region + type: REQUIRED + value: us-east2 + numReplicas: 2 + indexId: 1 + partitionName: us-east2 + subzoneSpans: [] tableId: 108 Status: PUBLIC - IndexZoneConfig: indexId: 1 partitionName: us-east3 + seqNum: 0 + subzone: + config: + constraints: [] + gc: null + globalReads: null + inheritedConstraints: true + inheritedLeasePreferences: false + leasePreferences: + - constraints: + - key: region + type: REQUIRED + value: us-east3 + nullVoterConstraintsIsEmpty: true + numReplicas: null + numVoters: 5 + rangeMaxBytes: null + rangeMinBytes: null + subzoneSpans: [] + subzones: [] + voterConstraints: + - constraints: + - key: region + type: REQUIRED + value: us-east3 + numReplicas: 2 + indexId: 1 + partitionName: us-east3 + subzoneSpans: [] tableId: 108 Status: PUBLIC - Namespace: diff --git a/pkg/ccl/schemachangerccl/testdata/end_to_end/alter_index_configure_zone/alter_index_configure_zone.definition b/pkg/ccl/schemachangerccl/testdata/end_to_end/alter_index_configure_zone/alter_index_configure_zone.definition new file mode 100644 index 000000000000..cdc875cf1793 --- /dev/null +++ b/pkg/ccl/schemachangerccl/testdata/end_to_end/alter_index_configure_zone/alter_index_configure_zone.definition @@ -0,0 +1,8 @@ +setup +CREATE TABLE t(i int); +CREATE INDEX foo ON t(i); +---- + +test +ALTER INDEX t@foo CONFIGURE ZONE USING num_replicas = 7, gc.ttlseconds 
= 10000; +---- diff --git a/pkg/ccl/schemachangerccl/testdata/end_to_end/alter_index_configure_zone/alter_index_configure_zone.explain b/pkg/ccl/schemachangerccl/testdata/end_to_end/alter_index_configure_zone/alter_index_configure_zone.explain new file mode 100644 index 000000000000..2fd8aee84199 --- /dev/null +++ b/pkg/ccl/schemachangerccl/testdata/end_to_end/alter_index_configure_zone/alter_index_configure_zone.explain @@ -0,0 +1,25 @@ +/* setup */ +CREATE TABLE t(i int); +CREATE INDEX foo ON t(i); + +/* test */ +EXPLAIN (DDL) ALTER INDEX t@foo CONFIGURE ZONE USING num_replicas = 7, gc.ttlseconds = 10000; +---- +Schema change plan for ALTER INDEX ‹defaultdb›.‹public›.‹t›@‹foo› CONFIGURE ZONE USING ‹num_replicas› = ‹7›, ‹"gc.ttlseconds"› = ‹10000›; + ├── StatementPhase + │ └── Stage 1 of 1 in StatementPhase + │ ├── 1 element transitioning toward PUBLIC + │ │ └── ABSENT → PUBLIC IndexZoneConfig:{DescID: 104 (t), IndexID: 2 (foo), SeqNum: 1} + │ └── 1 Mutation operation + │ └── AddIndexZoneConfig {"IndexID":2,"TableID":104} + └── PreCommitPhase + ├── Stage 1 of 2 in PreCommitPhase + │ ├── 1 element transitioning toward PUBLIC + │ │ └── PUBLIC → ABSENT IndexZoneConfig:{DescID: 104 (t), IndexID: 2 (foo), SeqNum: 1} + │ └── 1 Mutation operation + │ └── UndoAllInTxnImmediateMutationOpSideEffects + └── Stage 2 of 2 in PreCommitPhase + ├── 1 element transitioning toward PUBLIC + │ └── ABSENT → PUBLIC IndexZoneConfig:{DescID: 104 (t), IndexID: 2 (foo), SeqNum: 1} + └── 1 Mutation operation + └── AddIndexZoneConfig {"IndexID":2,"TableID":104} diff --git a/pkg/ccl/schemachangerccl/testdata/end_to_end/alter_index_configure_zone/alter_index_configure_zone.explain_shape b/pkg/ccl/schemachangerccl/testdata/end_to_end/alter_index_configure_zone/alter_index_configure_zone.explain_shape new file mode 100644 index 000000000000..e5387e6ab28d --- /dev/null +++ b/pkg/ccl/schemachangerccl/testdata/end_to_end/alter_index_configure_zone/alter_index_configure_zone.explain_shape @@ -0,0 +1,9 @@ +/* setup */ +CREATE TABLE t(i int); +CREATE INDEX foo ON t(i); + +/* test */ +EXPLAIN (DDL, SHAPE) ALTER INDEX t@foo CONFIGURE ZONE USING num_replicas = 7, gc.ttlseconds = 10000; +---- +Schema change plan for ALTER INDEX ‹defaultdb›.‹public›.‹t›@‹foo› CONFIGURE ZONE USING ‹num_replicas› = ‹7›, ‹"gc.ttlseconds"› = ‹10000›; + └── execute 1 system table mutations transaction diff --git a/pkg/ccl/schemachangerccl/testdata/end_to_end/alter_index_configure_zone/alter_index_configure_zone.side_effects b/pkg/ccl/schemachangerccl/testdata/end_to_end/alter_index_configure_zone/alter_index_configure_zone.side_effects new file mode 100644 index 000000000000..b23996af169e --- /dev/null +++ b/pkg/ccl/schemachangerccl/testdata/end_to_end/alter_index_configure_zone/alter_index_configure_zone.side_effects @@ -0,0 +1,33 @@ +/* setup */ +CREATE TABLE t(i int); +CREATE INDEX foo ON t(i); +---- +... 
++object {100 101 t} -> 104 + +/* test */ +ALTER INDEX t@foo CONFIGURE ZONE USING num_replicas = 7, gc.ttlseconds = 10000; +---- +begin transaction #1 +# begin StatementPhase +checking for feature: CONFIGURE ZONE +write *eventpb.AlterTable to event log: + mutationId: 1 + sql: + descriptorId: 104 + statement: ALTER INDEX ‹defaultdb›.‹public›.‹t›@‹foo› CONFIGURE ZONE USING ‹num_replicas› = ‹7›, ‹"gc.ttlseconds"› = ‹10000› + tag: CONFIGURE ZONE + user: root + tableName: defaultdb.public.t +## StatementPhase stage 1 of 1 with 1 MutationType op +upsert zone config for #104 +# end StatementPhase +# begin PreCommitPhase +## PreCommitPhase stage 1 of 2 with 1 MutationType op +undo all catalog changes within txn #1 +persist all catalog changes to storage +## PreCommitPhase stage 2 of 2 with 1 MutationType op +upsert zone config for #104 +persist all catalog changes to storage +# end PreCommitPhase +commit transaction #1 diff --git a/pkg/ccl/schemachangerccl/testdata/end_to_end/alter_index_configure_zone_multiple/alter_index_configure_zone_multiple.definition b/pkg/ccl/schemachangerccl/testdata/end_to_end/alter_index_configure_zone_multiple/alter_index_configure_zone_multiple.definition new file mode 100644 index 000000000000..9cd84439bac8 --- /dev/null +++ b/pkg/ccl/schemachangerccl/testdata/end_to_end/alter_index_configure_zone_multiple/alter_index_configure_zone_multiple.definition @@ -0,0 +1,10 @@ +setup +CREATE TABLE t(i int); +CREATE INDEX foo ON t(i); +---- + +test +ALTER INDEX t@foo CONFIGURE ZONE USING num_replicas = 7; +ALTER INDEX t@foo CONFIGURE ZONE USING gc.ttlseconds = 10000; +ALTER INDEX t@foo CONFIGURE ZONE USING num_replicas = 10; +---- diff --git a/pkg/ccl/schemachangerccl/testdata/end_to_end/alter_index_configure_zone_multiple/alter_index_configure_zone_multiple.explain b/pkg/ccl/schemachangerccl/testdata/end_to_end/alter_index_configure_zone_multiple/alter_index_configure_zone_multiple.explain new file mode 100644 index 000000000000..f115c1e80113 --- /dev/null +++ b/pkg/ccl/schemachangerccl/testdata/end_to_end/alter_index_configure_zone_multiple/alter_index_configure_zone_multiple.explain @@ -0,0 +1,25 @@ +/* setup */ +CREATE TABLE t(i int); +CREATE INDEX foo ON t(i); + +/* test */ +EXPLAIN (DDL) ALTER INDEX t@foo CONFIGURE ZONE USING num_replicas = 7, gc.ttlseconds = 10000; +---- +Schema change plan for ALTER INDEX ‹defaultdb›.‹public›.‹t›@‹foo› CONFIGURE ZONE USING ‹num_replicas› = ‹7›, ‹"gc.ttlseconds"› = ‹10000›; + ├── StatementPhase + │ └── Stage 1 of 1 in StatementPhase + │ ├── 1 element transitioning toward PUBLIC + │ │ └── ABSENT → PUBLIC IndexZoneConfig:{DescID: 104 (t), IndexID: 0, SeqNum: 1} + │ └── 1 Mutation operation + │ └── AddIndexZoneConfig {"TableID":104} + └── PreCommitPhase + ├── Stage 1 of 2 in PreCommitPhase + │ ├── 1 element transitioning toward PUBLIC + │ │ └── PUBLIC → ABSENT IndexZoneConfig:{DescID: 104 (t), IndexID: 0, SeqNum: 1} + │ └── 1 Mutation operation + │ └── UndoAllInTxnImmediateMutationOpSideEffects + └── Stage 2 of 2 in PreCommitPhase + ├── 1 element transitioning toward PUBLIC + │ └── ABSENT → PUBLIC IndexZoneConfig:{DescID: 104 (t), IndexID: 0, SeqNum: 1} + └── 1 Mutation operation + └── AddIndexZoneConfig {"TableID":104} diff --git a/pkg/ccl/schemachangerccl/testdata/end_to_end/alter_index_configure_zone_multiple/alter_index_configure_zone_multiple.explain_shape b/pkg/ccl/schemachangerccl/testdata/end_to_end/alter_index_configure_zone_multiple/alter_index_configure_zone_multiple.explain_shape new file mode 100644 index 
000000000000..e5387e6ab28d --- /dev/null +++ b/pkg/ccl/schemachangerccl/testdata/end_to_end/alter_index_configure_zone_multiple/alter_index_configure_zone_multiple.explain_shape @@ -0,0 +1,9 @@ +/* setup */ +CREATE TABLE t(i int); +CREATE INDEX foo ON t(i); + +/* test */ +EXPLAIN (DDL, SHAPE) ALTER INDEX t@foo CONFIGURE ZONE USING num_replicas = 7, gc.ttlseconds = 10000; +---- +Schema change plan for ALTER INDEX ‹defaultdb›.‹public›.‹t›@‹foo› CONFIGURE ZONE USING ‹num_replicas› = ‹7›, ‹"gc.ttlseconds"› = ‹10000›; + └── execute 1 system table mutations transaction diff --git a/pkg/ccl/schemachangerccl/testdata/end_to_end/alter_index_configure_zone_multiple/alter_index_configure_zone_multiple.side_effects b/pkg/ccl/schemachangerccl/testdata/end_to_end/alter_index_configure_zone_multiple/alter_index_configure_zone_multiple.side_effects new file mode 100644 index 000000000000..099d632b6f15 --- /dev/null +++ b/pkg/ccl/schemachangerccl/testdata/end_to_end/alter_index_configure_zone_multiple/alter_index_configure_zone_multiple.side_effects @@ -0,0 +1,57 @@ +/* setup */ +CREATE TABLE t(i int); +CREATE INDEX foo ON t(i); +---- +... ++object {100 101 t} -> 104 + +/* test */ +ALTER INDEX t@foo CONFIGURE ZONE USING num_replicas = 7; +ALTER INDEX t@foo CONFIGURE ZONE USING gc.ttlseconds = 10000; +ALTER INDEX t@foo CONFIGURE ZONE USING num_replicas = 10; +---- +begin transaction #1 +# begin StatementPhase +checking for feature: CONFIGURE ZONE +write *eventpb.AlterTable to event log: + mutationId: 1 + sql: + descriptorId: 104 + statement: ALTER INDEX ‹defaultdb›.‹public›.‹t›@‹foo› CONFIGURE ZONE USING ‹num_replicas› = ‹7› + tag: CONFIGURE ZONE + user: root + tableName: defaultdb.public.t +## StatementPhase stage 1 of 1 with 1 MutationType op +upsert zone config for #104 +checking for feature: CONFIGURE ZONE +write *eventpb.AlterTable to event log: + mutationId: 1 + sql: + descriptorId: 104 + statement: ALTER INDEX ‹defaultdb›.‹public›.‹t›@‹foo› CONFIGURE ZONE USING ‹"gc.ttlseconds"› = ‹10000› + tag: CONFIGURE ZONE + user: root + tableName: defaultdb.public.t +## StatementPhase stage 1 of 1 with 1 MutationType op +upsert zone config for #104 +checking for feature: CONFIGURE ZONE +write *eventpb.AlterTable to event log: + mutationId: 1 + sql: + descriptorId: 104 + statement: ALTER INDEX ‹defaultdb›.‹public›.‹t›@‹foo› CONFIGURE ZONE USING ‹num_replicas› = ‹10› + tag: CONFIGURE ZONE + user: root + tableName: defaultdb.public.t +## StatementPhase stage 1 of 1 with 1 MutationType op +upsert zone config for #104 +# end StatementPhase +# begin PreCommitPhase +## PreCommitPhase stage 1 of 2 with 1 MutationType op +undo all catalog changes within txn #1 +persist all catalog changes to storage +## PreCommitPhase stage 2 of 2 with 3 MutationType ops +upsert zone config for #104 +persist all catalog changes to storage +# end PreCommitPhase +commit transaction #1 diff --git a/pkg/ccl/schemachangerccl/testdata/end_to_end/alter_index_configure_zone_multiple/alter_index_configure_zone_multiple__statement_1_of_3.explain b/pkg/ccl/schemachangerccl/testdata/end_to_end/alter_index_configure_zone_multiple/alter_index_configure_zone_multiple__statement_1_of_3.explain new file mode 100644 index 000000000000..05d3ab8c2972 --- /dev/null +++ b/pkg/ccl/schemachangerccl/testdata/end_to_end/alter_index_configure_zone_multiple/alter_index_configure_zone_multiple__statement_1_of_3.explain @@ -0,0 +1,25 @@ +/* setup */ +CREATE TABLE t(i int); +CREATE INDEX foo ON t(i); + +/* test */ +EXPLAIN (DDL) ALTER INDEX t@foo CONFIGURE ZONE USING 
num_replicas = 7; +---- +Schema change plan for ALTER INDEX ‹defaultdb›.‹public›.‹t›@‹foo› CONFIGURE ZONE USING ‹num_replicas› = ‹7›; + ├── StatementPhase + │ └── Stage 1 of 1 in StatementPhase + │ ├── 1 element transitioning toward PUBLIC + │ │ └── ABSENT → PUBLIC IndexZoneConfig:{DescID: 104 (t), IndexID: 2 (foo), SeqNum: 1} + │ └── 1 Mutation operation + │ └── AddIndexZoneConfig {"IndexID":2,"TableID":104} + └── PreCommitPhase + ├── Stage 1 of 2 in PreCommitPhase + │ ├── 1 element transitioning toward PUBLIC + │ │ └── PUBLIC → ABSENT IndexZoneConfig:{DescID: 104 (t), IndexID: 2 (foo), SeqNum: 1} + │ └── 1 Mutation operation + │ └── UndoAllInTxnImmediateMutationOpSideEffects + └── Stage 2 of 2 in PreCommitPhase + ├── 1 element transitioning toward PUBLIC + │ └── ABSENT → PUBLIC IndexZoneConfig:{DescID: 104 (t), IndexID: 2 (foo), SeqNum: 1} + └── 1 Mutation operation + └── AddIndexZoneConfig {"IndexID":2,"TableID":104} diff --git a/pkg/ccl/schemachangerccl/testdata/end_to_end/alter_index_configure_zone_multiple/alter_index_configure_zone_multiple__statement_1_of_3.explain_shape b/pkg/ccl/schemachangerccl/testdata/end_to_end/alter_index_configure_zone_multiple/alter_index_configure_zone_multiple__statement_1_of_3.explain_shape new file mode 100644 index 000000000000..c67a70d6ee9d --- /dev/null +++ b/pkg/ccl/schemachangerccl/testdata/end_to_end/alter_index_configure_zone_multiple/alter_index_configure_zone_multiple__statement_1_of_3.explain_shape @@ -0,0 +1,9 @@ +/* setup */ +CREATE TABLE t(i int); +CREATE INDEX foo ON t(i); + +/* test */ +EXPLAIN (DDL, SHAPE) ALTER INDEX t@foo CONFIGURE ZONE USING num_replicas = 7; +---- +Schema change plan for ALTER INDEX ‹defaultdb›.‹public›.‹t›@‹foo› CONFIGURE ZONE USING ‹num_replicas› = ‹7›; + └── execute 1 system table mutations transaction diff --git a/pkg/ccl/schemachangerccl/testdata/end_to_end/alter_index_configure_zone_multiple/alter_index_configure_zone_multiple__statement_2_of_3.explain b/pkg/ccl/schemachangerccl/testdata/end_to_end/alter_index_configure_zone_multiple/alter_index_configure_zone_multiple__statement_2_of_3.explain new file mode 100644 index 000000000000..cfd192dd5bb3 --- /dev/null +++ b/pkg/ccl/schemachangerccl/testdata/end_to_end/alter_index_configure_zone_multiple/alter_index_configure_zone_multiple__statement_2_of_3.explain @@ -0,0 +1,29 @@ +/* setup */ +CREATE TABLE t(i int); +CREATE INDEX foo ON t(i); + +/* test */ +ALTER INDEX t@foo CONFIGURE ZONE USING num_replicas = 7; +EXPLAIN (DDL) ALTER INDEX t@foo CONFIGURE ZONE USING gc.ttlseconds = 10000; +---- +Schema change plan for ALTER INDEX ‹defaultdb›.‹public›.‹t›@‹foo› CONFIGURE ZONE USING ‹"gc.ttlseconds"› = ‹10000›; following ALTER INDEX ‹defaultdb›.‹public›.‹t›@‹foo› CONFIGURE ZONE USING ‹num_replicas› = ‹7›; + ├── StatementPhase + │ └── Stage 1 of 1 in StatementPhase + │ ├── 1 element transitioning toward PUBLIC + │ │ └── ABSENT → PUBLIC IndexZoneConfig:{DescID: 104 (t), IndexID: 2 (foo), SeqNum: 2} + │ └── 1 Mutation operation + │ └── AddIndexZoneConfig {"IndexID":2,"TableID":104} + └── PreCommitPhase + ├── Stage 1 of 2 in PreCommitPhase + │ ├── 2 elements transitioning toward PUBLIC + │ │ ├── PUBLIC → ABSENT IndexZoneConfig:{DescID: 104 (t), IndexID: 2 (foo), SeqNum: 1} + │ │ └── PUBLIC → ABSENT IndexZoneConfig:{DescID: 104 (t), IndexID: 2 (foo), SeqNum: 2} + │ └── 1 Mutation operation + │ └── UndoAllInTxnImmediateMutationOpSideEffects + └── Stage 2 of 2 in PreCommitPhase + ├── 2 elements transitioning toward PUBLIC + │ ├── ABSENT → PUBLIC IndexZoneConfig:{DescID: 104 
(t), IndexID: 2 (foo), SeqNum: 1} + │ └── ABSENT → PUBLIC IndexZoneConfig:{DescID: 104 (t), IndexID: 2 (foo), SeqNum: 2} + └── 2 Mutation operations + ├── AddIndexZoneConfig {"IndexID":2,"TableID":104} + └── AddIndexZoneConfig {"IndexID":2,"TableID":104} diff --git a/pkg/ccl/schemachangerccl/testdata/end_to_end/alter_index_configure_zone_multiple/alter_index_configure_zone_multiple__statement_2_of_3.explain_shape b/pkg/ccl/schemachangerccl/testdata/end_to_end/alter_index_configure_zone_multiple/alter_index_configure_zone_multiple__statement_2_of_3.explain_shape new file mode 100644 index 000000000000..dd4ac2d53639 --- /dev/null +++ b/pkg/ccl/schemachangerccl/testdata/end_to_end/alter_index_configure_zone_multiple/alter_index_configure_zone_multiple__statement_2_of_3.explain_shape @@ -0,0 +1,10 @@ +/* setup */ +CREATE TABLE t(i int); +CREATE INDEX foo ON t(i); + +/* test */ +ALTER INDEX t@foo CONFIGURE ZONE USING num_replicas = 7; +EXPLAIN (DDL, SHAPE) ALTER INDEX t@foo CONFIGURE ZONE USING gc.ttlseconds = 10000; +---- +Schema change plan for ALTER INDEX ‹defaultdb›.‹public›.‹t›@‹foo› CONFIGURE ZONE USING ‹"gc.ttlseconds"› = ‹10000›; following ALTER INDEX ‹defaultdb›.‹public›.‹t›@‹foo› CONFIGURE ZONE USING ‹num_replicas› = ‹7›; + └── execute 1 system table mutations transaction diff --git a/pkg/ccl/schemachangerccl/testdata/end_to_end/alter_index_configure_zone_multiple/alter_index_configure_zone_multiple__statement_3_of_3.explain b/pkg/ccl/schemachangerccl/testdata/end_to_end/alter_index_configure_zone_multiple/alter_index_configure_zone_multiple__statement_3_of_3.explain new file mode 100644 index 000000000000..82cf275912b1 --- /dev/null +++ b/pkg/ccl/schemachangerccl/testdata/end_to_end/alter_index_configure_zone_multiple/alter_index_configure_zone_multiple__statement_3_of_3.explain @@ -0,0 +1,33 @@ +/* setup */ +CREATE TABLE t(i int); +CREATE INDEX foo ON t(i); + +/* test */ +ALTER INDEX t@foo CONFIGURE ZONE USING num_replicas = 7; +ALTER INDEX t@foo CONFIGURE ZONE USING gc.ttlseconds = 10000; +EXPLAIN (DDL) ALTER INDEX t@foo CONFIGURE ZONE USING num_replicas = 10; +---- +Schema change plan for ALTER INDEX ‹defaultdb›.‹public›.‹t›@‹foo› CONFIGURE ZONE USING ‹num_replicas› = ‹10›; following ALTER INDEX ‹defaultdb›.‹public›.‹t›@‹foo› CONFIGURE ZONE USING ‹num_replicas› = ‹7›; ALTER INDEX ‹defaultdb›.‹public›.‹t›@‹foo› CONFIGURE ZONE USING ‹"gc.ttlseconds"› = ‹10000›; + ├── StatementPhase + │ └── Stage 1 of 1 in StatementPhase + │ ├── 1 element transitioning toward PUBLIC + │ │ └── ABSENT → PUBLIC IndexZoneConfig:{DescID: 104 (t), IndexID: 2 (foo), SeqNum: 3} + │ └── 1 Mutation operation + │ └── AddIndexZoneConfig {"IndexID":2,"TableID":104} + └── PreCommitPhase + ├── Stage 1 of 2 in PreCommitPhase + │ ├── 3 elements transitioning toward PUBLIC + │ │ ├── PUBLIC → ABSENT IndexZoneConfig:{DescID: 104 (t), IndexID: 2 (foo), SeqNum: 1} + │ │ ├── PUBLIC → ABSENT IndexZoneConfig:{DescID: 104 (t), IndexID: 2 (foo), SeqNum: 2} + │ │ └── PUBLIC → ABSENT IndexZoneConfig:{DescID: 104 (t), IndexID: 2 (foo), SeqNum: 3} + │ └── 1 Mutation operation + │ └── UndoAllInTxnImmediateMutationOpSideEffects + └── Stage 2 of 2 in PreCommitPhase + ├── 3 elements transitioning toward PUBLIC + │ ├── ABSENT → PUBLIC IndexZoneConfig:{DescID: 104 (t), IndexID: 2 (foo), SeqNum: 1} + │ ├── ABSENT → PUBLIC IndexZoneConfig:{DescID: 104 (t), IndexID: 2 (foo), SeqNum: 2} + │ └── ABSENT → PUBLIC IndexZoneConfig:{DescID: 104 (t), IndexID: 2 (foo), SeqNum: 3} + └── 3 Mutation operations + ├── AddIndexZoneConfig 
{"IndexID":2,"TableID":104} + ├── AddIndexZoneConfig {"IndexID":2,"TableID":104} + └── AddIndexZoneConfig {"IndexID":2,"TableID":104} diff --git a/pkg/ccl/schemachangerccl/testdata/end_to_end/alter_index_configure_zone_multiple/alter_index_configure_zone_multiple__statement_3_of_3.explain_shape b/pkg/ccl/schemachangerccl/testdata/end_to_end/alter_index_configure_zone_multiple/alter_index_configure_zone_multiple__statement_3_of_3.explain_shape new file mode 100644 index 000000000000..57c4392f19a9 --- /dev/null +++ b/pkg/ccl/schemachangerccl/testdata/end_to_end/alter_index_configure_zone_multiple/alter_index_configure_zone_multiple__statement_3_of_3.explain_shape @@ -0,0 +1,11 @@ +/* setup */ +CREATE TABLE t(i int); +CREATE INDEX foo ON t(i); + +/* test */ +ALTER INDEX t@foo CONFIGURE ZONE USING num_replicas = 7; +ALTER INDEX t@foo CONFIGURE ZONE USING gc.ttlseconds = 10000; +EXPLAIN (DDL, SHAPE) ALTER INDEX t@foo CONFIGURE ZONE USING num_replicas = 10; +---- +Schema change plan for ALTER INDEX ‹defaultdb›.‹public›.‹t›@‹foo› CONFIGURE ZONE USING ‹num_replicas› = ‹10›; following ALTER INDEX ‹defaultdb›.‹public›.‹t›@‹foo› CONFIGURE ZONE USING ‹num_replicas› = ‹7›; ALTER INDEX ‹defaultdb›.‹public›.‹t›@‹foo› CONFIGURE ZONE USING ‹"gc.ttlseconds"› = ‹10000›; + └── execute 1 system table mutations transaction diff --git a/pkg/ccl/serverccl/diagnosticsccl/BUILD.bazel b/pkg/ccl/serverccl/diagnosticsccl/BUILD.bazel index a6b48f153b55..d2b57cf47f3a 100644 --- a/pkg/ccl/serverccl/diagnosticsccl/BUILD.bazel +++ b/pkg/ccl/serverccl/diagnosticsccl/BUILD.bazel @@ -32,6 +32,7 @@ go_test( "//pkg/util/leaktest", "//pkg/util/log", "//pkg/util/system", + "//pkg/util/timeutil", "//pkg/util/uuid", "@com_github_cockroachdb_errors//:errors", "@com_github_stretchr_testify//require", diff --git a/pkg/ccl/serverccl/diagnosticsccl/reporter_test.go b/pkg/ccl/serverccl/diagnosticsccl/reporter_test.go index f020ee727827..edba63e76f01 100644 --- a/pkg/ccl/serverccl/diagnosticsccl/reporter_test.go +++ b/pkg/ccl/serverccl/diagnosticsccl/reporter_test.go @@ -35,6 +35,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/system" + "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/cockroach/pkg/util/uuid" "github.com/cockroachdb/errors" "github.com/stretchr/testify/require" @@ -277,6 +278,68 @@ func TestServerReport(t *testing.T) { } } +func TestTelemetry_SuccessfulTelemetryPing(t *testing.T) { + defer leaktest.AfterTest(t)() + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + rt := startReporterTest(t, base.TestIsSpecificToStorageLayerAndNeedsASystemTenant) + defer rt.Close() + + ctx := context.Background() + setupCluster(t, rt.serverDB) + + for _, tc := range []struct { + name string + respError error + respCode int + expectTimestampUpdate bool + }{ + { + name: "200 response", + respError: nil, + respCode: 200, + expectTimestampUpdate: true, + }, + { + name: "400 response", + respError: nil, + respCode: 400, + expectTimestampUpdate: true, + }, + { + name: "500 response", + respError: nil, + respCode: 500, + expectTimestampUpdate: true, + }, + { + name: "connection error", + respError: errors.New("connection refused"), + expectTimestampUpdate: false, + }, + } { + t.Run(tc.name, func(t *testing.T) { + defer rt.diagServer.SetRespError(tc.respError)() + defer rt.diagServer.SetRespCode(tc.respCode)() + + dr := 
rt.server.DiagnosticsReporter().(*diagnostics.Reporter) + + before := timeutil.Now().Unix() + oldTimestamp := dr.LastSuccessfulTelemetryPing.Load() + require.LessOrEqual(t, dr.LastSuccessfulTelemetryPing.Load(), before) + dr.ReportDiagnostics(ctx) + + if tc.expectTimestampUpdate { + require.GreaterOrEqual(t, dr.LastSuccessfulTelemetryPing.Load(), before) + } else { + require.Equal(t, oldTimestamp, dr.LastSuccessfulTelemetryPing.Load()) + } + }) + } + +} + func TestUsageQuantization(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) diff --git a/pkg/ccl/spanconfigccl/spanconfigreconcilerccl/testdata/basic b/pkg/ccl/spanconfigccl/spanconfigreconcilerccl/testdata/basic index 85d3f1a1b0c7..38c7a9cbd7e5 100644 --- a/pkg/ccl/spanconfigccl/spanconfigreconcilerccl/testdata/basic +++ b/pkg/ccl/spanconfigccl/spanconfigreconcilerccl/testdata/basic @@ -74,6 +74,7 @@ upsert /Table/6{3-4} database system (host) upsert /Table/6{4-5} database system (host) upsert /Table/6{5-6} database system (host) upsert /Table/6{6-7} database system (host) +upsert /Table/6{7-8} database system (host) exec-sql CREATE DATABASE db; @@ -130,6 +131,7 @@ state offset=47 /Table/6{4-5} database system (host) /Table/6{5-6} database system (host) /Table/6{6-7} database system (host) +/Table/6{7-8} database system (host) /Table/10{6-7} num_replicas=7 num_voters=5 /Table/10{7-8} num_replicas=7 /Table/11{2-3} num_replicas=7 @@ -247,6 +249,8 @@ delete /Table/6{5-6} upsert /Table/6{5-6} ttl_seconds=100 ignore_strict_gc=true num_replicas=5 rangefeed_enabled=true delete /Table/6{6-7} upsert /Table/6{6-7} ttl_seconds=100 ignore_strict_gc=true num_replicas=5 rangefeed_enabled=true +delete /Table/6{7-8} +upsert /Table/6{7-8} ttl_seconds=100 ignore_strict_gc=true num_replicas=5 rangefeed_enabled=true state offset=5 limit=42 ---- diff --git a/pkg/ccl/spanconfigccl/spanconfigreconcilerccl/testdata/indexes b/pkg/ccl/spanconfigccl/spanconfigreconcilerccl/testdata/indexes index badedd2ef41c..f14ef9b03779 100644 --- a/pkg/ccl/spanconfigccl/spanconfigreconcilerccl/testdata/indexes +++ b/pkg/ccl/spanconfigccl/spanconfigreconcilerccl/testdata/indexes @@ -41,6 +41,7 @@ state offset=47 /Table/6{4-5} database system (host) /Table/6{5-6} database system (host) /Table/6{6-7} database system (host) +/Table/6{7-8} database system (host) /Table/10{6-7} range default exec-sql @@ -82,6 +83,7 @@ state offset=47 /Table/6{4-5} database system (host) /Table/6{5-6} database system (host) /Table/6{6-7} database system (host) +/Table/6{7-8} database system (host) /Table/106{-/2} num_replicas=7 /Table/106/{2-3} num_replicas=7 num_voters=5 /Table/10{6/3-7} num_replicas=7 @@ -125,6 +127,7 @@ state offset=47 /Table/6{4-5} database system (host) /Table/6{5-6} database system (host) /Table/6{6-7} database system (host) +/Table/6{7-8} database system (host) /Table/106{-/2} ttl_seconds=3600 num_replicas=7 /Table/106/{2-3} ttl_seconds=25 num_replicas=7 num_voters=5 /Table/10{6/3-7} ttl_seconds=3600 num_replicas=7 @@ -158,6 +161,7 @@ state offset=47 /Table/6{4-5} database system (host) /Table/6{5-6} database system (host) /Table/6{6-7} database system (host) +/Table/6{7-8} database system (host) /Table/106{-/2} ttl_seconds=3600 num_replicas=9 /Table/106/{2-3} ttl_seconds=25 num_replicas=9 num_voters=5 /Table/10{6/3-7} ttl_seconds=3600 num_replicas=9 @@ -205,3 +209,4 @@ state offset=46 /Table/6{4-5} database system (host) /Table/6{5-6} database system (host) /Table/6{6-7} database system (host) +/Table/6{7-8} database system (host) diff --git 
a/pkg/ccl/spanconfigccl/spanconfigreconcilerccl/testdata/multitenant/basic b/pkg/ccl/spanconfigccl/spanconfigreconcilerccl/testdata/multitenant/basic index 6e381711e8a2..f3cd74c1dbf4 100644 --- a/pkg/ccl/spanconfigccl/spanconfigreconcilerccl/testdata/multitenant/basic +++ b/pkg/ccl/spanconfigccl/spanconfigreconcilerccl/testdata/multitenant/basic @@ -42,6 +42,7 @@ state offset=47 /Table/6{4-5} database system (host) /Table/6{5-6} database system (host) /Table/6{6-7} database system (host) +/Table/6{7-8} database system (host) /Tenant/10{-\x00} database system (tenant) /Tenant/11{-\x00} database system (tenant) /Tenant/12{-\x00} database system (tenant) @@ -109,6 +110,7 @@ upsert /Tenant/10/Table/6{3-4} database system (tenant) upsert /Tenant/10/Table/6{4-5} database system (tenant) upsert /Tenant/10/Table/6{5-6} database system (tenant) upsert /Tenant/10/Table/6{6-7} database system (tenant) +upsert /Tenant/10/Table/6{7-8} database system (tenant) state offset=47 ---- @@ -133,6 +135,7 @@ state offset=47 /Table/6{4-5} database system (host) /Table/6{5-6} database system (host) /Table/6{6-7} database system (host) +/Table/6{7-8} database system (host) /Tenant/10{-/Table/4} database system (tenant) /Tenant/10/Table/{4-5} database system (tenant) /Tenant/10/Table/{5-6} database system (tenant) @@ -189,6 +192,7 @@ state offset=47 /Tenant/10/Table/6{4-5} database system (tenant) /Tenant/10/Table/6{5-6} database system (tenant) /Tenant/10/Table/6{6-7} database system (tenant) +/Tenant/10/Table/6{7-8} database system (tenant) /Tenant/11{-\x00} database system (tenant) /Tenant/12{-\x00} database system (tenant) @@ -213,6 +217,7 @@ upsert /Tenant/10/Table/11{3-4} rangefeed_enabled=true state offset=81 ---- ... +/Tenant/10/Table/2{0-1} database system (tenant) /Tenant/10/Table/2{1-2} database system (tenant) /Tenant/10/Table/2{3-4} database system (tenant) /Tenant/10/Table/2{4-5} database system (tenant) @@ -255,6 +260,7 @@ state offset=81 /Tenant/10/Table/6{4-5} database system (tenant) /Tenant/10/Table/6{5-6} database system (tenant) /Tenant/10/Table/6{6-7} database system (tenant) +/Tenant/10/Table/6{7-8} database system (tenant) /Tenant/10/Table/10{6-7} rangefeed_enabled=true /Tenant/10/Table/10{7-8} rangefeed_enabled=true /Tenant/10/Table/11{2-3} rangefeed_enabled=true diff --git a/pkg/ccl/spanconfigccl/spanconfigreconcilerccl/testdata/multitenant/protectedts b/pkg/ccl/spanconfigccl/spanconfigreconcilerccl/testdata/multitenant/protectedts index 7c85774d1e0c..bb5e06b987d8 100644 --- a/pkg/ccl/spanconfigccl/spanconfigreconcilerccl/testdata/multitenant/protectedts +++ b/pkg/ccl/spanconfigccl/spanconfigreconcilerccl/testdata/multitenant/protectedts @@ -39,6 +39,7 @@ state offset=47 /Table/6{4-5} database system (host) /Table/6{5-6} database system (host) /Table/6{6-7} database system (host) +/Table/6{7-8} database system (host) /Tenant/10{-\x00} database system (tenant) /Tenant/11{-\x00} database system (tenant) @@ -125,6 +126,7 @@ upsert /Tenant/10/Table/6{3-4} database system (tenant) upsert /Tenant/10/Table/6{4-5} database system (tenant) upsert /Tenant/10/Table/6{5-6} database system (tenant) upsert /Tenant/10/Table/6{6-7} database system (tenant) +upsert /Tenant/10/Table/6{7-8} database system (tenant) exec-sql tenant=10 CREATE DATABASE db; diff --git a/pkg/ccl/spanconfigccl/spanconfigreconcilerccl/testdata/multitenant/range_tenants b/pkg/ccl/spanconfigccl/spanconfigreconcilerccl/testdata/multitenant/range_tenants index ccdad786844e..240c0c4752ef 100644 --- 
a/pkg/ccl/spanconfigccl/spanconfigreconcilerccl/testdata/multitenant/range_tenants +++ b/pkg/ccl/spanconfigccl/spanconfigreconcilerccl/testdata/multitenant/range_tenants @@ -58,6 +58,7 @@ state offset=47 /Table/6{4-5} database system (host) /Table/6{5-6} database system (host) /Table/6{6-7} database system (host) +/Table/6{7-8} database system (host) /Tenant/10{-\x00} database system (tenant) /Tenant/11{-\x00} ttl_seconds=18000 ignore_strict_gc=true rangefeed_enabled=true /Tenant/12{-\x00} ttl_seconds=18000 ignore_strict_gc=true rangefeed_enabled=true @@ -94,6 +95,7 @@ state offset=47 /Table/6{4-5} database system (host) /Table/6{5-6} database system (host) /Table/6{6-7} database system (host) +/Table/6{7-8} database system (host) /Tenant/10{-/Table/4} database system (tenant) /Tenant/10/Table/{4-5} database system (tenant) /Tenant/10/Table/{5-6} database system (tenant) @@ -150,6 +152,7 @@ state offset=47 /Tenant/10/Table/6{4-5} database system (tenant) /Tenant/10/Table/6{5-6} database system (tenant) /Tenant/10/Table/6{6-7} database system (tenant) +/Tenant/10/Table/6{7-8} database system (tenant) /Tenant/11{-\x00} ttl_seconds=18000 ignore_strict_gc=true rangefeed_enabled=true /Tenant/12{-\x00} ttl_seconds=18000 ignore_strict_gc=true rangefeed_enabled=true @@ -187,6 +190,7 @@ mutations discard tenant=11 state offset=81 ---- ... +/Tenant/10/Table/2{0-1} database system (tenant) /Tenant/10/Table/2{1-2} database system (tenant) /Tenant/10/Table/2{3-4} database system (tenant) /Tenant/10/Table/2{4-5} database system (tenant) @@ -229,6 +233,7 @@ state offset=81 /Tenant/10/Table/6{4-5} database system (tenant) /Tenant/10/Table/6{5-6} database system (tenant) /Tenant/10/Table/6{6-7} database system (tenant) +/Tenant/10/Table/6{7-8} database system (tenant) /Tenant/10/Table/10{6-7} rangefeed_enabled=true /Tenant/10/Table/10{7-8} rangefeed_enabled=true /Tenant/11{-/Table/4} ttl_seconds=18000 ignore_strict_gc=true rangefeed_enabled=true @@ -287,6 +292,7 @@ state offset=81 /Tenant/11/Table/6{4-5} ttl_seconds=18000 ignore_strict_gc=true rangefeed_enabled=true /Tenant/11/Table/6{5-6} ttl_seconds=18000 ignore_strict_gc=true rangefeed_enabled=true /Tenant/11/Table/6{6-7} ttl_seconds=18000 ignore_strict_gc=true rangefeed_enabled=true +/Tenant/11/Table/6{7-8} ttl_seconds=18000 ignore_strict_gc=true rangefeed_enabled=true /Tenant/12{-\x00} ttl_seconds=18000 ignore_strict_gc=true rangefeed_enabled=true query-sql tenant=11 diff --git a/pkg/ccl/spanconfigccl/spanconfigreconcilerccl/testdata/multitenant/tenant_end_key_split b/pkg/ccl/spanconfigccl/spanconfigreconcilerccl/testdata/multitenant/tenant_end_key_split index bab0223a3ef3..26b28cb40571 100644 --- a/pkg/ccl/spanconfigccl/spanconfigreconcilerccl/testdata/multitenant/tenant_end_key_split +++ b/pkg/ccl/spanconfigccl/spanconfigreconcilerccl/testdata/multitenant/tenant_end_key_split @@ -25,6 +25,7 @@ state offset=59 /Table/6{4-5} database system (host) /Table/6{5-6} database system (host) /Table/6{6-7} database system (host) +/Table/6{7-8} database system (host) /Tenant/11{-\x00} database system (tenant) /Tenant/12{-\x00} database system (tenant) @@ -53,6 +54,7 @@ state offset=60 limit=3 state offset=103 ---- ... 
+/Tenant/11/Table/4{5-6} ignore_strict_gc=true rangefeed_enabled=true exclude_data_from_backup=true /Tenant/11/Table/4{6-7} database system (tenant) /Tenant/11/Table/4{7-8} ignore_strict_gc=true rangefeed_enabled=true exclude_data_from_backup=true /Tenant/11/Table/4{8-9} database system (tenant) @@ -73,6 +75,7 @@ state offset=103 /Tenant/11/Table/6{4-5} database system (tenant) /Tenant/11/Table/6{5-6} database system (tenant) /Tenant/11/Table/6{6-7} database system (tenant) +/Tenant/11/Table/6{7-8} database system (tenant) /Tenant/12{-\x00} database system (tenant) # Just another view of what the tenant's reconciler actually did. It got rid of @@ -138,6 +141,7 @@ upsert /Tenant/11/Table/6{3-4} database system (tenant) upsert /Tenant/11/Table/6{4-5} database system (tenant) upsert /Tenant/11/Table/6{5-6} database system (tenant) upsert /Tenant/11/Table/6{6-7} database system (tenant) +upsert /Tenant/11/Table/6{7-8} database system (tenant) # Initialize a new tenant, tenant=10, that DOES have a pre-existing tenant, # tenant=11, next to it. diff --git a/pkg/ccl/spanconfigccl/spanconfigreconcilerccl/testdata/named_zones b/pkg/ccl/spanconfigccl/spanconfigreconcilerccl/testdata/named_zones index 3efbf1218381..acdc6fc99700 100644 --- a/pkg/ccl/spanconfigccl/spanconfigreconcilerccl/testdata/named_zones +++ b/pkg/ccl/spanconfigccl/spanconfigreconcilerccl/testdata/named_zones @@ -144,6 +144,7 @@ state offset=46 /Table/6{4-5} database system (host) /Table/6{5-6} database system (host) /Table/6{6-7} database system (host) +/Table/6{7-8} database system (host) /Table/10{6-7} ttl_seconds=50 # Make sure future descendants observe the same. @@ -179,6 +180,7 @@ state offset=46 /Table/6{4-5} database system (host) /Table/6{5-6} database system (host) /Table/6{6-7} database system (host) +/Table/6{7-8} database system (host) /Table/10{6-7} ttl_seconds=50 /Table/10{7-8} ttl_seconds=50 diff --git a/pkg/ccl/spanconfigccl/spanconfigreconcilerccl/testdata/partitions b/pkg/ccl/spanconfigccl/spanconfigreconcilerccl/testdata/partitions index a6c9fc6a7f0e..7dd22d33850e 100644 --- a/pkg/ccl/spanconfigccl/spanconfigreconcilerccl/testdata/partitions +++ b/pkg/ccl/spanconfigccl/spanconfigreconcilerccl/testdata/partitions @@ -33,6 +33,7 @@ state offset=47 /Table/6{4-5} database system (host) /Table/6{5-6} database system (host) /Table/6{6-7} database system (host) +/Table/6{7-8} database system (host) exec-sql CREATE DATABASE db; @@ -72,6 +73,7 @@ state offset=47 /Table/6{4-5} database system (host) /Table/6{5-6} database system (host) /Table/6{6-7} database system (host) +/Table/6{7-8} database system (host) /Table/10{6-7} range default # All parent schema zone config changes cascade to the entire table's span. 
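The comment above states the invariant these expectations encode: a zone configuration applied to a parent object cascades to the entire span of any table that has no zone of its own. A minimal Go sketch of that cascade as seen from the SQL surface — not part of this patch, and assuming a local insecure single-node cluster, the lib/pq driver, and throwaway names db and db.t — looks like this:

package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/lib/pq" // CockroachDB speaks the Postgres wire protocol.
)

func main() {
	// Hypothetical connection string; assumes `cockroach start-single-node --insecure`.
	db, err := sql.Open("postgres",
		"postgresql://root@localhost:26257/defaultdb?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	for _, stmt := range []string{
		`CREATE DATABASE IF NOT EXISTS db`,
		`CREATE TABLE IF NOT EXISTS db.t (i INT PRIMARY KEY)`,
		// Set the zone only on the parent database, never on the table.
		`ALTER DATABASE db CONFIGURE ZONE USING num_replicas = 7, num_voters = 5`,
	} {
		if _, err := db.Exec(stmt); err != nil {
			log.Fatal(err)
		}
	}

	// The table has no zone of its own, so its effective configuration is
	// the database's zone -- the cascade the state output above checks.
	var target, rawConfig string
	if err := db.QueryRow(
		`SHOW ZONE CONFIGURATION FROM TABLE db.t`).Scan(&target, &rawConfig); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s:\n%s\n", target, rawConfig)
}

The reported target is DATABASE db rather than the table itself, the same inheritance that produces the num_replicas=7 num_voters=5 line for /Table/10{6-7} in the expectations that follow.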
@@ -108,6 +110,7 @@ state offset=47 /Table/6{4-5} database system (host) /Table/6{5-6} database system (host) /Table/6{6-7} database system (host) +/Table/6{7-8} database system (host) /Table/10{6-7} num_replicas=7 num_voters=5 # Apply a zone configuration on one of the partitions, `one_two`, which @@ -149,6 +152,7 @@ state offset=47 /Table/6{4-5} database system (host) /Table/6{5-6} database system (host) /Table/6{6-7} database system (host) +/Table/6{7-8} database system (host) /Table/106{-/1/1} num_replicas=7 num_voters=5 /Table/106/1/{1-2} global_reads=true num_replicas=7 num_voters=5 /Table/106/1/{2-3} global_reads=true num_replicas=7 num_voters=5 @@ -190,6 +194,7 @@ state offset=47 /Table/6{4-5} database system (host) /Table/6{5-6} database system (host) /Table/6{6-7} database system (host) +/Table/6{7-8} database system (host) /Table/106{-/1/1} num_replicas=7 num_voters=5 /Table/106/1/{1-2} global_reads=true num_replicas=7 num_voters=5 /Table/106/1/{2-3} global_reads=true num_replicas=7 num_voters=5 @@ -245,6 +250,7 @@ state offset=47 /Table/6{4-5} database system (host) /Table/6{5-6} database system (host) /Table/6{6-7} database system (host) +/Table/6{7-8} database system (host) /Table/106{-/1} num_replicas=7 num_voters=5 /Table/106/1{-/1} num_replicas=7 num_voters=6 /Table/106/1/{1-2} global_reads=true num_replicas=7 num_voters=5 @@ -298,6 +304,7 @@ state offset=47 /Table/6{4-5} database system (host) /Table/6{5-6} database system (host) /Table/6{6-7} database system (host) +/Table/6{7-8} database system (host) /Table/106{-/1} num_replicas=7 /Table/106/1{-/1} num_replicas=7 num_voters=6 /Table/106/1/{1-2} global_reads=true num_replicas=7 @@ -345,3 +352,4 @@ state offset=47 /Table/6{4-5} database system (host) /Table/6{5-6} database system (host) /Table/6{6-7} database system (host) +/Table/6{7-8} database system (host) diff --git a/pkg/ccl/spanconfigccl/spanconfigreconcilerccl/testdata/protectedts b/pkg/ccl/spanconfigccl/spanconfigreconcilerccl/testdata/protectedts index 0e6bbe3cae56..df1a1d8508f4 100644 --- a/pkg/ccl/spanconfigccl/spanconfigreconcilerccl/testdata/protectedts +++ b/pkg/ccl/spanconfigccl/spanconfigreconcilerccl/testdata/protectedts @@ -96,6 +96,7 @@ state offset=51 /Table/6{4-5} database system (host) /Table/6{5-6} database system (host) /Table/6{6-7} database system (host) +/Table/6{7-8} database system (host) /Table/10{6-7} protection_policies=[{ts: 3} {ts: 4}] /Table/10{7-8} protection_policies=[{ts: 3} {ts: 4}] @@ -154,6 +155,7 @@ state offset=57 /Table/6{4-5} database system (host) /Table/6{5-6} database system (host) /Table/6{6-7} database system (host) +/Table/6{7-8} database system (host) /Table/10{6-7} protection_policies=[{ts: 3} {ts: 4}] /Table/10{7-8} protection_policies=[{ts: 3} {ts: 4}] @@ -178,5 +180,6 @@ state offset=57 /Table/6{4-5} database system (host) /Table/6{5-6} database system (host) /Table/6{6-7} database system (host) +/Table/6{7-8} database system (host) /Table/10{6-7} range default /Table/10{7-8} range default diff --git a/pkg/ccl/spanconfigccl/spanconfigsqltranslatorccl/testdata/full_translate b/pkg/ccl/spanconfigccl/spanconfigsqltranslatorccl/testdata/full_translate index 70cc62ac4439..e3cd919048cb 100644 --- a/pkg/ccl/spanconfigccl/spanconfigsqltranslatorccl/testdata/full_translate +++ b/pkg/ccl/spanconfigccl/spanconfigsqltranslatorccl/testdata/full_translate @@ -90,6 +90,7 @@ full-translate /Table/6{4-5} database system (host) /Table/6{5-6} database system (host) /Table/6{6-7} database system (host) +/Table/6{7-8} database system 
(host) /Table/11{0-1} range default /Table/11{1-2} range default /Table/11{2-3} range default diff --git a/pkg/ccl/spanconfigccl/spanconfigsqltranslatorccl/testdata/full_translate_named_zones_deleted b/pkg/ccl/spanconfigccl/spanconfigsqltranslatorccl/testdata/full_translate_named_zones_deleted index dc1446d9e896..1d066d0cc22e 100644 --- a/pkg/ccl/spanconfigccl/spanconfigsqltranslatorccl/testdata/full_translate_named_zones_deleted +++ b/pkg/ccl/spanconfigccl/spanconfigsqltranslatorccl/testdata/full_translate_named_zones_deleted @@ -116,3 +116,4 @@ full-translate /Table/6{4-5} database system (host) /Table/6{5-6} database system (host) /Table/6{6-7} database system (host) +/Table/6{7-8} database system (host) diff --git a/pkg/ccl/spanconfigccl/spanconfigsqltranslatorccl/testdata/partitions b/pkg/ccl/spanconfigccl/spanconfigsqltranslatorccl/testdata/partitions index c467d91643d1..6dadcbeabe8e 100644 --- a/pkg/ccl/spanconfigccl/spanconfigsqltranslatorccl/testdata/partitions +++ b/pkg/ccl/spanconfigccl/spanconfigsqltranslatorccl/testdata/partitions @@ -148,3 +148,22 @@ translate database=db table=partition_by_list /Table/109/1/{6-7} ttl_seconds=4 /Table/109/{1/7-2} ttl_seconds=4 /Table/109/{2-PrefixEnd} ttl_seconds=1 + +exec-sql +CREATE TABLE db.test(i INT PRIMARY KEY, j INT) PARTITION BY LIST (i) ( + PARTITION one_and_five VALUES IN (1, 5) +); +ALTER PARTITION one_and_five OF TABLE db.test CONFIGURE ZONE USING gc.ttlseconds = 2; +ALTER INDEX db.test@test_pkey CONFIGURE ZONE USING num_replicas = 4; +ALTER PARTITION one_and_five OF TABLE db.test CONFIGURE ZONE USING gc.ttlseconds = 3; +---- + +translate database=db table=test +---- +/Table/110{-/1} range default +/Table/110/1{-/1} num_replicas=4 +/Table/110/1/{1-2} ttl_seconds=3 num_replicas=4 +/Table/110/1/{2-5} num_replicas=4 +/Table/110/1/{5-6} ttl_seconds=3 num_replicas=4 +/Table/110/{1/6-2} num_replicas=4 +/Table/11{0/2-1} range default diff --git a/pkg/ccl/spanconfigccl/spanconfigsqltranslatorccl/testdata/system_database b/pkg/ccl/spanconfigccl/spanconfigsqltranslatorccl/testdata/system_database index 6265954731c2..431ed189fea2 100644 --- a/pkg/ccl/spanconfigccl/spanconfigsqltranslatorccl/testdata/system_database +++ b/pkg/ccl/spanconfigccl/spanconfigsqltranslatorccl/testdata/system_database @@ -66,6 +66,7 @@ translate database=system /Table/6{4-5} database system (host) /Table/6{5-6} database system (host) /Table/6{6-7} database system (host) +/Table/6{7-8} database system (host) # Alter zone config fields on the database to ensure the effects cascade. exec-sql @@ -136,6 +137,7 @@ translate database=system /Table/6{4-5} ignore_strict_gc=true num_replicas=7 rangefeed_enabled=true /Table/6{5-6} ignore_strict_gc=true num_replicas=7 rangefeed_enabled=true /Table/6{6-7} ignore_strict_gc=true num_replicas=7 rangefeed_enabled=true +/Table/6{7-8} ignore_strict_gc=true num_replicas=7 rangefeed_enabled=true # Alter a named range that maps to a pseudo table ID, ensuring that its effects # are independent. 
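The named ranges referenced in the comment above (liveness, timeseries, meta, and so on) map to pseudo table IDs rather than real descriptors, so configuring one must leave the system database's own zone unchanged. A short sketch of that independence, under the same local-cluster and driver assumptions as the earlier example:

package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/lib/pq"
)

func main() {
	// Hypothetical connection string; adjust for your cluster.
	db, err := sql.Open("postgres",
		"postgresql://root@localhost:26257/defaultdb?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Configure only the named range, which maps to a pseudo table ID.
	if _, err := db.Exec(
		`ALTER RANGE liveness CONFIGURE ZONE USING gc.ttlseconds = 600`); err != nil {
		log.Fatal(err)
	}

	// The named range reports its own zone...
	var target, rawConfig string
	if err := db.QueryRow(
		`SHOW ZONE CONFIGURATION FROM RANGE liveness`).Scan(&target, &rawConfig); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s:\n%s\n\n", target, rawConfig)

	// ...while the system database's effective zone does not pick it up.
	if err := db.QueryRow(
		`SHOW ZONE CONFIGURATION FROM DATABASE system`).Scan(&target, &rawConfig); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s:\n%s\n", target, rawConfig)
}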
@@ -212,3 +214,4 @@ full-translate /Table/6{4-5} ignore_strict_gc=true num_replicas=7 rangefeed_enabled=true /Table/6{5-6} ignore_strict_gc=true num_replicas=7 rangefeed_enabled=true /Table/6{6-7} ignore_strict_gc=true num_replicas=7 rangefeed_enabled=true +/Table/6{7-8} ignore_strict_gc=true num_replicas=7 rangefeed_enabled=true diff --git a/pkg/ccl/spanconfigccl/spanconfigsqltranslatorccl/testdata/tenant/full_translate b/pkg/ccl/spanconfigccl/spanconfigsqltranslatorccl/testdata/tenant/full_translate index 223cff735db4..9a226d9c7f20 100644 --- a/pkg/ccl/spanconfigccl/spanconfigsqltranslatorccl/testdata/tenant/full_translate +++ b/pkg/ccl/spanconfigccl/spanconfigsqltranslatorccl/testdata/tenant/full_translate @@ -73,6 +73,7 @@ full-translate /Tenant/10/Table/6{4-5} database system (tenant) /Tenant/10/Table/6{5-6} database system (tenant) /Tenant/10/Table/6{6-7} database system (tenant) +/Tenant/10/Table/6{7-8} database system (tenant) /Tenant/10/Table/11{0-1} rangefeed_enabled=true /Tenant/10/Table/11{1-2} rangefeed_enabled=true /Tenant/10/Table/11{2-3} rangefeed_enabled=true @@ -136,6 +137,7 @@ translate named-zone=default /Tenant/10/Table/6{4-5} database system (tenant) /Tenant/10/Table/6{5-6} database system (tenant) /Tenant/10/Table/6{6-7} database system (tenant) +/Tenant/10/Table/6{7-8} database system (tenant) /Tenant/10/Table/11{0-1} rangefeed_enabled=true /Tenant/10/Table/11{1-2} rangefeed_enabled=true /Tenant/10/Table/11{2-3} rangefeed_enabled=true diff --git a/pkg/ccl/sqlitelogictestccl/tests/3node-tenant/generated_test.go b/pkg/ccl/sqlitelogictestccl/tests/3node-tenant/generated_test.go index 65ff6984915e..32c0f7ab9b4f 100644 --- a/pkg/ccl/sqlitelogictestccl/tests/3node-tenant/generated_test.go +++ b/pkg/ccl/sqlitelogictestccl/tests/3node-tenant/generated_test.go @@ -31,7 +31,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/randutil" ) -const configIdx = 9 +const configIdx = 10 var sqliteLogicTestDir string diff --git a/pkg/ccl/storageccl/BUILD.bazel b/pkg/ccl/storageccl/BUILD.bazel index 6a06ce70aa1e..81ac5ff6e6cb 100644 --- a/pkg/ccl/storageccl/BUILD.bazel +++ b/pkg/ccl/storageccl/BUILD.bazel @@ -47,6 +47,7 @@ go_test( "//pkg/testutils/serverutils", "//pkg/testutils/storageutils", "//pkg/testutils/testcluster", + "//pkg/util/cidr", "//pkg/util/encoding", "//pkg/util/humanizeutil", "//pkg/util/ioctx", diff --git a/pkg/ccl/storageccl/engineccl/BUILD.bazel b/pkg/ccl/storageccl/engineccl/BUILD.bazel index f146b628c2d0..555437bfa324 100644 --- a/pkg/ccl/storageccl/engineccl/BUILD.bazel +++ b/pkg/ccl/storageccl/engineccl/BUILD.bazel @@ -32,8 +32,8 @@ go_library( "@com_github_cockroachdb_pebble//vfs", "@com_github_cockroachdb_pebble//vfs/atomicfs", "@com_github_gogo_protobuf//proto", - "@com_github_lestrrat_go_jwx//jwa", - "@com_github_lestrrat_go_jwx//jwk", + "@com_github_lestrrat_go_jwx_v2//jwa", + "@com_github_lestrrat_go_jwx_v2//jwk", ], ) diff --git a/pkg/ccl/storageccl/engineccl/encrypted_fs_test.go b/pkg/ccl/storageccl/engineccl/encrypted_fs_test.go index cb714daa48d1..297433f2ccc0 100644 --- a/pkg/ccl/storageccl/engineccl/encrypted_fs_test.go +++ b/pkg/ccl/storageccl/engineccl/encrypted_fs_test.go @@ -536,7 +536,7 @@ func (etfs *encryptedTestFS) restart() error { etfs.encEnv.Closer.Close() etfs.encEnv = nil } - etfs.mem.ResetToSyncedState() + etfs.mem = etfs.mem.CrashClone(vfs.CrashCloneCfg{}) ei := &errorInjector{prob: etfs.errorProb, rand: etfs.errorRand} fsMeta := errorfs.Wrap(etfs.mem, ei) // TODO(sumeer): Do deterministic rollover of file registry after 
small @@ -559,7 +559,7 @@ func (etfs *encryptedTestFS) restart() error { } func makeEncryptedTestFS(t *testing.T, errorProb float64, errorRand *rand.Rand) *encryptedTestFS { - mem := vfs.NewStrictMem() + mem := vfs.NewCrashableMem() keyFile128 := "111111111111111111111111111111111234567890123456" writeToFile(t, mem, "16.key", []byte(keyFile128)) dir, err := mem.OpenDir("/") diff --git a/pkg/ccl/storageccl/engineccl/pebble_key_manager.go b/pkg/ccl/storageccl/engineccl/pebble_key_manager.go index 0aac15853120..724d689bce93 100644 --- a/pkg/ccl/storageccl/engineccl/pebble_key_manager.go +++ b/pkg/ccl/storageccl/engineccl/pebble_key_manager.go @@ -26,8 +26,8 @@ import ( "github.com/cockroachdb/pebble/vfs" "github.com/cockroachdb/pebble/vfs/atomicfs" "github.com/gogo/protobuf/proto" - "github.com/lestrrat-go/jwx/jwa" - "github.com/lestrrat-go/jwx/jwk" + "github.com/lestrrat-go/jwx/v2/jwa" + "github.com/lestrrat-go/jwx/v2/jwk" ) const ( @@ -163,7 +163,7 @@ func LoadKeyFromFile(fs vfs.FS, filename string) (*enginepbccl.SecretKey, error) // Since random generation of 48+ bytes will not produce a valid json object, // if the file parses as JWK, assume that was the intended format. if keySet, jwkErr := jwk.Parse(b); jwkErr == nil { - jwKey, ok := keySet.Get(0) + jwKey, ok := keySet.Key(0) if !ok { return nil, fmt.Errorf("JWKS file contains no keys") } @@ -173,7 +173,7 @@ func LoadKeyFromFile(fs vfs.FS, filename string) (*enginepbccl.SecretKey, error) if jwKey.KeyType() != jwa.OctetSeq { return nil, fmt.Errorf("expected kty=oct, found %s", jwKey.KeyType()) } - key.Info.EncryptionType, err = enginepbccl.EncryptionTypeFromJWKAlgorithm(jwKey.Algorithm()) + key.Info.EncryptionType, err = enginepbccl.EncryptionTypeFromJWKAlgorithm(jwKey.Algorithm().String()) if err != nil { return nil, err } diff --git a/pkg/ccl/storageccl/external_sst_reader_test.go b/pkg/ccl/storageccl/external_sst_reader_test.go index 8ee0fe0f858c..54e6cbe72d72 100644 --- a/pkg/ccl/storageccl/external_sst_reader_test.go +++ b/pkg/ccl/storageccl/external_sst_reader_test.go @@ -21,6 +21,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/storage" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/storageutils" + "github.com/cockroachdb/cockroach/pkg/util/cidr" "github.com/cockroachdb/cockroach/pkg/util/encoding" "github.com/cockroachdb/cockroach/pkg/util/ioctx" "github.com/cockroachdb/cockroach/pkg/util/leaktest" @@ -170,7 +171,7 @@ func TestNewExternalSSTReaderFailure(t *testing.T) { ctx := context.Background() settings := cluster.MakeTestingClusterSettings() - metrics := cloud.MakeMetrics() + metrics := cloud.MakeMetrics(cidr.NewLookup(&settings.SV)) const localFoo = "nodelocal://1/foo" diff --git a/pkg/ccl/telemetryccl/telemetry_logging_test.go b/pkg/ccl/telemetryccl/telemetry_logging_test.go index 2aaa099633ac..eb4f6c4ae43e 100644 --- a/pkg/ccl/telemetryccl/telemetry_logging_test.go +++ b/pkg/ccl/telemetryccl/telemetry_logging_test.go @@ -436,7 +436,7 @@ func cleanUpObjectsBeforeRestore( if len(dbMatch) > 0 { dbName := dbMatch[1] if _, err := db.ExecContext(ctx, fmt.Sprintf("DROP DATABASE IF EXISTS %s CASCADE", dbName)); err != nil { - t.Errorf(errors.Wrapf(err, "failed to drop database %q before restore", dbName).Error()) + t.Error(errors.Wrapf(err, "failed to drop database %q before restore", dbName).Error()) } } @@ -445,7 +445,7 @@ func cleanUpObjectsBeforeRestore( if 
len(tableMatch) > 0 { tableName := tableMatch[1] if _, err := db.ExecContext(ctx, fmt.Sprintf("DROP TABLE IF EXISTS %s", tableName)); err != nil { - t.Errorf(errors.Wrapf(err, "failed to drop table %q before restore", tableName).Error()) + t.Error(errors.Wrapf(err, "failed to drop table %q before restore", tableName).Error()) } } } diff --git a/pkg/ccl/testccl/authccl/testdata/jwt b/pkg/ccl/testccl/authccl/testdata/jwt index 9bf3dd5d0c00..9e4109033cd2 100644 --- a/pkg/ccl/testccl/authccl/testdata/jwt +++ b/pkg/ccl/testccl/authccl/testdata/jwt @@ -67,7 +67,7 @@ subtest expired_token connect user=jwt_user options=--crdb:jwt_auth_enabled=true password=eyJhbGciOiJSUzI1NiIsImtpZCI6InRlc3QyIn0.eyJhdWQiOiJ0ZXN0X2NsdXN0ZXIiLCJleHAiOjE2NjEyNjQzOTgsImlhdCI6MTY2MTI2NDM5OCwiaXNzIjoiaXNzdWVyMiIsInN1YiI6InRlc3QyIn0.1nWuqpwj4uPDk0pyyqEJhpIgyridv699B7OjEBGSyQ8iyrqryeG1yr7oP1qnKlrcqtbVmuB5ELJoXNUerd8BL0GQBMCkkxjG1cuLvLNOWo5yzifcfYHiiaCL25EblWG46eBrxAeHmqGigQiIpSUPjQTlZT_lRLrEI9h_xQhwNp5AnsY2S1f8N4oaMqjUjgREGdLhZT9sOyNmrf5uowTFcR3aWBkpIB5Ac5rvI8-U7-D1rY5KJ3Wez4G2L3Miyof_lOlK1g8XwAasCPKlhHea5qZNjqHLqgOb5EIQ_yd_KICT7pFLSgMXw_IJ9c68z-H1N7wEivnnLydgQUR3WVEytA ---- ERROR: JWT authentication: invalid token (SQLSTATE 28000) -DETAIL: unable to parse token: exp not satisfied +DETAIL: unable to parse token: "exp" not satisfied subtest end @@ -81,7 +81,7 @@ jwt_cluster_setting jwks={"keys":[{"kty":"RSA","use":"sig","alg":"RS256","kid":" connect user=jwt_user options=--crdb:jwt_auth_enabled=true password=eyJhbGciOiJSUzI1NiIsImtpZCI6InRlc3QyIn0.eyJhdWQiOiJ0ZXN0X2NsdXN0ZXIiLCJleHAiOjI2NjEyNjQyNjksImlhdCI6MTY2MTI2NDI2OSwiaXNzIjoiaXNzdWVyMiIsInN1YiI6InRlc3QyIn0.Tot41E-wSz24wo1wj3b8CwEr-O_dqWZoHZkAh2x4nfK2hT4yhfiOcajmKQJVVZX2_897c8uDOqfLzl77JEe-AX4mlEBZXWUNqwwQIdIFZxpL6FEV_YjvTF0bQuu9oeD7kYW-6i3-QQpB6QpCVb-wLW8bBbJ4zCap88nYk14HZH-ZYSzPAP7YEVppHQNhWrxQ66nQU__RuYeQdL6J5Edes9qCHUgqnZCnMPzDZ4l_3Pc5tTSNVcOUl5MMHsvrYsb0VtSFTNCOjJIADXbc2KzVbfqLt-ArUDxs36__u_g84TfGFXoT0VTDbDjYwD7wpyLuT3oLcJuA4m_tto6Rrn7Rww ---- ERROR: JWT authentication: invalid token (SQLSTATE 28000) -DETAIL: unable to parse token: failed to find key with key ID "test2" in key set +DETAIL: unable to parse token: key provider 0 failed: failed to find key with key ID "test2" in key set jwt_cluster_setting jwks={"keys":[{"kty":"RSA","use":"sig","alg":"RS256","kid":"test","n":"sJCwOk5gVjZZu3oaODecZaT_-Lee7J-q3rQIvCilg-7B8fFNJ2XHZCsF74JX2d7ePyjz7u9d2r5CvstufiH0qGPHBBm0aKrxGRILRGUTfqBs8Dnrnv9ymTEFsRUQjgy9ACUfwcgLVQIwv1NozySLb4Z5N8X91b0TmcJun6yKjBrnr1ynUsI_XXjzLnDpJ2Ng_shuj-z7DKSEeiFUg9eSFuTeg_wuHtnnhw4Y9pwT47c-XBYnqtGYMADSVEzKLQbUini0p4-tfYboF6INluKQsO5b1AZaaXgmStPIqteS7r2eR3LFL-XB7rnZOR4cAla773Cq5DD-8RnYamnmmLu_gQ","e":"AQAB"},{"kty":"RSA","use":"sig","alg":"RS256","kid":"test2","n":"3gOrVdePypBAs6bTwD-6dZhMuwOSq8QllMihBfcsiRmo3c14_wfa_DRDy3kSsacwdih5-CaeF8ou-Dan6WqXzjDyJNekmGltPLfO2XB5FkHQoZ-X9lnXktsAgNLj3WsKjr-xUxrh8p8FFz62HJYN8QGaNttWBJZb3CgdzF7i8bPqVet4P1ekzs7mPBH2arEDy1f1q4o7fpmw0t9wuCrmtkj_g_eS6Hi2Rxm3m7HJUFVVbQeuZlT_W84FUzpSQCkNi2QDvoNVVCE2DSYZxDrzRxSZSv_fIh5XeJhwYY-f8iEfI4qx91ONGzGMvPn2GagrBnLBQRx-6RsORh4YmOOeeQ","e":"AQAB"}]} ---- diff --git a/pkg/ccl/testccl/sqlccl/BUILD.bazel b/pkg/ccl/testccl/sqlccl/BUILD.bazel index a82521feb1ea..edbfcab19cf9 100644 --- a/pkg/ccl/testccl/sqlccl/BUILD.bazel +++ b/pkg/ccl/testccl/sqlccl/BUILD.bazel @@ -39,7 +39,6 @@ go_test( "//pkg/settings/cluster", "//pkg/spanconfig", "//pkg/sql", - "//pkg/sql/catalog", "//pkg/sql/gcjob", "//pkg/sql/isql", "//pkg/sql/lexbase", diff --git a/pkg/ccl/testccl/sqlccl/explain_test.go 
b/pkg/ccl/testccl/sqlccl/explain_test.go index 47c269f1d662..733ec4580f84 100644 --- a/pkg/ccl/testccl/sqlccl/explain_test.go +++ b/pkg/ccl/testccl/sqlccl/explain_test.go @@ -16,10 +16,10 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/internal/sqlsmith" - "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/lexbase" "github.com/cockroachdb/cockroach/pkg/sql/sqltestutils" "github.com/cockroachdb/cockroach/pkg/sql/tests" + "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/testutils/skip" "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" @@ -253,7 +253,7 @@ func TestExplainGist(t *testing.T) { if err != nil { // We might be still in the process of cancelling the previous // DROP operation - ignore this particular error. - if !errors.Is(err, catalog.ErrDescriptorDropped) { + if !testutils.IsError(err, "descriptor is being dropped") { t.Fatal(err) } continue diff --git a/pkg/ccl/utilccl/BUILD.bazel b/pkg/ccl/utilccl/BUILD.bazel index caf5aff401eb..40571896bcc8 100644 --- a/pkg/ccl/utilccl/BUILD.bazel +++ b/pkg/ccl/utilccl/BUILD.bazel @@ -10,6 +10,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/ccl/utilccl/licenseccl", + "//pkg/server/license", "//pkg/settings", "//pkg/settings/cluster", "//pkg/sql", @@ -19,8 +20,6 @@ go_library( "//pkg/sql/sem/tree", "//pkg/util/envutil", "//pkg/util/log", - "//pkg/util/metric", - "//pkg/util/stop", "//pkg/util/timeutil", "@com_github_cockroachdb_errors//:errors", "@com_github_cockroachdb_redact//:redact", @@ -40,10 +39,16 @@ go_test( "//pkg/base", "//pkg/ccl", "//pkg/ccl/utilccl/licenseccl", + "//pkg/security/securityassets", + "//pkg/security/securitytest", + "//pkg/server", + "//pkg/server/license", "//pkg/settings", "//pkg/settings/cluster", "//pkg/testutils", + "//pkg/testutils/serverutils", "//pkg/util/envutil", + "//pkg/util/leaktest", "//pkg/util/randutil", "//pkg/util/stop", "//pkg/util/timeutil", diff --git a/pkg/ccl/utilccl/license_check.go b/pkg/ccl/utilccl/license_check.go index 07262a736ed7..08446094029e 100644 --- a/pkg/ccl/utilccl/license_check.go +++ b/pkg/ccl/utilccl/license_check.go @@ -15,6 +15,7 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/ccl/utilccl/licenseccl" + licenseserver "github.com/cockroachdb/cockroach/pkg/server/license" "github.com/cockroachdb/cockroach/pkg/settings" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql" @@ -22,8 +23,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/util/envutil" "github.com/cockroachdb/cockroach/pkg/util/log" - "github.com/cockroachdb/cockroach/pkg/util/metric" - "github.com/cockroachdb/cockroach/pkg/util/stop" "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/errors" "github.com/cockroachdb/redact" @@ -145,49 +144,24 @@ func IsEnterpriseEnabled(st *cluster.Settings, feature string) bool { st, timeutil.Now(), feature, false /* withDetails */) == nil } -var licenseMetricUpdateFrequency = 1 * time.Minute - -// UpdateMetricOnLicenseChange starts a task to periodically update -// the given metric with the seconds remaining until license expiry. 
-func UpdateMetricOnLicenseChange( +// GetLicenseTTL is a function which returns the TTL for the active cluster. +// This is done by reading the license information from the cluster settings +// and subtracting the current time from the expiry timestamp. +var GetLicenseTTL = func( ctx context.Context, st *cluster.Settings, - metric *metric.Gauge, ts timeutil.TimeSource, - stopper *stop.Stopper, -) error { - enterpriseLicense.SetOnChange(&st.SV, func(ctx context.Context) { - updateMetricWithLicenseTTL(ctx, st, metric, ts) - }) - return stopper.RunAsyncTask(ctx, "write-license-expiry-metric", func(ctx context.Context) { - ticker := time.NewTicker(licenseMetricUpdateFrequency) - defer ticker.Stop() - for { - select { - case <-ticker.C: - updateMetricWithLicenseTTL(ctx, st, metric, ts) - case <-stopper.ShouldQuiesce(): - return - } - } - }) -} - -func updateMetricWithLicenseTTL( - ctx context.Context, st *cluster.Settings, metric *metric.Gauge, ts timeutil.TimeSource, -) { +) int64 { license, err := getLicense(st) if err != nil { - log.Errorf(ctx, "unable to update license expiry metric: %v", err) - metric.Update(0) - return + log.Errorf(ctx, "unable to find license: %v", err) + return 0 } if license == nil { - metric.Update(0) - return + return 0 } sec := timeutil.Unix(license.ValidUntilUnixSec, 0).Sub(ts.Now()).Seconds() - metric.Update(int64(sec)) + return int64(sec) } // AllCCLCodeImported is set by the `ccl` pkg in an init(), thereby @@ -263,15 +237,15 @@ func GetLicenseType(st *cluster.Settings) (string, error) { return license.Type.String(), nil } -// GetLicenseUsage returns the license usage. -func GetLicenseUsage(st *cluster.Settings) (string, error) { +// GetLicenseEnvironment returns the license environment. +func GetLicenseEnvironment(st *cluster.Settings) (string, error) { license, err := getLicense(st) if err != nil { return "", err } else if license == nil { return "", nil } - return license.Usage.String(), nil + return license.Environment.String(), nil } // decode attempts to read a base64 encoded License. @@ -335,3 +309,37 @@ func check(l *licenseccl.License, at time.Time, org, feature string, withDetails return pgerror.Newf(pgcode.CCLValidLicenseRequired, "license valid only for %q", l.OrganizationName) } + +// RegisterCallbackOnLicenseChange will register a callback to update the +// license enforcer whenever the license changes. +func RegisterCallbackOnLicenseChange(ctx context.Context, st *cluster.Settings) { + refreshFunc := func(ctx context.Context) { + lic, err := getLicense(st) + if err != nil { + log.Errorf(ctx, "unable to refresh license enforcer for license change: %v", err) + return + } + var licenseType licenseserver.LicType + var licenseExpiry time.Time + if lic == nil { + licenseType = licenseserver.LicTypeNone + } else { + licenseExpiry = timeutil.Unix(lic.ValidUntilUnixSec, 0) + switch lic.Type { + case licenseccl.License_Free: + licenseType = licenseserver.LicTypeFree + case licenseccl.License_Trial: + licenseType = licenseserver.LicTypeTrial + case licenseccl.License_Evaluation: + licenseType = licenseserver.LicTypeEvaluation + default: + licenseType = licenseserver.LicTypeEnterprise + } + } + licenseserver.GetEnforcerInstance().RefreshForLicenseChange(licenseType, licenseExpiry) + } + // Install the hook so that we refresh license details when the license changes. + enterpriseLicense.SetOnChange(&st.SV, refreshFunc) + // Call the refresh function for the current license.
+ refreshFunc(ctx) +} diff --git a/pkg/ccl/utilccl/license_check_test.go b/pkg/ccl/utilccl/license_check_test.go index e0677960d714..f29ea2d636f8 100644 --- a/pkg/ccl/utilccl/license_check_test.go +++ b/pkg/ccl/utilccl/license_check_test.go @@ -10,21 +10,28 @@ package utilccl import ( "context" + "fmt" "testing" "time" "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/ccl/utilccl/licenseccl" + "github.com/cockroachdb/cockroach/pkg/server" + "github.com/cockroachdb/cockroach/pkg/server/license" "github.com/cockroachdb/cockroach/pkg/settings" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/testutils" + "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/util/envutil" + "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/stop" "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/stretchr/testify/require" ) func TestSettingAndCheckingLicense(t *testing.T) { + defer leaktest.AfterTest(t)() + ctx := context.Background() t0 := timeutil.Unix(0, 0) @@ -63,24 +70,28 @@ func TestSettingAndCheckingLicense(t *testing.T) { } func TestGetLicenseTypePresent(t *testing.T) { + defer leaktest.AfterTest(t)() + ctx := context.Background() for _, tc := range []struct { - typ licenseccl.License_Type - expectedType string - usage licenseccl.License_Usage - expectedUsage string + typ licenseccl.License_Type + expectedType string + environment licenseccl.License_Environment + expectedEnvironment string }{ {licenseccl.License_NonCommercial, "NonCommercial", licenseccl.PreProduction, "pre-production"}, {licenseccl.License_Enterprise, "Enterprise", licenseccl.Production, "production"}, {licenseccl.License_Evaluation, "Evaluation", licenseccl.Development, "development"}, {licenseccl.License_Enterprise, "Enterprise", licenseccl.Unspecified, ""}, + {licenseccl.License_Free, "Free", licenseccl.Development, "development"}, + {licenseccl.License_Trial, "Trial", licenseccl.PreProduction, "pre-production"}, } { st := cluster.MakeTestingClusterSettings() updater := st.MakeUpdater() lic, _ := (&licenseccl.License{ Type: tc.typ, ValidUntilUnixSec: 0, - Usage: tc.usage, + Environment: tc.environment, }).Encode() if err := setLicense(ctx, updater, lic); err != nil { t.Fatal(err) @@ -92,19 +103,21 @@ func TestGetLicenseTypePresent(t *testing.T) { if actualType != tc.expectedType { t.Fatalf("expected license type %s, got %s", tc.expectedType, actualType) } - actualUsage, err := GetLicenseUsage(st) + actualEnvironment, err := GetLicenseEnvironment(st) if err != nil { t.Fatal(err) } - if actualUsage != tc.expectedUsage { - t.Fatalf("expected license usage %s, got %s", tc.expectedUsage, actualUsage) + if actualEnvironment != tc.expectedEnvironment { + t.Fatalf("expected license environment %s, got %s", tc.expectedEnvironment, actualEnvironment) } } } -func TestUnknownUsageEnum(t *testing.T) { - // This literal was generated with an enum value of 100 for usage, to show - // what happens if we add more usages later and then try to apply one to an +func TestUnknownEnvironmentEnum(t *testing.T) { + defer leaktest.AfterTest(t)() + + // This literal was generated with an enum value of 100 for environment, to show + // what happens if we add more environments later and then try to apply one to an // older node which does not include it. 
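+ // (Such a literal can be reproduced by encoding a License whose Environment is set to the out-of-range value 100; the stringer then reports it as "other".)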
l, err := decode(`crl-0-GAIoZA`) if err != nil { @@ -113,12 +126,14 @@ func TestUnknownUsageEnum(t *testing.T) { if expected, got := "Evaluation", l.Type.String(); got != expected { t.Fatalf("expected license type %s, got %s", expected, got) } - if expected, got := "other", l.Usage.String(); got != expected { - t.Fatalf("expected license usage %q, got %q", expected, got) + if expected, got := "other", l.Environment.String(); got != expected { + t.Fatalf("expected license environment %q, got %q", expected, got) } } func TestGetLicenseTypeAbsent(t *testing.T) { + defer leaktest.AfterTest(t)() + expected := "None" actual, err := GetLicenseType(cluster.MakeTestingClusterSettings()) if err != nil { @@ -130,6 +145,8 @@ } } func TestSettingBadLicenseStrings(t *testing.T) { + defer leaktest.AfterTest(t)() + ctx := context.Background() for _, tc := range []struct{ lic, err string }{ {"blah", "invalid license string"}, @@ -147,6 +164,8 @@ } func TestTimeToEnterpriseLicenseExpiry(t *testing.T) { + defer leaktest.AfterTest(t)() + ctx := context.Background() t0 := timeutil.Unix(1603926294, 0) @@ -162,12 +181,12 @@ }).Encode() lic0M, _ := (&licenseccl.License{ - Type: licenseccl.License_Evaluation, + Type: licenseccl.License_Free, ValidUntilUnixSec: t0.AddDate(0, 0, 0).Unix(), }).Encode() licExpired, _ := (&licenseccl.License{ - Type: licenseccl.License_Evaluation, + Type: licenseccl.License_Trial, ValidUntilUnixSec: t0.AddDate(0, -1, 0).Unix(), }).Encode() @@ -177,9 +196,6 @@ defer stopper.Stop(ctx) manualTime := timeutil.NewManualTime(t0) - err := UpdateMetricOnLicenseChange(context.Background(), st, base.LicenseTTL, manualTime, stopper) - require.NoError(t, err) - for _, tc := range []struct { desc string lic string @@ -195,13 +211,15 @@ if err := setLicense(ctx, updater, tc.lic); err != nil { t.Fatal(err) } - actual := base.LicenseTTL.Value() + actual := base.GetLicenseTTL(context.Background(), st, manualTime) require.Equal(t, tc.ttlSeconds, actual) }) } } func TestApplyTenantLicenseWithLicense(t *testing.T) { + defer leaktest.AfterTest(t)() + license, _ := (&licenseccl.License{ Type: licenseccl.License_Enterprise, }).Encode() @@ -219,6 +237,8 @@ } func TestApplyTenantLicenseWithoutLicense(t *testing.T) { + defer leaktest.AfterTest(t)() + defer TestingDisableEnterprise()() settings := cluster.MakeClusterSettings() @@ -234,6 +254,7 @@ } func TestApplyTenantLicenseWithInvalidLicense(t *testing.T) { + defer leaktest.AfterTest(t)() defer envutil.TestSetEnv(t, "COCKROACH_TENANT_LICENSE", "THIS IS NOT A VALID LICENSE")() require.Error(t, ApplyTenantLicense()) } @@ -244,3 +265,80 @@ func setLicense(ctx context.Context, updater settings.Updater, val string) error Type: "s", }) } + +func TestRefreshLicenseEnforcerOnLicenseChange(t *testing.T) { + defer leaktest.AfterTest(t)() + + ts1 := timeutil.Unix(1724329716, 0) + + ctx := context.Background() + srv := serverutils.StartServerOnly(t, base.TestServerArgs{ + // This test changes a cluster setting, which can only be done from the system tenant.
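+ // The option below therefore opts this test out of secondary-tenant randomization.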
+ DefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant, + Knobs: base.TestingKnobs{ + Server: &server.TestingKnobs{ + LicenseTestingKnobs: license.TestingKnobs{ + OverrideStartTime: &ts1, + }, + }, + }, + }) + defer srv.Stopper().Stop(ctx) + + // All of the licenses that we install later depend on this org name. + _, err := srv.SystemLayer().SQLConn(t).Exec( + "SET CLUSTER SETTING cluster.organization = 'CRDB Unit Test'", + ) + require.NoError(t, err) + + // Test to ensure that the state is correctly registered on startup before + // changing the license. + enforcer := license.GetEnforcerInstance() + require.Equal(t, false, enforcer.GetHasLicense()) + gracePeriodTS, hasGracePeriod := enforcer.GetGracePeriodEndTS() + require.True(t, hasGracePeriod) + require.Equal(t, ts1.Add(7*24*time.Hour), gracePeriodTS) + + jan1st2000 := timeutil.Unix(946728000, 0) + + for i, tc := range []struct { + license string + expectedGracePeriodEnd time.Time + }{ + // Note: all licenses below expire on Jan 1st 2000 + // + // Free license - 30 days grace period + {"crl-0-EMDYt8MDGAMiDkNSREIgVW5pdCBUZXN0", jan1st2000.Add(30 * 24 * time.Hour)}, + // Trial license - 7 days grace period + {"crl-0-EMDYt8MDGAQiDkNSREIgVW5pdCBUZXN0", jan1st2000.Add(7 * 24 * time.Hour)}, + // Enterprise - no grace period + {"crl-0-EMDYt8MDGAEiDkNSREIgVW5pdCBUZXN0KAM", timeutil.UnixEpoch}, + // No license - 7 days grace period + {"", ts1.Add(7 * 24 * time.Hour)}, + } { + t.Run(fmt.Sprintf("test %d", i), func(t *testing.T) { + _, err := srv.SQLConn(t).Exec( + fmt.Sprintf("SET CLUSTER SETTING enterprise.license = '%s'", tc.license), + ) + require.NoError(t, err) + // The SQL can return before the callback has finished. So, we wait a + // bit to see if the desired state is reached. + var hasLicense bool + require.Eventually(t, func() bool { + hasLicense = enforcer.GetHasLicense() + return (tc.license != "") == hasLicense + }, 20*time.Second, time.Millisecond, + "GetHasLicense() last returned %t", hasLicense) + var ts time.Time + var hasGracePeriod bool + require.Eventually(t, func() bool { + ts, hasGracePeriod = enforcer.GetGracePeriodEndTS() + if tc.expectedGracePeriodEnd.Equal(timeutil.UnixEpoch) { + return !hasGracePeriod + } + return ts.Equal(tc.expectedGracePeriodEnd) + }, 20*time.Second, time.Millisecond, + "GetGracePeriodEndTS() last returned %v (%t)", ts, hasGracePeriod) + }) + } +} diff --git a/pkg/ccl/utilccl/license_test.go b/pkg/ccl/utilccl/license_test.go index d021dac5338e..16afa06035b9 100644 --- a/pkg/ccl/utilccl/license_test.go +++ b/pkg/ccl/utilccl/license_test.go @@ -15,16 +15,24 @@ import ( "github.com/cockroachdb/cockroach/pkg/ccl/utilccl/licenseccl" "github.com/cockroachdb/cockroach/pkg/testutils" + "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/timeutil" ) func TestLicense(t *testing.T) { + defer leaktest.AfterTest(t)() + t0 := timeutil.Unix(0, 0) ts := t0.AddDate(40, 0, 0) after := ts.Add(time.Hour * 24) before := ts.Add(time.Hour * -24) wayAfter := ts.Add(time.Hour * 24 * 365 * 200) + // Generate simple, deterministic values for the two byte fields. + // The first byte of each will be incremented after each test to ensure variation.
+ orgID := []byte{0} + licenseID := []byte{0} + for i, tc := range []struct { licType licenseccl.License_Type expiration time.Time @@ -54,6 +62,14 @@ func TestLicense(t *testing.T) { {licenseccl.License_Enterprise, ts, "tc-13", ts, ""}, {licenseccl.License_Enterprise, ts, "", ts, "license valid only for"}, {licenseccl.License_Enterprise, ts, "tc-15", ts, ""}, + + // free license. + {licenseccl.License_Free, ts, "tc-16", ts, ""}, + {licenseccl.License_Free, after, "tc-17", wayAfter, "expired"}, + + // trial license. + {licenseccl.License_Trial, wayAfter, "tc-18", before, ""}, + {licenseccl.License_Trial, ts, "unmatched org", ts, "license valid only for"}, } { t.Run("", func(t *testing.T) { var lic *licenseccl.License @@ -62,6 +78,8 @@ func TestLicense(t *testing.T) { ValidUntilUnixSec: tc.expiration.Unix(), Type: tc.licType, OrganizationName: fmt.Sprintf("tc-%d", i), + OrganizationId: orgID, + LicenseId: licenseID, }).Encode() if err != nil { t.Fatal(err) @@ -78,11 +96,15 @@ func TestLicense(t *testing.T) { t.Fatalf("%d: lic to %s, checked at %s.\n got %q", i, tc.expiration, tc.checkTime, err) } + orgID[0]++ + licenseID[0]++ }) } } func TestBadLicenseStrings(t *testing.T) { + defer leaktest.AfterTest(t)() + for _, tc := range []struct{ lic, err string }{ {"blah", "invalid license string"}, {"crl-0-&&&&&", "invalid license string"}, @@ -95,6 +117,8 @@ func TestBadLicenseStrings(t *testing.T) { } func TestExpiredLicenseLanguage(t *testing.T) { + defer leaktest.AfterTest(t)() + lic := &licenseccl.License{ Type: licenseccl.License_Evaluation, ValidUntilUnixSec: 1, diff --git a/pkg/ccl/utilccl/licenseccl/license.go b/pkg/ccl/utilccl/licenseccl/license.go index 65eb0b2cf49c..997619f973e5 100644 --- a/pkg/ccl/utilccl/licenseccl/license.go +++ b/pkg/ccl/utilccl/licenseccl/license.go @@ -48,7 +48,7 @@ func Decode(s string) (*License, error) { return &lic, nil } -func (u License_Usage) String() string { +func (u License_Environment) String() string { switch u { case Unspecified: return "" diff --git a/pkg/ccl/utilccl/licenseccl/license.proto b/pkg/ccl/utilccl/licenseccl/license.proto index 2fd9c82e57e4..22bd9fe9cb9f 100644 --- a/pkg/ccl/utilccl/licenseccl/license.proto +++ b/pkg/ccl/utilccl/licenseccl/license.proto @@ -17,16 +17,18 @@ message License { int64 valid_until_unix_sec = 2; enum Type { - NonCommercial = 0; + NonCommercial = 0 [deprecated = true]; Enterprise = 1; Evaluation = 2; + Free = 3; + Trial = 4; } Type type = 3; string organization_name = 4; - enum Usage { + enum Environment { option (gogoproto.goproto_enum_prefix) = false; option (gogoproto.goproto_enum_stringer) = false; @@ -36,5 +38,12 @@ message License { Development = 3; } - Usage usage = 5; + Environment environment = 5; + + // Two UUIDs uniquely identify this license and the associated organization. + // They are stored as bytes to align with the server's typical usage. We + // avoided using the custom UUID type normally used in protobufs to minimize + // dependencies, as the generated code is also used in other repositories. 
+ bytes license_id = 6; + bytes organization_id = 7; } diff --git a/pkg/ccl/utilccl/main_test.go b/pkg/ccl/utilccl/main_test.go index 71876195980c..a2880349eea6 100644 --- a/pkg/ccl/utilccl/main_test.go +++ b/pkg/ccl/utilccl/main_test.go @@ -13,10 +13,18 @@ import ( "testing" _ "github.com/cockroachdb/cockroach/pkg/ccl" + "github.com/cockroachdb/cockroach/pkg/security/securityassets" + "github.com/cockroachdb/cockroach/pkg/security/securitytest" + "github.com/cockroachdb/cockroach/pkg/server" + "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/util/randutil" ) func TestMain(m *testing.M) { randutil.SeedForTests() + securityassets.SetLoader(securitytest.EmbeddedAssets) + serverutils.InitTestServerFactory(server.TestServerFactory) os.Exit(m.Run()) } + +//go:generate ../../util/leaktest/add-leaktest.sh *_test.go diff --git a/pkg/cli/BUILD.bazel b/pkg/cli/BUILD.bazel index 9647a6654964..18000dadd93a 100644 --- a/pkg/cli/BUILD.bazel +++ b/pkg/cli/BUILD.bazel @@ -191,6 +191,7 @@ go_library( "//pkg/util", "//pkg/util/buildutil", "//pkg/util/cgroups", + "//pkg/util/cidr", "//pkg/util/encoding", "//pkg/util/envutil", "//pkg/util/flagutil", @@ -215,6 +216,7 @@ go_library( "//pkg/util/stop", "//pkg/util/strutil", "//pkg/util/syncutil", + "//pkg/util/system", "//pkg/util/sysutil", "//pkg/util/timeutil", "//pkg/util/tracing", @@ -260,10 +262,13 @@ go_library( "@com_github_spf13_cobra//:cobra", "@com_github_spf13_cobra//doc", "@com_github_spf13_pflag//:pflag", + "@com_google_cloud_go_storage//:storage", "@in_gopkg_yaml_v2//:yaml_v2", + "@org_golang_google_api//option", "@org_golang_google_grpc//:go_default_library", "@org_golang_google_grpc//codes", "@org_golang_google_grpc//status", + "@org_golang_x_oauth2//google", "@org_golang_x_sync//errgroup", ] + select({ "@io_bazel_rules_go//go/platform:aix": [ diff --git a/pkg/cli/auto_decrypt_fs.go b/pkg/cli/auto_decrypt_fs.go index 2585ce2b621a..fa0be620c65b 100644 --- a/pkg/cli/auto_decrypt_fs.go +++ b/pkg/cli/auto_decrypt_fs.go @@ -252,6 +252,11 @@ func (afs *autoDecryptFS) GetDiskUsage(path string) (vfs.DiskUsage, error) { return fs.GetDiskUsage(path) } +// Unwrap is part of the vfs.FS interface. +func (afs *autoDecryptFS) Unwrap() vfs.FS { + return nil +} + // maybeSwitchFS finds the first ancestor of path that is registered as an // encrypted FS; if there is such a path, returns the decrypted FS for that // path. Otherwise, returns the default FS. 
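The license changes above introduce the Free and Trial types, the renamed Environment field, byte-encoded identifiers, and the pull-based GetLicenseTTL helper. A minimal sketch of how they fit together, written as if it lived in utilccl (imports of context, cluster, licenseccl, pkg/util/timeutil, and pkg/util/uuid omitted); the organization name and one-year expiry are illustrative, not taken from this patch:

	// mintLicense is a hypothetical helper showing the new fields in use;
	// package-qualified names come from the diffs above, everything else is
	// illustrative.
	func mintLicense() (string, error) {
		lic := &licenseccl.License{
			Type:              licenseccl.License_Free,
			ValidUntilUnixSec: timeutil.Now().AddDate(1, 0, 0).Unix(), // illustrative: one year out
			Environment:       licenseccl.Development,
			OrganizationName:  "Acme", // illustrative
			LicenseId:         uuid.MakeV4().GetBytes(), // raw 16 bytes, no custom proto type
			OrganizationId:    uuid.MakeV4().GetBytes(),
		}
		return lic.Encode()
	}

	// remainingSeconds reads the TTL of whatever license is currently
	// installed via SET CLUSTER SETTING enterprise.license.
	func remainingSeconds(ctx context.Context, st *cluster.Settings) int64 {
		return GetLicenseTTL(ctx, st, timeutil.DefaultTimeSource{})
	}

Keeping the IDs as plain bytes, per the proto comment above, avoids pulling a custom UUID type into generated code that other repositories consume.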
diff --git a/pkg/cli/cert.go b/pkg/cli/cert.go index 30652fb36896..244e1708f7e4 100644 --- a/pkg/cli/cert.go +++ b/pkg/cli/cert.go @@ -193,6 +193,7 @@ func runCreateClientCert(cmd *cobra.Command, args []string) error { certCtx.overwriteFiles, user, certCtx.tenantScope, + certCtx.tenantNameScope, certCtx.generatePKCS8Key), "failed to generate client certificate and key") } diff --git a/pkg/cli/cert_test.go b/pkg/cli/cert_test.go index 9f45ded1dd7b..c8f5aff6ebfd 100644 --- a/pkg/cli/cert_test.go +++ b/pkg/cli/cert_test.go @@ -17,6 +17,9 @@ func Example_cert() { c.RunWithCAArgs([]string{"cert", "create-client", "foo"}) c.RunWithCAArgs([]string{"cert", "create-client", "Ομηρος"}) c.RunWithCAArgs([]string{"cert", "create-client", "0foo"}) + c.RunWithCAArgs([]string{"cert", "create-client", "foo-1", "--tenant-scope", "1"}) + c.RunWithCAArgs([]string{"cert", "create-client", "foo-tenant2", "--tenant-name-scope", "tenant2"}) + c.RunWithCAArgs([]string{"cert", "create-client", "foo-1-tenant2", "--tenant-scope", "1", "--tenant-name-scope", "tenant2"}) c.RunWithCAArgs([]string{"cert", "create-client", ",foo"}) c.RunWithCAArgs([]string{"cert", "create-client", "--disable-username-validation", ",foo"}) @@ -24,6 +27,9 @@ // cert create-client foo // cert create-client Ομηρος // cert create-client 0foo + // cert create-client foo-1 --tenant-scope 1 + // cert create-client foo-tenant2 --tenant-name-scope tenant2 + // cert create-client foo-1-tenant2 --tenant-scope 1 --tenant-name-scope tenant2 // cert create-client ,foo // ERROR: failed to generate client certificate and key: username is invalid // HINT: Usernames are case insensitive, must start with a letter, digit or underscore, may contain letters, digits, dashes, periods, or underscores, and must not exceed 63 characters. diff --git a/pkg/cli/cliflags/flags.go b/pkg/cli/cliflags/flags.go index d7467b32fccf..79699fe98032 100644 --- a/pkg/cli/cliflags/flags.go +++ b/pkg/cli/cliflags/flags.go @@ -843,6 +843,14 @@ This flag is optional. When omitted, the certificate is not scoped; i.e. it can be used with all tenants.`, } + TenantScopeByNames = FlagInfo{ + Name: "tenant-name-scope", + Description: `Assign a tenant scope to the certificate using tenant names. +This restricts the certificate to be valid only for the specified tenants. +This flag is optional. When omitted, the certificate is not scoped; i.e. +it can be used with all tenants.`, + } + GeneratePKCS8Key = FlagInfo{ Name: "also-generate-pkcs8-key", Description: `Also write the key in pkcs8 format to /client..key.pk8.`, diff --git a/pkg/cli/context.go b/pkg/cli/context.go index e1dc2adfd80e..c477cea225ce 100644 --- a/pkg/cli/context.go +++ b/pkg/cli/context.go @@ -250,9 +250,12 @@ var certCtx struct { certPrincipalMap []string // tenantScope indicates a tenantID(s) that a certificate is being // scoped to. By creating a tenant-scoped certificate, the usage of that certificate - // is restricted to a specific tenant. + // is restricted to specific tenants. tenantScope []roachpb.TenantID - + // tenantNameScope indicates the tenant name(s) that a certificate is being scoped to. + // By creating a tenant-scoped certificate, the usage of that certificate is + // restricted to specific tenants. + tenantNameScope []roachpb.TenantName // disableUsernameValidation removes the username syntax check on // the input.
disableUsernameValidation bool @@ -269,9 +272,9 @@ func setCertContextDefaults() { certCtx.generatePKCS8Key = false certCtx.disableUsernameValidation = false certCtx.certPrincipalMap = nil - // Note: we set tenantScope to nil so that by default, client certs - // are not scoped to a specific tenant and can be used to connect to - // any tenant. + // Note: we set tenantScope and tenantNameScope to nil so that by default, + // client certs are not scoped to a specific tenant and can be used to + // connect to any tenant. // // Note that the scoping is generally useful for security, and it is // used in CockroachCloud. However, CockroachCloud does not use our @@ -283,6 +286,7 @@ func setCertContextDefaults() { // other, defaulting to certs that are valid on every tenant is a // good choice. certCtx.tenantScope = nil + certCtx.tenantNameScope = nil } var sqlExecCtx = clisqlexec.Context{ diff --git a/pkg/cli/debug.go b/pkg/cli/debug.go index bdc84f7e374a..40ce18c4a959 100644 --- a/pkg/cli/debug.go +++ b/pkg/cli/debug.go @@ -54,6 +54,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/storage" "github.com/cockroachdb/cockroach/pkg/storage/enginepb" "github.com/cockroachdb/cockroach/pkg/storage/fs" + "github.com/cockroachdb/cockroach/pkg/util/cidr" "github.com/cockroachdb/cockroach/pkg/util/envutil" "github.com/cockroachdb/cockroach/pkg/util/flagutil" "github.com/cockroachdb/cockroach/pkg/util/hlc" @@ -482,7 +483,7 @@ func runDebugRangeData(cmd *cobra.Command, args []string) error { stopper := stop.NewStopper() defer stopper.Stop(ctx) - earlyBootAccessor := cloud.NewEarlyBootExternalStorageAccessor(serverCfg.Settings, serverCfg.ExternalIODirConfig) + earlyBootAccessor := cloud.NewEarlyBootExternalStorageAccessor(serverCfg.Settings, serverCfg.ExternalIODirConfig, cidr.NewLookup(&serverCfg.Settings.SV)) opts := []storage.ConfigOption{storage.MustExist, storage.RemoteStorageFactory(earlyBootAccessor)} if serverCfg.SharedStorage != "" { es, err := cloud.ExternalStorageFromURI(ctx, serverCfg.SharedStorage, @@ -1575,6 +1576,10 @@ func init() { f = debugZipUploadCmd.Flags() f.StringVar(&debugZipUploadOpts.ddAPIKey, "dd-api-key", "", "Datadog API key to use to send debug.zip artifacts to datadog") + f.StringVar(&debugZipUploadOpts.ddAPPKey, "dd-app-key", "", + "Datadog APP key to use to send debug.zip artifacts to datadog") + f.StringVar(&debugZipUploadOpts.ddSite, "dd-site", defaultDDSite, + "Datadog site to use to send debug.zip artifacts to datadog") f.StringSliceVar(&debugZipUploadOpts.include, "include", nil, "The debug zip artifacts to include. Possible values: "+strings.Join(zipArtifactTypes, ", ")) f.StringSliceVar(&debugZipUploadOpts.tags, "tags", nil, @@ -1582,6 +1587,12 @@ func init() { "\nExample: --tags \"env:prod,customer:xyz\"") f.StringVar(&debugZipUploadOpts.clusterName, "cluster", "", "Name of the cluster to associate with the debug zip artifacts. 
This can be used to identify data in the upstream observability tool.") + f.Var(&debugZipUploadOpts.from, "from", "oldest timestamp to include (inclusive)") + f.Var(&debugZipUploadOpts.to, "to", "newest timestamp to include (inclusive)") + f.StringVar(&debugZipUploadOpts.logFormat, "log-format", "", + "log format of the input files") + f.StringVar(&debugZipUploadOpts.gcpProjectID, "gcp-project-id", + defaultGCPProjectID, "GCP project ID to use to send debug.zip logs to GCS") f = debugDecodeKeyCmd.Flags() f.Var(&decodeKeyOptions.encoding, "encoding", "key argument encoding") diff --git a/pkg/cli/democluster/demo_cluster.go b/pkg/cli/democluster/demo_cluster.go index eb10961937bc..dafae0e7f786 100644 --- a/pkg/cli/democluster/demo_cluster.go +++ b/pkg/cli/democluster/demo_cluster.go @@ -1486,6 +1486,7 @@ func (c *transientCluster) generateCerts(ctx context.Context, certsDir string) ( true, /* overwrite */ username.RootUserName(), nil, /* tenantIDs - this makes it valid for all tenants */ + nil, /* tenantNames - this makes it valid for all tenants */ true, /* generatePKCS8Key */ ); err != nil { return err @@ -1502,6 +1503,7 @@ func (c *transientCluster) generateCerts(ctx context.Context, certsDir string) ( true, /* overwrite */ demoUser, nil, /* tenantIDs - this makes it valid for all tenants */ + nil, /* tenantNames - this makes it valid for all tenants */ true, /* generatePKCS8Key */ ); err != nil { return err diff --git a/pkg/cli/flags.go b/pkg/cli/flags.go index 8b2a39ae80bb..092161be69cb 100644 --- a/pkg/cli/flags.go +++ b/pkg/cli/flags.go @@ -191,6 +191,41 @@ func (t tenantIDSetter) Set(v string) error { return nil } +// tenantNameSetter wraps a list of roachpb.TenantNames and enables setting +// them via a command-line flag. +type tenantNameSetter struct { + tenantNames *[]roachpb.TenantName +} + +// String implements the pflag.Value interface. +func (t tenantNameSetter) String() string { + var tenantString strings.Builder + separator := "" + for _, tName := range *t.tenantNames { + tenantString.WriteString(separator) + tenantString.WriteString(string(tName)) + separator = "," + } + return tenantString.String() +} + +// Type implements the pflag.Value interface. +func (t tenantNameSetter) Type() string { return "<[]TenantName>" } + +// Set implements the pflag.Value interface. +func (t tenantNameSetter) Set(v string) error { + *t.tenantNames = []roachpb.TenantName{} + tenantScopes := strings.Split(v, "," /* separator */) + for _, tenantScope := range tenantScopes { + tenant := roachpb.TenantName(tenantScope) + if err := tenant.IsValid(); err != nil { + return err + } + *t.tenantNames = append(*t.tenantNames, roachpb.TenantName(tenantScope)) + } + return nil +} + // Set implements the pflag.Value interface. func (a clusterNameSetter) Set(v string) error { if v == "" { @@ -617,6 +652,7 @@ func init() { if cmd == createClientCertCmd { cliflagcfg.VarFlag(f, &tenantIDSetter{tenantIDs: &certCtx.tenantScope}, cliflags.TenantScope) + cliflagcfg.VarFlag(f, &tenantNameSetter{tenantNames: &certCtx.tenantNameScope}, cliflags.TenantScopeByNames) // PKCS8 key format is only available for the client cert command. 
cliflagcfg.BoolFlag(f, &certCtx.generatePKCS8Key, cliflags.GeneratePKCS8Key) diff --git a/pkg/cli/flags_test.go b/pkg/cli/flags_test.go index 9d4e1ba9cb7d..faf82c214475 100644 --- a/pkg/cli/flags_test.go +++ b/pkg/cli/flags_test.go @@ -1529,6 +1529,34 @@ func TestTenantID(t *testing.T) { } } +func TestTenantName(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + tests := []struct { + name string + arg string + errContains string + }{ + {"empty tenant name text", "", "invalid tenant name: \"\""}, + {"tenant name not valid", "a+bc", "invalid tenant name: \"a+bc\""}, + {"tenant name \"abc\" is valid", "abc", ""}, + {"tenant name \"system\" is valid", "system", ""}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tns := tenantNameSetter{tenantNames: &[]roachpb.TenantName{}} + err := tns.Set(tt.arg) + if tt.errContains == "" { + assert.NoError(t, err) + assert.Equal(t, tt.arg, tns.String()) + } else { + assert.True(t, strings.Contains(err.Error(), tt.errContains)) + } + }) + } +} + func TestTenantIDFromFile(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) diff --git a/pkg/cli/testdata/doctor/test_examine_cluster b/pkg/cli/testdata/doctor/test_examine_cluster index 40c4339e5705..5dc28a7efa8a 100644 --- a/pkg/cli/testdata/doctor/test_examine_cluster +++ b/pkg/cli/testdata/doctor/test_examine_cluster @@ -1,7 +1,7 @@ debug doctor examine cluster ---- debug doctor examine cluster -Examining 63 descriptors and 62 namespace entries... +Examining 64 descriptors and 63 namespace entries... ParentID 100, ParentSchemaID 101: relation "foo" (105): expected matching namespace entry, found none -Examining 10 jobs... +Examining 11 jobs... ERROR: validation failed diff --git a/pkg/cli/testdata/doctor/test_examine_cluster_dropped b/pkg/cli/testdata/doctor/test_examine_cluster_dropped index 2118e0bd3469..75feb6cf5dde 100644 --- a/pkg/cli/testdata/doctor/test_examine_cluster_dropped +++ b/pkg/cli/testdata/doctor/test_examine_cluster_dropped @@ -1,6 +1,6 @@ debug doctor examine cluster ---- debug doctor examine cluster -Examining 62 descriptors and 62 namespace entries... -Examining 8 jobs... +Examining 63 descriptors and 63 namespace entries... +Examining 9 jobs... No problems found! diff --git a/pkg/cli/testdata/doctor/test_examine_cluster_jobs b/pkg/cli/testdata/doctor/test_examine_cluster_jobs index 8ba58ad8df93..373f80aaeb3b 100644 --- a/pkg/cli/testdata/doctor/test_examine_cluster_jobs +++ b/pkg/cli/testdata/doctor/test_examine_cluster_jobs @@ -1,11 +1,11 @@ debug doctor examine cluster ---- debug doctor examine cluster -Examining 62 descriptors and 63 namespace entries... +Examining 63 descriptors and 64 namespace entries... ParentID 183, ParentSchemaID 381: relation "foo" (104): referenced database ID 183: referenced descriptor not found ParentID 183, ParentSchemaID 381: relation "foo" (104): referenced schema ID 381: referenced descriptor not found ParentID 183, ParentSchemaID 381: relation "foo" (104): expected matching namespace entry, found none ParentID 183, ParentSchemaID 381: relation "foo" (104): mutation job 962952277419655169: job 962952277419655169 not found ParentID 100, ParentSchemaID 101: namespace entry "foo" (104): mismatched name "foo" in relation descriptor -Examining 8 jobs... +Examining 9 jobs... 
ERROR: validation failed diff --git a/pkg/cli/testdata/tsdump/json b/pkg/cli/testdata/tsdump/json index 15a14a80c4f9..e7a6ee14264d 100644 --- a/pkg/cli/testdata/tsdump/json +++ b/pkg/cli/testdata/tsdump/json @@ -31,4 +31,4 @@ cr.node.admission.admitted.elastic.cpu 2 1.000000 1711130560 ---- POST: https://example.com/data DD-API-KEY: api-key -Body: {"series":[{"metric":"crdb.tsdump.admission.admitted.elastic.cpu","type":0,"points":[{"timestamp":17111304,"value":0},{"timestamp":17111304,"value":1},{"timestamp":17111304,"value":1},{"timestamp":17111305,"value":1}],"resources":null,"tags":["cluster_type:SELF_HOSTED","job:cockroachdb","region:local","cluster:test-cluster","upload_id:test-cluster-1234","node_id:1"]},{"metric":"crdb.tsdump.admission.admitted.elastic.cpu","type":0,"points":[{"timestamp":17111305,"value":1},{"timestamp":17111305,"value":1},{"timestamp":17111305,"value":1},{"timestamp":17111305,"value":1},{"timestamp":17111305,"value":1},{"timestamp":17111305,"value":1}],"resources":null,"tags":["cluster_type:SELF_HOSTED","job:cockroachdb","region:local","cluster:test-cluster","upload_id:test-cluster-1234","node_id:2"]}]} +Body: {"series":[{"metric":"crdb.tsdump.admission.admitted.elastic.cpu","type":0,"points":[{"timestamp":17111304,"value":0},{"timestamp":17111304,"value":1},{"timestamp":17111304,"value":1},{"timestamp":17111305,"value":1}],"resources":null,"tags":["cluster_type:SELF_HOSTED","job:cockroachdb","region:local","cluster_label:test-cluster","upload_id:test-cluster-1234","node_id:1"]},{"metric":"crdb.tsdump.admission.admitted.elastic.cpu","type":0,"points":[{"timestamp":17111305,"value":1},{"timestamp":17111305,"value":1},{"timestamp":17111305,"value":1},{"timestamp":17111305,"value":1},{"timestamp":17111305,"value":1},{"timestamp":17111305,"value":1}],"resources":null,"tags":["cluster_type:SELF_HOSTED","job:cockroachdb","region:local","cluster_label:test-cluster","upload_id:test-cluster-1234","node_id:2"]}]} diff --git a/pkg/cli/testdata/upload/logs b/pkg/cli/testdata/upload/logs new file mode 100644 index 000000000000..0fcd1b2f635b --- /dev/null +++ b/pkg/cli/testdata/upload/logs @@ -0,0 +1,86 @@ +# Single-node +upload-logs +{ + "nodes": { + "1": { + "logs": [ + { + "name": "cockroach.hostname.username.2024-07-16T17_51_43Z.048498.log", + "lines": [ + "I240716 17:51:44.661864 100 server/node.go:533 ⋮ [T1,n1] 24 initialized store s1", + "W240716 17:51:44.667017 100 server/env_sampler.go:125 ⋮ [T1,n1] 33 failed to start query profiler worker: failed to detect cgroup memory limit: failed to read memory cgroup from cgroups file: ‹/proc/self/cgroup›: open ‹/proc/self/cgroup›: no such file or directory" + ] + } + ] + } + } +} +---- +ABC/123/dt=20240716/hour=17: +Upload ID: 123 +debug zip upload debugDir --dd-api-key=dd-api-key --dd-app-key=dd-app-key --cluster=ABC --include=logs --log-format=crdb-v1 +{"data":{"type":"archives","attributes":{"name":"123","query":"-*","destination":{"type":"gcs","path":"ABC/123","bucket":"debugzip-archives","integration":{"project_id":"arjun-sandbox-424904","client_email":"datadog-archive@arjun-sandbox-424904.iam.gserviceaccount.com"}}}}} +{"timestamp":1721152304,"date":"2024-07-16T17:51:44Z","message":"failed to start query profiler worker: failed to detect cgroup memory limit: failed to read memory cgroup from cgroups file: /proc/self/cgroup: open /proc/self/cgroup: no such file or 
directory","tags":["cluster:ABC","env:debug","node_id:1","service:CRDB-SH","source:cockroachdb","upload_id:123"],"_id":"a1b2c3","attributes":{"goroutine":100,"file":"server/env_sampler.go","line":125,"counter":33,"tenant_id":"1","date":"2024-07-16T17:51:44Z","timestamp":1721152304,"channel":"DEV","severity":"WARNING"}} +{"timestamp":1721152304,"date":"2024-07-16T17:51:44Z","message":"initialized store s1","tags":["cluster:ABC","env:debug","node_id:1","service:CRDB-SH","source:cockroachdb","upload_id:123"],"_id":"a1b2c3","attributes":{"goroutine":100,"file":"server/node.go","line":533,"counter":24,"tenant_id":"1","date":"2024-07-16T17:51:44Z","timestamp":1721152304,"channel":"DEV","severity":"INFO"}} + + +# single-node with wrong log format +upload-logs log-format=crdb-v2 +{ + "nodes": { + "1": { + "logs": [ + { + "name": "cockroach.hostname.username.2024-07-16T17_51_43Z.048498.log", + "lines": [ + "I240716 17:51:44.661864 100 server/node.go:533 ⋮ [T1,n1] 24 initialized store s1", + "W240716 17:51:44.667017 100 server/env_sampler.go:125 ⋮ [T1,n1] 33 failed to start query profiler worker: failed to detect cgroup memory limit: failed to read memory cgroup from cgroups file: ‹/proc/self/cgroup›: open ‹/proc/self/cgroup›: no such file or directory" + ] + } + ] + } + } +} +---- +Failed to upload logs: decoding on line 2: malformed log entry +Upload ID: 123 +debug zip upload debugDir --dd-api-key=dd-api-key --dd-app-key=dd-app-key --cluster=ABC --include=logs --log-format=crdb-v2 + + +# multi-node +upload-logs +{ + "nodes": { + "1": { + "logs": [ + { + "name": "cockroach.node1.username.2024-07-16T17_51_43Z.048498.log", + "lines": [ + "I240716 17:51:44.661864 100 server/node.go:533 ⋮ [T1,n1] 24 initialized store s1", + "W240716 17:51:44.667017 100 server/env_sampler.go:125 ⋮ [T1,n1] 33 failed to start query profiler worker: failed to detect cgroup memory limit: failed to read memory cgroup from cgroups file: ‹/proc/self/cgroup›: open ‹/proc/self/cgroup›: no such file or directory" + ] + } + ] + }, + "2": { + "logs": [ + { + "name": "cockroach.node2.username.2024-07-16T17_51_43Z.048498.log", + "lines": [ + "I240716 17:51:44.797342 916 sql/sqlliveness/slstorage/slstorage.go:540 ⋮ [T1,n1] 43 inserted sqlliveness session 01018071445fbd54a44ee88e906efb311d7193", + "I240716 17:51:44.797530 916 sql/sqlliveness/slinstance/slinstance.go:258 ⋮ [T1,n1] 44 created new SQL liveness session 01018071445fbd54a44ee88e906efb311d7193" + ] + } + ] + } + } +} +---- +ABC/123/dt=20240716/hour=17: +Upload ID: 123 +debug zip upload debugDir --dd-api-key=dd-api-key --dd-app-key=dd-app-key --cluster=ABC --include=logs --log-format=crdb-v1 +{"data":{"type":"archives","attributes":{"name":"123","query":"-*","destination":{"type":"gcs","path":"ABC/123","bucket":"debugzip-archives","integration":{"project_id":"arjun-sandbox-424904","client_email":"datadog-archive@arjun-sandbox-424904.iam.gserviceaccount.com"}}}}} +{"timestamp":1721152304,"date":"2024-07-16T17:51:44Z","message":"created new SQL liveness session 01018071445fbd54a44ee88e906efb311d7193","tags":["cluster:ABC","env:debug","node_id:2","service:CRDB-SH","source:cockroachdb","upload_id:123"],"_id":"a1b2c3","attributes":{"goroutine":916,"file":"sql/sqlliveness/slinstance/slinstance.go","line":258,"counter":44,"tenant_id":"1","date":"2024-07-16T17:51:44Z","timestamp":1721152304,"channel":"DEV","severity":"INFO"}} +{"timestamp":1721152304,"date":"2024-07-16T17:51:44Z","message":"failed to start query profiler worker: failed to detect cgroup memory limit: failed to read 
memory cgroup from cgroups file: /proc/self/cgroup: open /proc/self/cgroup: no such file or directory","tags":["cluster:ABC","env:debug","node_id:1","service:CRDB-SH","source:cockroachdb","upload_id:123"],"_id":"a1b2c3","attributes":{"goroutine":100,"file":"server/env_sampler.go","line":125,"counter":33,"tenant_id":"1","date":"2024-07-16T17:51:44Z","timestamp":1721152304,"channel":"DEV","severity":"WARNING"}} +{"timestamp":1721152304,"date":"2024-07-16T17:51:44Z","message":"initialized store s1","tags":["cluster:ABC","env:debug","node_id:1","service:CRDB-SH","source:cockroachdb","upload_id:123"],"_id":"a1b2c3","attributes":{"goroutine":100,"file":"server/node.go","line":533,"counter":24,"tenant_id":"1","date":"2024-07-16T17:51:44Z","timestamp":1721152304,"channel":"DEV","severity":"INFO"}} +{"timestamp":1721152304,"date":"2024-07-16T17:51:44Z","message":"inserted sqlliveness session 01018071445fbd54a44ee88e906efb311d7193","tags":["cluster:ABC","env:debug","node_id:2","service:CRDB-SH","source:cockroachdb","upload_id:123"],"_id":"a1b2c3","attributes":{"goroutine":916,"file":"sql/sqlliveness/slstorage/slstorage.go","line":540,"counter":43,"tenant_id":"1","date":"2024-07-16T17:51:44Z","timestamp":1721152304,"channel":"DEV","severity":"INFO"}} diff --git a/pkg/cli/testdata/upload/profiles b/pkg/cli/testdata/upload/profiles index f1d91d148297..54012b5b3863 100644 --- a/pkg/cli/testdata/upload/profiles +++ b/pkg/cli/testdata/upload/profiles @@ -1,104 +1,123 @@ # Single-node - both profiles upload-profiles { - "1": [ - { "type": "cpu", "timestamp": 1718972610, "duration": 20 }, - { "type": "heap", "timestamp": 1718974401, "duration": 20 } - ] + "nodes": { + "1": { + "profiles": [ + {"type": "cpu", "timestamp": 1718972610, "duration": 20}, + {"type": "heap", "timestamp": 1718974401, "duration": 20} + ] + } + } } ---- -Explore this profile on datadog: https://{{ datadog domain }}/profiling/explorer?query=upload_id:123 Upload ID: 123 -Uploaded profiles of node 1 to datadog (debugDir/nodes/1/cpu.pprof, debugDir/nodes/1/heap.pprof) -debug zip upload debugDir --dd-api-key=dd-api-key --cluster=ABC -{"start":"","end":"","attachments":["cpu.pprof","heap.pprof"],"tags_profiler":"cluster:ABC,env:debug,node_id:1,service:CRDB-SH,upload_id:123","family":"go","version":"4"} +debug zip upload debugDir --dd-api-key=dd-api-key --dd-app-key=dd-app-key --cluster=ABC --include=profiles +{"start":"","end":"","attachments":["cpu.pprof","heap.pprof"],"tags_profiler":"cluster:ABC,env:debug,node_id:1,service:CRDB-SH,source:cockroachdb,upload_id:123","family":"go","version":"4"} # Multi-node - both profiles upload-profiles tags=foo:bar { - "1": [ - { "type": "cpu", "timestamp": 1718972610, "duration": 20 }, - { "type": "heap", "timestamp": 1718974401, "duration": 20 } - ], - "2": [ - { "type": "cpu", "timestamp": 1718974543, "duration": 20 }, - { "type": "heap", "timestamp": 1718974535, "duration": 20 } - ] + "nodes": { + "1": { + "profiles": [ + {"type": "cpu", "timestamp": 1718972610, "duration": 20}, + {"type": "heap", "timestamp": 1718974401, "duration": 20} + ] + }, + "2": { + "profiles": [ + {"type": "cpu", "timestamp": 1718974543, "duration": 20}, + {"type": "heap", "timestamp": 1718974535, "duration": 20} + ] + } + } } ---- -Explore this profile on datadog: https://{{ datadog domain }}/profiling/explorer?query=upload_id:123 -Explore this profile on datadog: https://{{ datadog domain }}/profiling/explorer?query=upload_id:123 Upload ID: 123 -Uploaded profiles of node 1 to datadog (debugDir/nodes/1/cpu.pprof, 
debugDir/nodes/1/heap.pprof) -Uploaded profiles of node 2 to datadog (debugDir/nodes/2/cpu.pprof, debugDir/nodes/2/heap.pprof) -debug zip upload debugDir --dd-api-key=dd-api-key --tags=foo:bar --cluster=ABC -{"start":"","end":"","attachments":["cpu.pprof","heap.pprof"],"tags_profiler":"cluster:ABC,env:debug,foo:bar,node_id:1,service:CRDB-SH,upload_id:123","family":"go","version":"4"} -{"start":"","end":"","attachments":["cpu.pprof","heap.pprof"],"tags_profiler":"cluster:ABC,env:debug,foo:bar,node_id:2,service:CRDB-SH,upload_id:123","family":"go","version":"4"} +debug zip upload debugDir --dd-api-key=dd-api-key --dd-app-key=dd-app-key --tags=foo:bar --cluster=ABC --include=profiles +{"start":"","end":"","attachments":["cpu.pprof","heap.pprof"],"tags_profiler":"cluster:ABC,env:debug,foo:bar,node_id:1,service:CRDB-SH,source:cockroachdb,upload_id:123","family":"go","version":"4"} +{"start":"","end":"","attachments":["cpu.pprof","heap.pprof"],"tags_profiler":"cluster:ABC,env:debug,foo:bar,node_id:2,service:CRDB-SH,source:cockroachdb,upload_id:123","family":"go","version":"4"} # Single-node - only CPU profile upload-profiles tags=customer:user-given-name,cluster:XYZ { - "1": [ - { "type": "cpu", "timestamp": 1718972610, "duration": 20 } - ] + "nodes": { + "1": { + "profiles": [ + {"type": "cpu", "timestamp": 1718972610, "duration": 20} + ] + } + } } ---- -Explore this profile on datadog: https://{{ datadog domain }}/profiling/explorer?query=upload_id:123 Upload ID: 123 -Uploaded profiles of node 1 to datadog (debugDir/nodes/1/cpu.pprof) -debug zip upload debugDir --dd-api-key=dd-api-key --tags=customer:user-given-name,cluster:XYZ --cluster=ABC -{"start":"","end":"","attachments":["cpu.pprof"],"tags_profiler":"cluster:XYZ,customer:user-given-name,env:debug,foo:bar,node_id:1,service:CRDB-SH,upload_id:123","family":"go","version":"4"} +debug zip upload debugDir --dd-api-key=dd-api-key --dd-app-key=dd-app-key --tags=customer:user-given-name,cluster:XYZ --cluster=ABC --include=profiles +{"start":"","end":"","attachments":["cpu.pprof"],"tags_profiler":"cluster:XYZ,customer:user-given-name,env:debug,foo:bar,node_id:1,service:CRDB-SH,source:cockroachdb,upload_id:123","family":"go","version":"4"} # Single-node - no profiles found upload-profiles { - "1": [] + "nodes": { + "1": {} + } } ---- Upload ID: 123 -debug zip upload debugDir --dd-api-key=dd-api-key --cluster=ABC +debug zip upload debugDir --dd-api-key=dd-api-key --dd-app-key=dd-app-key --cluster=ABC --include=profiles # Colliding tags - env provided by the user should take precedence upload-profiles tags=env:SH { - "1": [ - { "type": "cpu", "timestamp": 1718972610, "duration": 20 }, - { "type": "heap", "timestamp": 1718974401, "duration": 20 } - ] + "nodes": { + "1": { + "profiles": [ + {"type": "cpu", "timestamp": 1718972610, "duration": 20}, + {"type": "heap", "timestamp": 1718974401, "duration": 20} + ] + } + } } ---- -Explore this profile on datadog: https://{{ datadog domain }}/profiling/explorer?query=upload_id:123 Upload ID: 123 -Uploaded profiles of node 1 to datadog (debugDir/nodes/1/cpu.pprof, debugDir/nodes/1/heap.pprof) -debug zip upload debugDir --dd-api-key=dd-api-key --tags=env:SH --cluster=ABC -{"start":"","end":"","attachments":["cpu.pprof","heap.pprof"],"tags_profiler":"cluster:ABC,env:SH,node_id:1,service:CRDB-SH,upload_id:123","family":"go","version":"4"} +debug zip upload debugDir --dd-api-key=dd-api-key --dd-app-key=dd-app-key --tags=env:SH --cluster=ABC --include=profiles 
+{"start":"","end":"","attachments":["cpu.pprof","heap.pprof"],"tags_profiler":"cluster:ABC,env:SH,node_id:1,service:CRDB-SH,source:cockroachdb,upload_id:123","family":"go","version":"4"} # Single-node - both profiles upload-profiles tags=ERR { - "1": [ - { "type": "cpu", "timestamp": 1718972610, "duration": 20 }, - { "type": "heap", "timestamp": 1718974401, "duration": 20 } - ] + "nodes": { + "1": { + "profiles": [ + {"type": "cpu", "timestamp": 1718972610, "duration": 20}, + {"type": "heap", "timestamp": 1718974401, "duration": 20} + ] + } + } } ---- -ERROR: Failed to upload profiles of node 1 to datadog (debugDir/nodes/1/cpu.pprof, debugDir/nodes/1/heap.pprof): 'runtime' is a required field -debug zip upload debugDir --dd-api-key=dd-api-key --tags=ERR --cluster=ABC +Failed to upload profiles: failed to upload profiles of node 1: status: 400, body: 'runtime' is a required field +Upload ID: 123 +debug zip upload debugDir --dd-api-key=dd-api-key --dd-app-key=dd-app-key --tags=ERR --cluster=ABC --include=profiles # Customer name not provided by the user upload-profiles tags=foo:bar skip-cluster-name=true { - "1": [ - { "type": "cpu", "timestamp": 1718972610, "duration": 20 }, - { "type": "heap", "timestamp": 1718974401, "duration": 20 } - ] + "nodes": { + "1": { + "profiles": [ + {"type": "cpu", "timestamp": 1718972610, "duration": 20}, + {"type": "heap", "timestamp": 1718974401, "duration": 20} + ] + } + } } ---- ERROR: cluster name is required for uploading profiles -debug zip upload debugDir --dd-api-key=dd-api-key --tags=foo:bar +debug zip upload debugDir --dd-api-key=dd-api-key --dd-app-key=dd-app-key --tags=foo:bar --include=profiles diff --git a/pkg/cli/testdata/zip/partial1 b/pkg/cli/testdata/zip/partial1 index b523d1e483fa..18befc011929 100644 --- a/pkg/cli/testdata/zip/partial1 +++ b/pkg/cli/testdata/zip/partial1 @@ -282,3 +282,4 @@ debug zip --concurrency=1 --cpu-profile-duration=0s /dev/null [cluster] pprof summary script... writing binary output: debug/pprof-summary.sh... done [cluster] hot range summary script... writing binary output: debug/hot-ranges.sh... done [cluster] tenant hot range summary script... writing binary output: debug/hot-ranges-tenant.sh... done +[cluster] capture debug zip flags... writing binary output: debug/debug_zip_command_flags.txt... done diff --git a/pkg/cli/testdata/zip/partial1_excluded b/pkg/cli/testdata/zip/partial1_excluded index b215a4a39ed5..0c23ccd1cecc 100644 --- a/pkg/cli/testdata/zip/partial1_excluded +++ b/pkg/cli/testdata/zip/partial1_excluded @@ -179,3 +179,4 @@ debug zip /dev/null --concurrency=1 --exclude-nodes=2 --cpu-profile-duration=0 [cluster] pprof summary script... writing binary output: debug/pprof-summary.sh... done [cluster] hot range summary script... writing binary output: debug/hot-ranges.sh... done [cluster] tenant hot range summary script... writing binary output: debug/hot-ranges-tenant.sh... done +[cluster] capture debug zip flags... writing binary output: debug/debug_zip_command_flags.txt... done diff --git a/pkg/cli/testdata/zip/partial2 b/pkg/cli/testdata/zip/partial2 index cfd005a52ab6..957ef2815d38 100644 --- a/pkg/cli/testdata/zip/partial2 +++ b/pkg/cli/testdata/zip/partial2 @@ -178,3 +178,4 @@ debug zip --concurrency=1 --cpu-profile-duration=0 /dev/null [cluster] pprof summary script... writing binary output: debug/pprof-summary.sh... done [cluster] hot range summary script... writing binary output: debug/hot-ranges.sh... done [cluster] tenant hot range summary script... 
writing binary output: debug/hot-ranges-tenant.sh... done +[cluster] capture debug zip flags... writing binary output: debug/debug_zip_command_flags.txt... done diff --git a/pkg/cli/testdata/zip/testzip b/pkg/cli/testdata/zip/testzip index badcb676bd19..5d7a7bfff1e1 100644 --- a/pkg/cli/testdata/zip/testzip +++ b/pkg/cli/testdata/zip/testzip @@ -134,3 +134,4 @@ debug zip --concurrency=1 --cpu-profile-duration=1s /dev/null [cluster] pprof summary script... writing binary output: debug/pprof-summary.sh... done [cluster] hot range summary script... writing binary output: debug/hot-ranges.sh... done [cluster] tenant hot range summary script... writing binary output: debug/hot-ranges-tenant.sh... done +[cluster] capture debug zip flags... writing binary output: debug/debug_zip_command_flags.txt... done diff --git a/pkg/cli/testdata/zip/testzip_concurrent b/pkg/cli/testdata/zip/testzip_concurrent index e91f6305d766..57eb706818bf 100644 --- a/pkg/cli/testdata/zip/testzip_concurrent +++ b/pkg/cli/testdata/zip/testzip_concurrent @@ -1,5 +1,8 @@ zip ---- +[cluster] capture debug zip flags... +[cluster] capture debug zip flags: done +[cluster] capture debug zip flags: writing binary output: debug/debug_zip_command_flags.txt... [cluster] collecting the inflight traces for jobs... [cluster] collecting the inflight traces for jobs: done [cluster] collecting the inflight traces for jobs: received response... diff --git a/pkg/cli/testdata/zip/testzip_exclude_goroutine_stacks b/pkg/cli/testdata/zip/testzip_exclude_goroutine_stacks index 0fdf44914143..4784ee500529 100644 --- a/pkg/cli/testdata/zip/testzip_exclude_goroutine_stacks +++ b/pkg/cli/testdata/zip/testzip_exclude_goroutine_stacks @@ -133,4 +133,5 @@ debug zip --concurrency=1 --cpu-profile-duration=1s --include-goroutine-stacks=f [cluster] pprof summary script... writing binary output: debug/pprof-summary.sh... done [cluster] hot range summary script... writing binary output: debug/hot-ranges.sh... done [cluster] tenant hot range summary script... writing binary output: debug/hot-ranges-tenant.sh... done +[cluster] capture debug zip flags... writing binary output: debug/debug_zip_command_flags.txt... done [cluster] NOTE: Omitted node-level goroutine stack dumps from this debug zip bundle. Use the --include-goroutine-stacks flag to enable the fetching of this data. diff --git a/pkg/cli/testdata/zip/testzip_exclude_range_info b/pkg/cli/testdata/zip/testzip_exclude_range_info index 5453e2c9230e..18f94fead719 100644 --- a/pkg/cli/testdata/zip/testzip_exclude_range_info +++ b/pkg/cli/testdata/zip/testzip_exclude_range_info @@ -126,3 +126,4 @@ debug zip --concurrency=1 --cpu-profile-duration=1s --include-range-info=false / [node 1] requesting log files list... received response... done [node ?] ? log files found [cluster] pprof summary script... writing binary output: debug/pprof-summary.sh... done +[cluster] capture debug zip flags... writing binary output: debug/debug_zip_command_flags.txt... done diff --git a/pkg/cli/testdata/zip/testzip_external_process_virtualization b/pkg/cli/testdata/zip/testzip_external_process_virtualization index 4ab199e9edb9..52dd014c1ada 100644 --- a/pkg/cli/testdata/zip/testzip_external_process_virtualization +++ b/pkg/cli/testdata/zip/testzip_external_process_virtualization @@ -157,3 +157,4 @@ debug zip --concurrency=1 --cpu-profile-duration=1s /dev/null [cluster] pprof summary script... writing binary output: debug/pprof-summary.sh... done [cluster] hot range summary script... writing binary output: debug/hot-ranges.sh... 
done [cluster] tenant hot range summary script... writing binary output: debug/hot-ranges-tenant.sh... done +[cluster] capture debug zip flags... writing binary output: debug/debug_zip_command_flags.txt... done diff --git a/pkg/cli/testdata/zip/testzip_fallback b/pkg/cli/testdata/zip/testzip_fallback index 4d4ba0fa0a09..c2b059738fbf 100644 --- a/pkg/cli/testdata/zip/testzip_fallback +++ b/pkg/cli/testdata/zip/testzip_fallback @@ -273,3 +273,4 @@ debug zip --concurrency=1 --cpu-profile-duration=1s /dev/null [cluster] pprof summary script... writing binary output: debug/pprof-summary.sh... done [cluster] hot range summary script... writing binary output: debug/hot-ranges.sh... done [cluster] tenant hot range summary script... writing binary output: debug/hot-ranges-tenant.sh... done +[cluster] capture debug zip flags... writing binary output: debug/debug_zip_command_flags.txt... done diff --git a/pkg/cli/testdata/zip/testzip_include_goroutine_stacks b/pkg/cli/testdata/zip/testzip_include_goroutine_stacks index badcb676bd19..5d7a7bfff1e1 100644 --- a/pkg/cli/testdata/zip/testzip_include_goroutine_stacks +++ b/pkg/cli/testdata/zip/testzip_include_goroutine_stacks @@ -134,3 +134,4 @@ debug zip --concurrency=1 --cpu-profile-duration=1s /dev/null [cluster] pprof summary script... writing binary output: debug/pprof-summary.sh... done [cluster] hot range summary script... writing binary output: debug/hot-ranges.sh... done [cluster] tenant hot range summary script... writing binary output: debug/hot-ranges-tenant.sh... done +[cluster] capture debug zip flags... writing binary output: debug/debug_zip_command_flags.txt... done diff --git a/pkg/cli/testdata/zip/testzip_include_range_info b/pkg/cli/testdata/zip/testzip_include_range_info index 3539f7b36c48..2a3a2fc67a3b 100644 --- a/pkg/cli/testdata/zip/testzip_include_range_info +++ b/pkg/cli/testdata/zip/testzip_include_range_info @@ -134,3 +134,4 @@ debug zip --concurrency=1 --cpu-profile-duration=1s --include-range-info /dev/nu [cluster] pprof summary script... writing binary output: debug/pprof-summary.sh... done [cluster] hot range summary script... writing binary output: debug/hot-ranges.sh... done [cluster] tenant hot range summary script... writing binary output: debug/hot-ranges-tenant.sh... done +[cluster] capture debug zip flags... writing binary output: debug/debug_zip_command_flags.txt... done diff --git a/pkg/cli/testdata/zip/testzip_redacted b/pkg/cli/testdata/zip/testzip_redacted index dd3c4860f818..d1a5ebe7f462 100644 --- a/pkg/cli/testdata/zip/testzip_redacted +++ b/pkg/cli/testdata/zip/testzip_redacted @@ -134,3 +134,4 @@ debug zip --concurrency=1 --cpu-profile-duration=1s --redact /dev/null [cluster] pprof summary script... writing binary output: debug/pprof-summary.sh... done [cluster] hot range summary script... writing binary output: debug/hot-ranges.sh... done [cluster] tenant hot range summary script... writing binary output: debug/hot-ranges-tenant.sh... done +[cluster] capture debug zip flags... writing binary output: debug/debug_zip_command_flags.txt... done diff --git a/pkg/cli/testdata/zip/testzip_shared_process_virtualization b/pkg/cli/testdata/zip/testzip_shared_process_virtualization index 189e943ed901..c6e9c3ab3137 100644 --- a/pkg/cli/testdata/zip/testzip_shared_process_virtualization +++ b/pkg/cli/testdata/zip/testzip_shared_process_virtualization @@ -134,6 +134,7 @@ debug zip --concurrency=1 --cpu-profile-duration=1s /dev/null [cluster] pprof summary script... writing binary output: debug/pprof-summary.sh... 
done [cluster] hot range summary script... writing binary output: debug/hot-ranges.sh... done [cluster] tenant hot range summary script... writing binary output: debug/hot-ranges-tenant.sh... done +[cluster] capture debug zip flags... writing binary output: debug/debug_zip_command_flags.txt... done [cluster] establishing RPC connection to ... [cluster] using SQL address: ... [cluster] requesting data for debug/cluster/test-tenant/events... received response... writing JSON output: debug/cluster/test-tenant/events.json... done @@ -291,3 +292,4 @@ debug zip --concurrency=1 --cpu-profile-duration=1s /dev/null [cluster] pprof summary script... writing binary output: debug/cluster/test-tenant/pprof-summary.sh... done [cluster] hot range summary script... writing binary output: debug/cluster/test-tenant/hot-ranges.sh... done [cluster] tenant hot range summary script... writing binary output: debug/cluster/test-tenant/hot-ranges-tenant.sh... done +[cluster] capture debug zip flags... writing binary output: debug/cluster/test-tenant/debug_zip_command_flags.txt... done diff --git a/pkg/cli/testdata/zip/testzip_shared_process_virtualization_with_default_tenant b/pkg/cli/testdata/zip/testzip_shared_process_virtualization_with_default_tenant index 189e943ed901..c6e9c3ab3137 100644 --- a/pkg/cli/testdata/zip/testzip_shared_process_virtualization_with_default_tenant +++ b/pkg/cli/testdata/zip/testzip_shared_process_virtualization_with_default_tenant @@ -134,6 +134,7 @@ debug zip --concurrency=1 --cpu-profile-duration=1s /dev/null [cluster] pprof summary script... writing binary output: debug/pprof-summary.sh... done [cluster] hot range summary script... writing binary output: debug/hot-ranges.sh... done [cluster] tenant hot range summary script... writing binary output: debug/hot-ranges-tenant.sh... done +[cluster] capture debug zip flags... writing binary output: debug/debug_zip_command_flags.txt... done [cluster] establishing RPC connection to ... [cluster] using SQL address: ... [cluster] requesting data for debug/cluster/test-tenant/events... received response... writing JSON output: debug/cluster/test-tenant/events.json... done @@ -291,3 +292,4 @@ debug zip --concurrency=1 --cpu-profile-duration=1s /dev/null [cluster] pprof summary script... writing binary output: debug/cluster/test-tenant/pprof-summary.sh... done [cluster] hot range summary script... writing binary output: debug/cluster/test-tenant/hot-ranges.sh... done [cluster] tenant hot range summary script... writing binary output: debug/cluster/test-tenant/hot-ranges-tenant.sh... done +[cluster] capture debug zip flags... writing binary output: debug/cluster/test-tenant/debug_zip_command_flags.txt... 
done diff --git a/pkg/cli/tsdump.go b/pkg/cli/tsdump.go index e623f06c3f20..112009308237 100644 --- a/pkg/cli/tsdump.go +++ b/pkg/cli/tsdump.go @@ -449,7 +449,7 @@ func (d *datadogWriter) Emit(data *tspb.TimeSeriesData) error { tags = append(tags, "region:local") if debugTimeSeriesDumpOpts.clusterLabel != "" { - tags = append(tags, makeDDTag("cluster", debugTimeSeriesDumpOpts.clusterLabel)) + tags = append(tags, makeDDTag("cluster_label", debugTimeSeriesDumpOpts.clusterLabel)) } tags = append(tags, makeDDTag(uploadIDTag, d.uploadID)) diff --git a/pkg/cli/zip.go b/pkg/cli/zip.go index 6ccafac06f7e..1428f3ab9bc7 100644 --- a/pkg/cli/zip.go +++ b/pkg/cli/zip.go @@ -64,6 +64,23 @@ type debugZipContext struct { sem semaphore.Semaphore } +var filterFlags = map[string]struct{}{ + "cert-principal-map": {}, + "certs-dir": {}, + "cluster-name": {}, + "disable-cluster-name-verification": {}, + "format": {}, + "host": {}, + "url": {}, + "enterprise-require-fips-ready": {}, + "log": {}, + "log-config-file": {}, + "log-config-vars": {}, + "log-dir": {}, + "logtostderr": {}, + "vmodule": {}, +} + func (zc *debugZipContext) runZipFn( ctx context.Context, s *zipReporter, fn func(ctx context.Context) error, ) error { @@ -333,7 +350,7 @@ func runDebugZip(cmd *cobra.Command, args []string) (retErr error) { // Add a little helper script to draw attention to the existence of tags in // the profiles. { - s := zc.clusterPrinter.start("pprof summary script") + s = zc.clusterPrinter.start("pprof summary script") if err := z.createRaw(s, zc.prefix+"/pprof-summary.sh", []byte(`#!/bin/sh find . -name cpu.pprof -print0 | xargs -0 go tool pprof -tags `)); err != nil { @@ -343,7 +360,7 @@ find . -name cpu.pprof -print0 | xargs -0 go tool pprof -tags // A script to summarize the hottest ranges for a storage server's range reports. if zipCtx.includeRangeInfo { - s := zc.clusterPrinter.start("hot range summary script") + s = zc.clusterPrinter.start("hot range summary script") if err := z.createRaw(s, zc.prefix+"/hot-ranges.sh", []byte(`#!/bin/sh for stat in "queries" "writes" "reads" "write_bytes" "read_bytes" "cpu_time"; do echo "$stat" @@ -356,7 +373,7 @@ done // A script to summarize the hottest ranges for a tenant's range report. 
if zipCtx.includeRangeInfo { - s := zc.clusterPrinter.start("tenant hot range summary script") + s = zc.clusterPrinter.start("tenant hot range summary script") + if err := z.createRaw(s, zc.prefix+"/hot-ranges-tenant.sh", []byte(`#!/bin/sh for stat in "queries" "writes" "reads" "write_bytes" "read_bytes" "cpu_time"; do echo "$stat"_per_second @@ -366,6 +383,17 @@ done return err } } + + s = zr.start("capture debug zip flags") + flags := getCLIClusterFlags(true, cmd, func(flag string) bool { + _, filter := filterFlags[flag] + return filter + }) + + if err := z.createRaw(s, zc.prefix+"/debug_zip_command_flags.txt", []byte(flags)); err != nil { + return err + } + return nil }(); err != nil { return err diff --git a/pkg/cli/zip_test.go b/pkg/cli/zip_test.go index ded36dfcdeb7..e6e6407ca686 100644 --- a/pkg/cli/zip_test.go +++ b/pkg/cli/zip_test.go @@ -121,7 +121,8 @@ table_name NOT IN ( 'transaction_statistics_persisted_v22_2', 'transaction_statistics', 'tenant_usage_details', - 'pg_catalog_table_is_implemented' + 'pg_catalog_table_is_implemented', + 'fully_qualified_names' ) ORDER BY name ASC`) assert.NoError(t, err) @@ -186,6 +187,12 @@ func TestZipQueryFallback(t *testing.T) { skip.UnderRace(t, "test too slow under race") existing := zipInternalTablesPerCluster["crdb_internal.transaction_contention_events"] + + // Avoid leaking configuration changes after the tests end. + defer func() { + zipInternalTablesPerCluster["crdb_internal.transaction_contention_events"] = existing + }() + zipInternalTablesPerCluster["crdb_internal.transaction_contention_events"] = TableRegistryConfig{ nonSensitiveCols: existing.nonSensitiveCols, // We want this to fail to trigger the fallback. @@ -1195,3 +1202,53 @@ func TestZipJobTrace(t *testing.T) { jobutils.WaitForJobToSucceed(t, runner, importJobID) jobutils.WaitForJobToSucceed(t, runner, importJobID2) } + +// This tests the command flag values set during command execution.
+func TestCommandFlags(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + dir, cleanupFn := testutils.TempDir(t) + defer cleanupFn() + c := NewCLITest(TestCLIParams{ + StoreSpecs: []base.StoreSpec{{ + Path: dir, + }}, + }) + defer c.Cleanup() + + _, err := c.RunWithCapture("debug zip --concurrency=1 --cpu-profile-duration=0 --exclude-nodes=1" + + " --redact --nodes=1 --exclude-files=*.log --include-goroutine-stacks --include-running-job-traces " + dir + "/debug.zip") + if err != nil { + t.Fatal(err) + } + + r, err := zip.OpenReader(dir + "/debug.zip") + if err != nil { + t.Fatal(err) + } + + for _, f := range r.File { + if f.Name == "debug/debug_zip_command_flags.txt" { + rc, err := f.Open() + if err != nil { + t.Fatal(err) + } + defer rc.Close() + + actualFlags, err := io.ReadAll(rc) + if err != nil { + t.Fatal(err) + } + assert.Equal(t, " --concurrency=1 --cpu-profile-duration=0s --exclude-files=[*.log] --exclude-nodes=1"+ + " --include-goroutine-stacks=true --include-running-job-traces=true --insecure=false --nodes=1 --redact=true", + string(actualFlags)) + return + } + } + assert.Fail(t, "debug/debug_zip_command_flags.txt is not generated") + + if err = r.Close(); err != nil { + t.Fatal(err) + } +} diff --git a/pkg/cli/zip_upload.go b/pkg/cli/zip_upload.go index ae9d2600553a..31ee50698417 100644 --- a/pkg/cli/zip_upload.go +++ b/pkg/cli/zip_upload.go @@ -12,24 +12,35 @@ package cli import ( "bytes" + "compress/gzip" "context" "encoding/json" "fmt" "io" + "math/rand" "mime/multipart" "net/http" "net/textproto" "os" "path" "path/filepath" + "regexp" "sort" "strings" "time" + "cloud.google.com/go/storage" "github.com/cockroachdb/cockroach/pkg/util/httputil" + "github.com/cockroachdb/cockroach/pkg/util/log" + "github.com/cockroachdb/cockroach/pkg/util/log/logpb" + "github.com/cockroachdb/cockroach/pkg/util/syncutil" + "github.com/cockroachdb/cockroach/pkg/util/system" "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/cockroach/pkg/util/uuid" + "github.com/cockroachdb/errors" "github.com/spf13/cobra" + "golang.org/x/oauth2/google" + "google.golang.org/api/option" ) type profileUploadEvent struct { @@ -44,43 +55,76 @@ type profileUploadEvent struct { type uploadZipArtifactFunc func(ctx context.Context, uuid string, debugDirPath string) error const ( - datadogAPIKeyHeader = "DD-API-KEY" + // default flag values + defaultDDSite = "us5" + defaultGCPProjectID = "arjun-sandbox-424904" // TODO: change this project ID + + // datadog HTTP headers + datadogAPIKeyHeader = "DD-API-KEY" + datadogAppKeyHeader = "DD-APPLICATION-KEY" + + // the path pattern to search for specific artifacts in the debug zip directory zippedProfilePattern = "nodes/*/*.pprof" - profileFamily = "go" - profileFormat = "pprof" + zippedLogsPattern = "nodes/*/logs/*" - // this is not the pprof version, but the version of the profile upload format supported by datadog + // this is not the pprof version, but the version of the profile + // upload format supported by datadog profileVersion = "4" + profileFamily = "go" // names of mandatory tag nodeIDTag = "node_id" uploadIDTag = "upload_id" clusterTag = "cluster" + + // datadog endpoint URLs + datadogProfileUploadURLTmpl = "https://intake.profile.%s/v1/input" + datadogCreateArchiveURLTmpl = "https://%s/api/v2/logs/config/archives" + + // datadog archive attributes + ddArchiveType = "archives" + ddArchiveDestinationType = "gcs" + ddArchiveQuery = "-*" // 
will make sure to not archive any live logs + ddArchiveBucketName = "debugzip-archives" + ddArchiveDefaultClient = "datadog-archive" // TODO(arjunmahishi): make this a flag also ) var debugZipUploadOpts = struct { - include []string - ddAPIKey string - ddProfileUploadURL string - clusterName string - tags []string + include []string + ddAPIKey string + ddAPPKey string + ddSite string + clusterName string + gcpProjectID string + tags []string + from, to timestampValue + logFormat string + maxConcurrentUploads int + reporter *zipUploadReporter }{ - ddProfileUploadURL: "https://intake.profile.datadoghq.com/v1/input", + maxConcurrentUploads: system.NumCPU(), + reporter: newReporter(os.Stderr), } // This is the list of all supported artifact types. The "possible values" part // in the help text is generated from this list. So, make sure to keep this updated -var zipArtifactTypes = []string{"profiles"} +// var zipArtifactTypes = []string{"profiles", "logs"} +// TODO(arjunmahishi): Removing the profiles upload for now. It has started +// failing for some reason. Will fix this later +var zipArtifactTypes = []string{"logs"} // uploadZipArtifactFuncs is a registry of handler functions for each artifact type. // While adding/removing functions from here, make sure to update // the zipArtifactTypes list as well var uploadZipArtifactFuncs = map[string]uploadZipArtifactFunc{ "profiles": uploadZipProfiles, + "logs": uploadZipLogs, } -// default tags -var ddProfileTags = []string{"service:CRDB-SH", "env:debug"} +// default datadog tags. Source has to be "cockroachdb" for the logs to be +// ingested correctly. This will make sure that the logs pass through the right +// pipeline which enriches the logs with more fields. +var defaultDDTags = []string{"service:CRDB-SH", "env:debug", "source:cockroachdb"} func runDebugZipUpload(cmd *cobra.Command, args []string) error { if err := validateZipUploadReadiness(); err != nil { @@ -96,11 +140,12 @@ func runDebugZipUpload(cmd *cobra.Command, args []string) error { artifactsToUpload = debugZipUploadOpts.include } - // run the upload functions - // TODO(arjunmahishi): Make this concurrent once there are multiple artifacts to upload + // run the upload functions for each artifact type. This can run sequentially. + // All the concurrency is contained within the upload functions. 
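+	// Illustrative dispatch (flag value hypothetical): with `--include=logs,profiles`,
+	// the loop below invokes uploadZipLogs and then uploadZipProfiles via the
+	// uploadZipArtifactFuncs registry; a failure in one artifact type is printed
+	// and the remaining types still run.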
for _, artType := range artifactsToUpload { if err := uploadZipArtifactFuncs[artType](cmd.Context(), uploadID, args[0]); err != nil { - return err + // Log the error and continue with the next artifact + fmt.Printf("Failed to upload %s: %s\n", artType, err) } } @@ -109,6 +154,24 @@ func runDebugZipUpload(cmd *cobra.Command, args []string) error { } func validateZipUploadReadiness() error { + var ( + includeLookup = map[string]struct{}{} + artifactsToUpload = zipArtifactTypes + ) + + if len(debugZipUploadOpts.include) > 0 { + artifactsToUpload = debugZipUploadOpts.include + } + for _, inc := range artifactsToUpload { + if _, ok := includeLookup[inc]; ok { + // if the artifact type is already included, ignore the duplicate and + // continue + continue + } + + includeLookup[inc] = struct{}{} + } + if debugZipUploadOpts.ddAPIKey == "" { return fmt.Errorf("datadog API key is required for uploading profiles") } @@ -124,6 +187,24 @@ func validateZipUploadReadiness() error { } } + // validate the datadog site name + if _, ok := ddSiteToHostMap[debugZipUploadOpts.ddSite]; !ok { + return fmt.Errorf("unsupported datadog site '%s'", debugZipUploadOpts.ddSite) + } + + // special validations when logs are to be uploaded + _, ok := log.FormatParsers[debugZipUploadOpts.logFormat] + _, shouldUploadLogs := includeLookup["logs"] + if shouldUploadLogs { + if !ok { + return fmt.Errorf("unsupported log format '%s'", debugZipUploadOpts.logFormat) + } + + if debugZipUploadOpts.ddAPPKey == "" { + return fmt.Errorf("datadog APP key is required for uploading logs") + } + } + return nil } @@ -147,7 +228,7 @@ func uploadZipProfiles(ctx context.Context, uploadID string, debugDirPath string req, err := newProfileUploadReq( ctx, paths, appendUserTags( append( - ddProfileTags, makeDDTag(nodeIDTag, nodeID), makeDDTag(uploadIDTag, uploadID), + defaultDDTags, makeDDTag(nodeIDTag, nodeID), makeDDTag(uploadIDTag, uploadID), makeDDTag(clusterTag, debugZipUploadOpts.clusterName), ), // system generated tags debugZipUploadOpts.tags..., // user provided tags @@ -157,36 +238,13 @@ func uploadZipProfiles(ctx context.Context, uploadID string, debugDirPath string return err } - resp, err := doUploadProfileReq(req) - if err != nil { - return err - } - defer func() { - if err := resp.Body.Close(); err != nil { - fmt.Println("failed to close response body:", err) - } - }() - - if resp.StatusCode != http.StatusOK { - errMsg := fmt.Sprintf( - "Failed to upload profiles of node %s to datadog (%s)", - nodeID, (strings.Join(paths, ", ")), - ) - if resp.Body != nil { - body, err := io.ReadAll(resp.Body) - if err != nil { - return err - } - - return fmt.Errorf("%s: %s", errMsg, string(body)) - } - - return fmt.Errorf("%s: %s", errMsg, resp.Status) + if _, err := doUploadReq(req); err != nil { + return fmt.Errorf("failed to upload profiles of node %s: %w", nodeID, err) } - fmt.Printf("Uploaded profiles of node %s to datadog (%s)\n", nodeID, strings.Join(paths, ", ")) - fmt.Printf("Explore this profile on datadog: "+ - "https://{{ datadog domain }}/profiling/explorer?query=upload_id:%s\n", uploadID) + fmt.Fprintf(os.Stderr, "Uploaded profiles of node %s to datadog (%s)\n", nodeID, strings.Join(paths, ", ")) + fmt.Fprintf(os.Stderr, "Explore the profiles on datadog: "+ + "https://{{ datadog domain }}/profiling/explorer?query=%s:%s\n", uploadIDTag, uploadID) } return nil @@ -248,7 +306,7 @@ func newProfileUploadReq( return nil, err } - req, err := http.NewRequestWithContext(ctx, http.MethodPost, debugZipUploadOpts.ddProfileUploadURL, &body) + req, 
err := http.NewRequestWithContext(ctx, http.MethodPost, makeDDURL(datadogProfileUploadURLTmpl), &body) if err != nil { return nil, err } @@ -258,6 +316,323 @@ func newProfileUploadReq( return req, nil } +func uploadZipLogs(ctx context.Context, uploadID string, debugDirPath string) error { + paths, err := expandPatterns([]string{path.Join(debugDirPath, zippedLogsPattern)}) + if err != nil { + return err + } + + filePattern := regexp.MustCompile(logFilePattern) + files, err := findLogFiles( + paths, filePattern, nil, groupIndex(filePattern, "program"), + ) + if err != nil { + return err + } + + // chunkMap holds the mapping of target path names to the chunks of log lines + chunkMap := make(map[string][][]byte) + firstEventTime, lastEventTime := time.Time{}, time.Time{} + gcsPathPrefix := path.Join(debugZipUploadOpts.clusterName, uploadID) + for _, file := range files { + pathParts := strings.Split(strings.TrimPrefix(file.path, debugDirPath), "/") + inputEditMode := log.SelectEditMode(false /* redactable */, false /* redactInput */) + stream, err := newFileLogStream( + file, time.Time(debugZipUploadOpts.from), time.Time(debugZipUploadOpts.to), + inputEditMode, debugZipUploadOpts.logFormat, + ) + if err != nil { + return err + } + + for e, ok := stream.peek(); ok; e, ok = stream.peek() { + if firstEventTime.IsZero() { + firstEventTime = timeutil.Unix(0, e.Time) // from the first log entry + } + lastEventTime = timeutil.Unix(0, e.Time) // from the last log entry + + // The target path is constructed like this: <cluster>/<uploadID>/dt=20210901/hour=15 + targetPath := path.Join( + gcsPathPrefix, timeutil.Unix(0, e.Time).Format("dt=20060102/hour=15"), + ) + rawLine, err := logEntryToJSON(e, appendUserTags( + append( + defaultDDTags, makeDDTag(uploadIDTag, uploadID), makeDDTag(nodeIDTag, pathParts[2]), + makeDDTag(clusterTag, debugZipUploadOpts.clusterName), + ), // system generated tags + debugZipUploadOpts.tags..., // user provided tags + )) + if err != nil { + return err + } + + if _, ok := chunkMap[targetPath]; !ok { + chunkMap[targetPath] = [][]byte{} + } + + // TODO(arjunmahishi): Can this map hold all the data? We might be able + // to start the upload here itself. So that we don't have to keep + // everything in memory. But since we are running this on our MacBooks + // for now, we can afford to keep everything in memory.
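+			// Illustrative example (values hypothetical, not from a real run): a log
+			// entry stamped 2021-09-01 15:04:05 UTC, uploaded for cluster "ABC" with
+			// upload ID "123", is appended to the chunk keyed by
+			// ABC/123/dt=20210901/hour=15, i.e. one chunk per
+			// cluster/upload/day/hour GCS object.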
+ chunkMap[targetPath] = append(chunkMap[targetPath], rawLine) + stream.pop() + } + + if err := stream.error(); err != nil { + if err.Error() == "EOF" { + continue + } + return err + } + } + + if err := writeLogsToGCS(ctx, chunkMap); err != nil { + return err + } + + if err := setupDDArchive(ctx, gcsPathPrefix, uploadID); err != nil { + return errors.Wrap(err, "failed to setup datadog archive") + } + + printRehydrationSteps(uploadID, uploadID, firstEventTime, lastEventTime) + return nil +} + +type ddArchivePayload struct { + Type string `json:"type"` + Attributes ddArchiveAttributes `json:"attributes"` +} + +type ddArchiveAttributes struct { + Name string `json:"name"` + Query string `json:"query"` + Destination ddArchiveDestination `json:"destination"` +} + +type ddArchiveDestination struct { + Type string `json:"type"` + Path string `json:"path"` + Bucket string `json:"bucket"` + Integration ddArchiveIntegration `json:"integration"` +} + +type ddArchiveIntegration struct { + ProjectID string `json:"project_id"` + ClientEmail string `json:"client_email"` +} + +func setupDDArchive(ctx context.Context, pathPrefix, archiveName string) error { + rawPayload, err := json.Marshal(struct { + Data ddArchivePayload `json:"data"` + }{ + Data: ddArchivePayload{ + Type: ddArchiveType, + Attributes: ddArchiveAttributes{ + Name: archiveName, + Query: ddArchiveQuery, + Destination: ddArchiveDestination{ + Type: ddArchiveDestinationType, + Bucket: ddArchiveBucketName, + Path: pathPrefix, + Integration: ddArchiveIntegration{ + ProjectID: debugZipUploadOpts.gcpProjectID, + ClientEmail: fmt.Sprintf( + "%s@%s.iam.gserviceaccount.com", + ddArchiveDefaultClient, debugZipUploadOpts.gcpProjectID, + ), + }, + }, + }, + }, + }) + if err != nil { + return err + } + + req, err := http.NewRequestWithContext( + ctx, http.MethodPost, makeDDURL(datadogCreateArchiveURLTmpl), bytes.NewReader(rawPayload), + ) + if err != nil { + return err + } + + req.Header.Set(httputil.ContentTypeHeader, httputil.JSONContentType) + req.Header.Set(datadogAPIKeyHeader, debugZipUploadOpts.ddAPIKey) + req.Header.Set(datadogAppKeyHeader, debugZipUploadOpts.ddAPPKey) + + if _, err := doUploadReq(req); err != nil { + return fmt.Errorf("failed to create datadog archive: %w", err) + } + + return nil +} + +type gcsWorkerSig struct { + key string + data []byte +} + +// writeLogsToGCS is a function that concurrently writes the logs to GCS. +// The chunkMap is expected to be a map of time-based paths to lists of log lines. +// +// Example: { +// "dt=20210901/hour=15": [[]byte, []byte, ...], +// } + +// +// Each path will be uploaded as a separate file. The final file name will be +// randomly generated just before uploading. +var writeLogsToGCS = func(ctx context.Context, chunkMap map[string][][]byte) error { + gcsClient, closeGCSClient, err := newGCSClient(ctx) + if err != nil { + return err + } + defer closeGCSClient() + + // The concurrency could be managed with something like golang.org/x/sync/errgroup, + // but using channels gives us two advantages: + // 1. We can fail fast, i.e. exit as soon as one upload fails + // 2. We can monitor the progress as the uploads happen and report it to the + // CLI user (this seems like a non-feature, but it is super useful when the + // upload times are long) + workChan := make(chan gcsWorkerSig, len(chunkMap)) + doneChan := make(chan error, len(chunkMap)) + for key, lines := range chunkMap { + workChan <- gcsWorkerSig{key, bytes.Join(lines, []byte("\n"))} + } + + // We can schedule the workers aggressively because this is a network-IO-bound + // workload; most of the time is spent waiting for the GCS API to complete the + // uploads.
+ noOfWorkers := min(system.NumCPU()*4, len(chunkMap)) + for i := 0; i < noOfWorkers; i++ { + go func() { + for sig := range workChan { + filename := path.Join(sig.key, fmt.Sprintf( + "archive_%s_%s_%s.json.gz", + newRandStr(6, true /* numericOnly */), newRandStr(4, true), newRandStr(22, false), + )) + + objectWriter := gcsClient.Bucket(ddArchiveBucketName).Object(filename).NewWriter(ctx) + w := gzip.NewWriter(objectWriter) + _, err := w.Write(sig.data) + if err != nil { + doneChan <- err + return + } + + if err := w.Close(); err != nil { + doneChan <- err + return + } + + if err := objectWriter.Close(); err != nil { + doneChan <- err + return + } + + doneChan <- nil + } + }() + } + + report := newReporter(os.Stderr).newReport("logs") + doneCount := 0.0 + for i := 0; i < len(chunkMap); i++ { + if err := <-doneChan; err != nil { + // stop everything and return the error + close(workChan) + close(doneChan) + return err + } + + doneCount++ + report((doneCount / float64(len(chunkMap))) * 100) + } + + close(workChan) + close(doneChan) + return nil +} + +func newGCSClient(ctx context.Context) (*storage.Client, func(), error) { + tokenSource, err := google.DefaultTokenSource(ctx) + if err != nil { + return nil, nil, err + } + + gcsClient, err := storage.NewClient(ctx, option.WithTokenSource(tokenSource)) + if err != nil { + return nil, nil, err + } + + return gcsClient, func() { + // return a function that already handles the closing error + if err := gcsClient.Close(); err != nil { + fmt.Println(err) + } + }, nil +} + +type ddLogEntry struct { + logpb.Entry + + Date string `json:"date"` + Timestamp int64 `json:"timestamp"` + Channel string `json:"channel"` + Severity string `json:"severity"` + + // fields to be omitted + Message any `json:"message,omitempty"` + Time string `json:"time,omitempty"` + Tags string `json:"tags,omitempty"` +} + +// logEntryToJSON converts a logpb.Entry to a JSON byte slice and also +// transforms a few fields to use the correct types. The JSON format is based on +// the specification provided by datadog. +// Refer: https://gist.github.com/ckelner/edc0e4efe4fa110f6b6b61f69d580171 +func logEntryToJSON(e logpb.Entry, tags []string) ([]byte, error) { + var message any = e.Message + if strings.HasPrefix(e.Message, "{") { + // If the message is already a JSON object, we don't want to escape it + // by wrapping it in quotes. Instead, we want to include it as a nested + // object in the final JSON output. So, we can override the Message field + // with the json.RawMessage instead of string. This will prevent the + // message from being escaped.
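+		// Illustrative example: with e.Message = `{"foo": "bar"}`, marshaling the
+		// plain string would produce the escaped value "{\"foo\": \"bar\"}", whereas
+		// json.RawMessage embeds it verbatim as the nested object {"foo": "bar"}.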
+ message = json.RawMessage(e.Message) + } + + date := timeutil.Unix(0, e.Time).Format(time.RFC3339) + timestamp := e.Time / 1e9 + + return json.Marshal(struct { + // override the following fields in the embedded logpb.Entry struct + Timestamp int64 `json:"timestamp"` + Date string `json:"date"` + Message any `json:"message"` + Tags []string `json:"tags"` + ID string `json:"_id"` + Attributes ddLogEntry `json:"attributes"` + }{ + Timestamp: timestamp, + Date: date, + Message: message, + Tags: tags, + ID: newRandStr(24, false /* numericOnly */), + Attributes: ddLogEntry{ + Entry: e, + Date: date, + Timestamp: timestamp, + Channel: e.Channel.String(), + Severity: e.Severity.String(), + + // remove the below fields via the omitempty tag + Time: "", + Tags: "", + }, + }) +} + // appendUserTags will make sure there are no duplicates in the final list of tags. // In case of duplicates, the user provided tags will take precedence. func appendUserTags(systemTags []string, tags ...string) []string { @@ -302,10 +677,32 @@ func makeDDTag(key, value string) string { return fmt.Sprintf("%s:%s", key, value) } -// doUploadProfileReq is a variable that holds the function that literally just sends the request. -// This is useful to mock the datadog API's response in tests. -var doUploadProfileReq = func(req *http.Request) (*http.Response, error) { - return http.DefaultClient.Do(req) +// doUploadReq is a variable that holds the function that makes the actual HTTP request. +// There is also some error handling logic in this function. This is a variable so that +// we can mock this function in the tests. +var doUploadReq = func(req *http.Request) ([]byte, error) { + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, err + } + + defer func() { + if err := resp.Body.Close(); err != nil { + fmt.Println("failed to close response body:", err) + } + }() + + rawBody, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + // treat all non-2xx status codes as errors + if resp.StatusCode/100 != 2 { + return nil, fmt.Errorf("status code: %s, err message: %s", resp.Status, string(rawBody)) + } + + return rawBody, nil } // a wrapper around uuid.MakeV4().String() to make the tests more deterministic. @@ -319,3 +716,117 @@ var newUploadID = func(cluster string) string { ), ) } + +// newRandStr generates a random alphanumeric string of the given length. This is used +// for the _id field in the log entries and for the name of the archives +var newRandStr = func(length int, numericOnly bool) string { + charSet := "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" + if numericOnly { + charSet = "0123456789" + } + + r := rand.New(rand.NewSource(timeutil.Now().UnixNano())) + b := make([]byte, length) + for i := range b { + b[i] = charSet[r.Intn(len(charSet))] + } + return string(b) +} + +func printRehydrationSteps(uploadID, archiveName string, from, to time.Time) { + msg := ` +The logs have been added to an archive and are ready for rehydration (ingestion). This has to be +triggered manually for now. This will be automated as soon as the datadog API supports it. + +Follow these steps to trigger rehydration: + + 1. Open this link in your browser: https://us5.datadoghq.com/logs/pipelines/historical-views/add + 2. In "Select Time Range" section, select the time range from "%s" to "%s" or a subset of it + 3. In "Select Archive" section, select the archive "%s" + 4. In "Name Historical Index", enter the name "%s" + 5. 
Click on "Rehydrate From Archive" + +You will receive an email notification once the rehydration is complete. +` + + // Month data year + timeFormat := "Jan 2 2006" + from = from.Truncate(time.Hour) // round down to the nearest hour + to = to.Add(time.Hour).Truncate(time.Hour) // round up to the nearest hour + fmt.Fprintf( + os.Stderr, msg, from.Format(timeFormat), to.Format(timeFormat), archiveName, uploadID, + ) +} + +// makeDDURL constructe the final datadog URL by replacing the site +// placeholder in the template. This is a simple convenience +// function. It assumes that the site is valid. This assumption is +// fine because we are validating the site early on in the flow. +func makeDDURL(tmpl string) string { + return fmt.Sprintf(tmpl, ddSiteToHostMap[debugZipUploadOpts.ddSite]) +} + +// zipUploadReporter is a simple concurrency-safe logger that can be used to +// report the progress on the upload of each artifact type in the debug zip. +// The log printed by this is updated in-place as the progress changes. +// Usage pattern: +// +// reporter := newReporter(os.Stderr) +// report := reporter.newReport("profiles") +// report(50) // 50% progress +// report(75) // 50% progress +// report(100) // 100% progress +type zipUploadReporter struct { + syncutil.RWMutex + reports map[string]float64 + output string + logWriter io.Writer +} + +func (r *zipUploadReporter) print() { + r.RLock() + defer r.RUnlock() + + // move the cursor to the top + currentLines := strings.Count(r.output, "\n") + if currentLines > 0 { + fmt.Fprintf(r.logWriter, "\033[%dA", currentLines) + } + + reports := []string{} + for name := range r.reports { + reports = append(reports, name) + } + sort.Strings(reports) + + var outputBuilder strings.Builder + for _, name := range reports { + progress := r.reports[name] + outputBuilder.WriteString(fmt.Sprintf("%s upload progress: %.2f%%\n", name, progress)) + } + + r.output = outputBuilder.String() + fmt.Fprint(r.logWriter, r.output) +} + +func (r *zipUploadReporter) newReport(name string) func(float64) { + r.Lock() + defer r.print() + defer r.Unlock() + + r.reports[name] = 0 + return func(progress float64) { + r.Lock() + defer r.print() + defer r.Unlock() + + r.reports[name] = progress + } +} + +func newReporter(logWriter io.Writer) *zipUploadReporter { + return &zipUploadReporter{ + reports: make(map[string]float64), + logWriter: logWriter, + } +} diff --git a/pkg/cli/zip_upload_test.go b/pkg/cli/zip_upload_test.go index ae98e0e930bd..36f56e0ccafe 100644 --- a/pkg/cli/zip_upload_test.go +++ b/pkg/cli/zip_upload_test.go @@ -12,6 +12,7 @@ package cli import ( "bytes" + "context" "encoding/json" "fmt" "io" @@ -28,6 +29,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" + "github.com/cockroachdb/cockroach/pkg/util/log/logpb" "github.com/cockroachdb/cockroach/pkg/util/uuid" "github.com/cockroachdb/datadriven" "github.com/google/pprof/profile" @@ -35,37 +37,71 @@ import ( "github.com/stretchr/testify/require" ) +type zipUploadTestContents struct { + Nodes map[int]struct { + Profiles []uploadProfileReq `json:"profiles"` + Logs []uploadLogsReq `json:"logs"` + } `json:"nodes"` +} + +type uploadLogsReq struct { + Name string `json:"name"` + Lines []string `json:"lines"` +} + type uploadProfileReq struct { Type string `json:"type"` Timestamp int64 `json:"timestamp"` Duration int64 `json:"duration"` } -func 
setupZipDirWithProfiles(t *testing.T, inputs map[int][]uploadProfileReq) (string, func()) { +func setupZipDir(t *testing.T, inputs zipUploadTestContents) (string, func()) { t.Helper() // make sure that the debug directory name is unique. Or the tests will be flaky. debugDir := path.Join(os.TempDir(), fmt.Sprintf("debug-%s/", uuid.MakeV4().String())) - for nodeID, nodeInputs := range inputs { - // create a subdirectory for each node + for nodeID, nodeInputs := range inputs.Nodes { + // setup profiles profDir := path.Join(debugDir, fmt.Sprintf("nodes/%d/", nodeID)) require.NoError(t, os.MkdirAll(profDir, 0755)) - for _, i := range nodeInputs { + for _, prof := range nodeInputs.Profiles { p := &profile.Profile{ - TimeNanos: time.Unix(i.Timestamp, 0).UnixNano(), - DurationNanos: i.Duration, + TimeNanos: time.Unix(prof.Timestamp, 0).UnixNano(), + DurationNanos: prof.Duration, SampleType: []*profile.ValueType{ - {Type: i.Type}, + {Type: prof.Type}, }, } file, err := os.Create( - path.Join(profDir, fmt.Sprintf("%s.pprof", i.Type)), + path.Join(profDir, fmt.Sprintf("%s.pprof", prof.Type)), ) require.NoError(t, err) require.NoError(t, p.Write(file)) + require.NoError(t, file.Close()) + } + + // setup logs + logDir := path.Join(debugDir, fmt.Sprintf("nodes/%d/logs", nodeID)) + require.NoError(t, os.MkdirAll(logDir, 0755)) + + for _, log := range nodeInputs.Logs { + var logBuilder bytes.Buffer + for _, line := range log.Lines { + logBuilder.WriteString(line) + logBuilder.WriteString("\n") + } + + file, err := os.Create( + path.Join(logDir, log.Name), + ) + require.NoError(t, err) + + _, err = file.Write(logBuilder.Bytes()) + require.NoError(t, err) + require.NoError(t, file.Close()) } } @@ -74,109 +110,103 @@ func setupZipDirWithProfiles(t *testing.T, inputs map[int][]uploadProfileReq) (s } } -func TestUploadZipProfiles(t *testing.T) { +func TestUploadZipEndToEnd(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) defer testutils.TestingHook(&newUploadID, func(string) string { return "123" })() + defer testutils.TestingHook(&newRandStr, func(l int, n bool) string { + if n { + return "123" + } + return "a1b2c3" + })() - defer testutils.TestingHook(&doUploadProfileReq, - func(req *http.Request) (*http.Response, error) { + defer testutils.TestingHook(&doUploadReq, + func(req *http.Request) ([]byte, error) { defer req.Body.Close() - _, params, _ := mime.ParseMediaType(req.Header.Get("Content-Type")) - reader := multipart.NewReader(req.Body, params["boundary"]) - - // find the "event" part in the multipart request and copy it to the final output - for { - part, err := reader.NextPart() - if err == io.EOF { - break - } - - if part.FormName() == "event" { - var event profileUploadEvent - require.NoError(t, json.NewDecoder(part).Decode(&event)) - - if strings.Contains(event.Tags, "ERR") { - // this is a test to simulate a client error - return &http.Response{ - StatusCode: 400, - Body: io.NopCloser(strings.NewReader("'runtime' is a required field")), - }, nil - } + switch req.URL.Path { + case "/v1/input": + return uploadProfileHook(t, req) + case "/api/v2/logs/config/archives": + return setupDDArchiveHook(t, req) + default: + return nil, fmt.Errorf( + "unexpected request is being made to datadog: %s", req.URL.Path, + ) + } + }, + )() - // validate the timestamps outside the data-driven test framework - // to keep the test deterministic. 
- start, err := time.Parse(time.RFC3339Nano, event.Start) - require.NoError(t, err) + defer testutils.TestingHook(&writeLogsToGCS, writeLogsToGCSHook)() - end, err := time.Parse(time.RFC3339Nano, event.End) - require.NoError(t, err) + datadriven.Walk(t, "testdata/upload", func(t *testing.T, path string) { + datadriven.RunTest(t, path, func(t *testing.T, d *datadriven.TestData) string { + c := NewCLITest(TestCLIParams{}) + defer c.Cleanup() + debugZipUploadOpts.include = nil - require.Equal(t, time.Second*5, end.Sub(start)) - event.Start = "" - event.End = "" + var finaloutput bytes.Buffer - // require.NoError(t, json.NewEncoder(&finaloutput).Encode(event)) - rawEvent, err := json.Marshal(event) - require.NoError(t, err) + var testInput zipUploadTestContents + require.NoError(t, json.Unmarshal([]byte(d.Input), &testInput)) - // print the event so that it gets captured as a part of RunWithCapture - fmt.Println(string(rawEvent)) - } + var tags string + if d.HasArg("tags") { + d.ScanArgs(t, "tags", &tags) + tags = fmt.Sprintf("--tags=%s", tags) + } else { + debugZipUploadOpts.tags = nil } - return &http.Response{ - StatusCode: 200, - Body: io.NopCloser(strings.NewReader("200 OK")), - }, nil - }, - )() - - datadriven.RunTest(t, "testdata/upload/profiles", func(t *testing.T, d *datadriven.TestData) string { - c := NewCLITest(TestCLIParams{}) - defer c.Cleanup() - - var finaloutput bytes.Buffer + clusterNameArg := "--cluster=ABC" + if d.HasArg("skip-cluster-name") { + debugZipUploadOpts.clusterName = "" + clusterNameArg = "" + } - var testInput map[int][]uploadProfileReq - require.NoError(t, json.Unmarshal([]byte(d.Input), &testInput)) + var includeFlag string // no include flag by default + switch d.Cmd { + case "upload-profiles": + includeFlag = "--include=profiles" + case "upload-logs": + var logFormat string + if d.HasArg("log-format") { + d.ScanArgs(t, "log-format", &logFormat) + } - var tags string - if d.HasArg("tags") { - d.ScanArgs(t, "tags", &tags) - tags = fmt.Sprintf("--tags=%s", tags) - } else { - debugZipUploadOpts.tags = nil - } + if logFormat == "" { + logFormat = "crdb-v1" + } - clusterNameArg := "--cluster=ABC" - if d.HasArg("skip-cluster-name") { - debugZipUploadOpts.clusterName = "" - clusterNameArg = "" - } + includeFlag = fmt.Sprintf("--include=logs --log-format=%s", logFormat) + } - debugDir, cleanup := setupZipDirWithProfiles(t, testInput) - defer cleanup() + debugDir, cleanup := setupZipDir(t, testInput) + defer cleanup() - stdout, err := c.RunWithCapture( - fmt.Sprintf("debug zip upload %s --dd-api-key=dd-api-key %s %s", debugDir, tags, clusterNameArg), - ) - require.NoError(t, err) + stdout, err := c.RunWithCapture(fmt.Sprintf( + "debug zip upload %s --dd-api-key=dd-api-key --dd-app-key=dd-app-key %s %s %s", + debugDir, tags, clusterNameArg, includeFlag, + )) + require.NoError(t, err) - // also write the STDOUT output to the finaloutput buffer. So, both the - // API request made to Datadog and the STDOUT output are validated. - _, err = finaloutput.WriteString(stdout) - require.NoError(t, err) + // also write the STDOUT output to the finaloutput buffer. So, both the + // API request made to Datadog and the STDOUT output are validated. 
+ _, err = finaloutput.WriteString(stdout) + require.NoError(t, err) - // sort the lines to avoid flakiness in the test - lines := strings.Split(finaloutput.String(), "\n") - sort.Strings(lines) + lines := strings.Split(finaloutput.String(), "\n") + // sort the lines to avoid flakiness in the test + sort.Strings(lines) - // replace the debugDir with a constant string to avoid flakiness in the test - return strings.ReplaceAll(strings.TrimSpace(strings.Join(lines, "\n")), debugDir, "debugDir") + // replace the debugDir with a constant string to avoid flakiness in the test + return strings.ReplaceAll(strings.TrimSpace(strings.Join(lines, "\n")), debugDir, "debugDir") + }) }) } @@ -237,3 +267,107 @@ func TestZipUploadArtifactTypes(t *testing.T) { ) } } + +func uploadProfileHook(t *testing.T, req *http.Request) ([]byte, error) { + t.Helper() + + _, params, _ := mime.ParseMediaType(req.Header.Get("Content-Type")) + reader := multipart.NewReader(req.Body, params["boundary"]) + + // find the "event" part in the multipart request and copy it to the final output + for { + part, err := reader.NextPart() + if err == io.EOF { + break + } + + if part.FormName() == "event" { + var event profileUploadEvent + require.NoError(t, json.NewDecoder(part).Decode(&event)) + + if strings.Contains(event.Tags, "ERR") { + // this is a test to simulate a client error + return nil, fmt.Errorf("status: 400, body: 'runtime' is a required field") + } + + // validate the timestamps outside the data-driven test framework + // to keep the test deterministic. + start, err := time.Parse(time.RFC3339Nano, event.Start) + require.NoError(t, err) + + end, err := time.Parse(time.RFC3339Nano, event.End) + require.NoError(t, err) + + require.Equal(t, time.Second*5, end.Sub(start)) + event.Start = "" + event.End = "" + + // require.NoError(t, json.NewEncoder(&finaloutput).Encode(event)) + rawEvent, err := json.Marshal(event) + require.NoError(t, err) + + // print the event so that it gets captured as a part of RunWithCapture + fmt.Println(string(rawEvent)) + } + } + + return []byte("200 OK"), nil +} + +func setupDDArchiveHook(t *testing.T, req *http.Request) ([]byte, error) { + t.Helper() + + var body bytes.Buffer + _, err := body.ReadFrom(req.Body) + require.NoError(t, err) + + // print the request body so that it gets captured as a part of + // RunWithCapture + fmt.Println(body.String()) + return []byte("200 OK"), nil +} + +func writeLogsToGCSHook(ctx context.Context, chunkMap map[string][][]byte) error { + out := strings.Builder{} + for k, v := range chunkMap { + out.WriteString(fmt.Sprintf("%s:\n", k)) + for _, chunk := range v { + out.WriteString(fmt.Sprintf("%s\n", string(chunk))) + } + } + + // print the logs so that it gets captured as a part of RunWithCapture + fmt.Println(out.String()) + return nil +} + +func TestLogEntryToJSON(t *testing.T) { + defer leaktest.AfterTest(t)() + + // set the maxConcurrentUploads to 1 to avoid flakiness in the test + origConc := debugZipUploadOpts.maxConcurrentUploads + debugZipUploadOpts.maxConcurrentUploads = 1 + defer func() { + debugZipUploadOpts.maxConcurrentUploads = origConc + }() + + raw, err := logEntryToJSON(logpb.Entry{ + Severity: logpb.Severity_INFO, + Channel: logpb.Channel_STORAGE, + Time: time.Date(2024, time.August, 2, 0, 0, 0, 0, time.UTC).UnixNano(), + Message: "something happened", + }, []string{}) + require.NoError(t, err) + + t.Log(string(raw)) + + raw, err = logEntryToJSON(logpb.Entry{ + Severity: logpb.Severity_INFO, + Channel: 
logpb.Channel_STORAGE, + Time: time.Date(2024, time.August, 2, 0, 0, 0, 0, time.UTC).UnixNano(), + Message: `{"foo": "bar"}`, + }, []string{}) + require.NoError(t, err) + + t.Log(string(raw)) +} diff --git a/pkg/cloud/BUILD.bazel b/pkg/cloud/BUILD.bazel index 42c7242fe7fe..9ebbba110bd8 100644 --- a/pkg/cloud/BUILD.bazel +++ b/pkg/cloud/BUILD.bazel @@ -22,6 +22,7 @@ go_library( "//pkg/settings", "//pkg/settings/cluster", "//pkg/sql/isql", + "//pkg/util/cidr", "//pkg/util/ctxgroup", "//pkg/util/ioctx", "//pkg/util/log", diff --git a/pkg/cloud/amazon/BUILD.bazel b/pkg/cloud/amazon/BUILD.bazel index 5195decf421e..e8823f410af8 100644 --- a/pkg/cloud/amazon/BUILD.bazel +++ b/pkg/cloud/amazon/BUILD.bazel @@ -27,16 +27,19 @@ go_library( "//pkg/util/syncutil", "//pkg/util/timeutil", "//pkg/util/tracing", - "@com_github_aws_aws_sdk_go//aws", - "@com_github_aws_aws_sdk_go//aws/awserr", - "@com_github_aws_aws_sdk_go//aws/client", - "@com_github_aws_aws_sdk_go//aws/credentials", - "@com_github_aws_aws_sdk_go//aws/credentials/stscreds", - "@com_github_aws_aws_sdk_go//aws/request", - "@com_github_aws_aws_sdk_go//aws/session", - "@com_github_aws_aws_sdk_go//service/kms", - "@com_github_aws_aws_sdk_go//service/s3", - "@com_github_aws_aws_sdk_go//service/s3/s3manager", + "@com_github_aws_aws_sdk_go_v2//aws", + "@com_github_aws_aws_sdk_go_v2//aws/retry", + "@com_github_aws_aws_sdk_go_v2//aws/transport/http", + "@com_github_aws_aws_sdk_go_v2_config//:config", + "@com_github_aws_aws_sdk_go_v2_credentials//:credentials", + "@com_github_aws_aws_sdk_go_v2_credentials//stscreds", + "@com_github_aws_aws_sdk_go_v2_feature_s3_manager//:manager", + "@com_github_aws_aws_sdk_go_v2_service_kms//:kms", + "@com_github_aws_aws_sdk_go_v2_service_s3//:s3", + "@com_github_aws_aws_sdk_go_v2_service_s3//types", + "@com_github_aws_aws_sdk_go_v2_service_sts//:sts", + "@com_github_aws_smithy_go//:smithy-go", + "@com_github_aws_smithy_go//logging", "@com_github_cockroachdb_errors//:errors", "@com_github_cockroachdb_logtags//:logtags", "@io_opentelemetry_go_otel//attribute", @@ -63,9 +66,10 @@ go_test( "//pkg/testutils/skip", "//pkg/util/leaktest", "@com_github_aws_aws_sdk_go//aws/awserr", - "@com_github_aws_aws_sdk_go//aws/credentials", "@com_github_aws_aws_sdk_go//aws/request", - "@com_github_aws_aws_sdk_go//service/s3", + "@com_github_aws_aws_sdk_go_v2_config//:config", + "@com_github_aws_aws_sdk_go_v2_service_s3//types", + "@com_github_aws_smithy_go//:smithy-go", "@com_github_cockroachdb_errors//:errors", "@com_github_stretchr_testify//require", ], diff --git a/pkg/cloud/amazon/aws_kms.go b/pkg/cloud/amazon/aws_kms.go index 7eb4eef48857..b64781a93988 100644 --- a/pkg/cloud/amazon/aws_kms.go +++ b/pkg/cloud/amazon/aws_kms.go @@ -16,11 +16,12 @@ import ( "reflect" "strings" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/credentials/stscreds" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/kms" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/credentials" + "github.com/aws/aws-sdk-go-v2/credentials/stscreds" + "github.com/aws/aws-sdk-go-v2/service/kms" + "github.com/aws/aws-sdk-go-v2/service/sts" "github.com/cockroachdb/cockroach/pkg/cloud" "github.com/cockroachdb/cockroach/pkg/settings" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -35,7 +36,7 @@ const ( ) type 
awsKMS struct { - kms *kms.KMS + kms *kms.Client customerMasterKeyID string } @@ -128,38 +129,41 @@ func MakeAWSKMS(ctx context.Context, uri string, env cloud.KMSEnv) (cloud.KMS, e } region := kmsURIParams.region - awsConfig := &aws.Config{ - Credentials: credentials.NewStaticCredentials(kmsURIParams.accessKey, - kmsURIParams.secret, kmsURIParams.tempToken), + if region == "" { + // TODO(adityamaru): Maybe use the KeyID to get the region, similar to how + // we infer the region from the bucket for s3_storage. + return nil, errors.New("aws kms REGION parameter not specified") } - awsConfig.Logger = newLogAdapter(ctx) + var loadOptions []func(options *config.LoadOptions) error + addLoadOption := func(option config.LoadOptionsFunc) { + loadOptions = append(loadOptions, option) + } + addLoadOption(config.WithLogger(newLogAdapter(ctx))) if kmsURIParams.verbose { - awsConfig.LogLevel = awsVerboseLogging + addLoadOption(config.WithClientLogMode(awsVerboseLogging)) } + var endpointURI string if kmsURIParams.endpoint != "" { if env.KMSConfig().DisableHTTP { return nil, errors.New( "custom endpoints disallowed for aws kms due to --aws-kms-disable-http flag") } - awsConfig.Endpoint = &kmsURIParams.endpoint - if region == "" { - // TODO(adityamaru): Think about what the correct way to handle this - // situation is. - region = "default-region" + client, err := cloud.MakeHTTPClient(env.ClusterSettings(), cloud.NilMetrics, "aws", "KMS") + if err != nil { + return nil, err } - client, err := cloud.MakeHTTPClient(env.ClusterSettings()) + addLoadOption(config.WithHTTPClient(client)) + endpointURI, err = constructEndpointURI(kmsURIParams.endpoint) if err != nil { return nil, err } - awsConfig.HTTPClient = client } // "specified": use credentials provided in URI params; error if not present. // "implicit": enable SharedConfig, which loads in credentials from environment. // Detailed in https://docs.aws.amazon.com/sdk-for-go/api/aws/session/ // "": default to `specified`. - opts := session.Options{} switch kmsURIParams.auth { case "", cloud.AuthParamSpecified: if kmsURIParams.accessKey == "" { @@ -178,50 +182,47 @@ func MakeAWSKMS(ctx context.Context, uri string, env cloud.KMSEnv) (cloud.KMS, e AWSSecretParam, ) } - opts.Config.MergeIn(awsConfig) + addLoadOption(config.WithCredentialsProvider(aws.NewCredentialsCache( + credentials.NewStaticCredentialsProvider(kmsURIParams.accessKey, + kmsURIParams.secret, kmsURIParams.tempToken)))) case cloud.AuthParamImplicit: if env.KMSConfig().DisableImplicitCredentials { return nil, errors.New( "implicit credentials disallowed for s3 due to --external-io-disable-implicit-credentials flag") } - opts.SharedConfigState = session.SharedConfigEnable + addLoadOption(config.WithSharedConfigProfile(config.DefaultSharedConfigProfile)) default: return nil, errors.Errorf("unsupported value %s for %s", kmsURIParams.auth, cloud.AuthParam) } - sess, err := session.NewSessionWithOptions(opts) + cfg, err := config.LoadDefaultConfig(ctx, loadOptions...) if err != nil { - return nil, cloud.KMSInaccessible(errors.Wrap(err, "new aws session")) + return nil, cloud.KMSInaccessible(errors.Wrap(err, "could not initialize an aws config")) } + cfg.Region = region if kmsURIParams.roleProvider != (roleProvider{}) { // If there are delegate roles in the assume-role chain, we create a session // for each role in order for it to fetch the credentials from the next role // in the chain. 
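+	// Illustrative chain (roles hypothetical): with delegate roles [A, B] and a
+	// final role C, the base credentials assume A, A's credentials assume B, and
+	// B's credentials assume C; the KMS client below then operates as C.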
for _, delegateProvider := range kmsURIParams.delegateRoleProviders { - intermediateCreds := stscreds.NewCredentials(sess, delegateProvider.roleARN, withExternalID(delegateProvider.externalID)) - opts.Config.Credentials = intermediateCreds - - sess, err = session.NewSessionWithOptions(opts) - if err != nil { - return nil, cloud.KMSInaccessible(errors.Wrap(err, "session with intermediate credentials")) - } + client := sts.NewFromConfig(cfg, func(options *sts.Options) { + if endpointURI != "" { + options.BaseEndpoint = aws.String(endpointURI) + } + }) + intermediateCreds := stscreds.NewAssumeRoleProvider(client, delegateProvider.roleARN, withExternalID(delegateProvider.externalID)) + cfg.Credentials = intermediateCreds } - creds := stscreds.NewCredentials(sess, kmsURIParams.roleProvider.roleARN, withExternalID(kmsURIParams.roleProvider.externalID)) - opts.Config.Credentials = creds - sess, err = session.NewSessionWithOptions(opts) - if err != nil { - return nil, cloud.KMSInaccessible(errors.Wrap(err, "session with assume role credentials")) - } - } - - if region == "" { - // TODO(adityamaru): Maybe use the KeyID to get the region, similar to how - // we infer the region from the bucket for s3_storage. - return nil, errors.New("could not find the aws kms region") + client := sts.NewFromConfig(cfg, func(options *sts.Options) { + if endpointURI != "" { + options.BaseEndpoint = aws.String(endpointURI) + } + }) + creds := stscreds.NewAssumeRoleProvider(client, kmsURIParams.roleProvider.roleARN, withExternalID(kmsURIParams.roleProvider.externalID)) + cfg.Credentials = creds } - sess.Config.Region = aws.String(region) reuse := reuseKMSSession.Get(&env.ClusterSettings().SV) if reuse { @@ -234,7 +235,11 @@ func MakeAWSKMS(ctx context.Context, uri string, env cloud.KMSEnv) (cloud.KMS, e } kms := &awsKMS{ - kms: kms.New(sess), + kms: kms.NewFromConfig(cfg, func(options *kms.Options) { + if endpointURI != "" { + options.BaseEndpoint = aws.String(endpointURI) + } + }), customerMasterKeyID: kmsURIParams.customerMasterKeyID, } @@ -258,7 +263,7 @@ func (k *awsKMS) Encrypt(ctx context.Context, data []byte) ([]byte, error) { Plaintext: data, } - encryptOutput, err := k.kms.Encrypt(encryptInput) + encryptOutput, err := k.kms.Encrypt(ctx, encryptInput) if err != nil { return nil, cloud.KMSInaccessible(err) } @@ -273,7 +278,7 @@ func (k *awsKMS) Decrypt(ctx context.Context, data []byte) ([]byte, error) { CiphertextBlob: data, } - decryptOutput, err := k.kms.Decrypt(decryptInput) + decryptOutput, err := k.kms.Decrypt(ctx, decryptInput) if err != nil { return nil, cloud.KMSInaccessible(err) } diff --git a/pkg/cloud/amazon/aws_kms_test.go b/pkg/cloud/amazon/aws_kms_test.go index 4b7ba6b49aa4..b70316be43b8 100644 --- a/pkg/cloud/amazon/aws_kms_test.go +++ b/pkg/cloud/amazon/aws_kms_test.go @@ -17,7 +17,7 @@ import ( "strings" "testing" - "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go-v2/config" "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/cloud" "github.com/cockroachdb/cockroach/pkg/cloud/cloudtestutils" @@ -41,9 +41,10 @@ func TestEncryptDecryptAWS(t *testing.T) { // If environment credentials are not present, we want to // skip all AWS KMS tests, including auth-implicit, even though // it is not used in auth-implicit. 
- _, err := credentials.NewEnvCredentials().Get() - if err != nil { - skip.IgnoreLint(t, "Test only works with AWS credentials") + envConfig, err := config.NewEnvConfig() + require.NoError(t, err) + if !envConfig.Credentials.HasKeys() { + skip.IgnoreLint(t, "No AWS credentials") } q := make(url.Values) @@ -95,8 +96,11 @@ func TestEncryptDecryptAWS(t *testing.T) { // in the AWS console, then set it up locally. // https://docs.aws.com/cli/latest/userguide/cli-configure-role.html // We only run this test if default role exists. - credentialsProvider := credentials.SharedCredentialsProvider{} - _, err := credentialsProvider.Retrieve() + ctx := context.Background() + cfg, err := config.LoadDefaultConfig(ctx, + config.WithSharedConfigProfile(config.DefaultSharedConfigProfile)) + require.NoError(t, err) + _, err = cfg.Credentials.Retrieve(ctx) if err != nil { skip.IgnoreLint(t, err) } @@ -130,9 +134,10 @@ func TestEncryptDecryptAWSAssumeRole(t *testing.T) { // If environment credentials are not present, we want to // skip all AWS KMS tests, including auth-implicit, even though // it is not used in auth-implicit. - _, err := credentials.NewEnvCredentials().Get() - if err != nil { - skip.IgnoreLint(t, "Test only works with AWS credentials") + envConfig, err := config.NewEnvConfig() + require.NoError(t, err) + if !envConfig.Credentials.HasKeys() { + skip.IgnoreLint(t, "No AWS credentials") } q := make(url.Values) @@ -172,8 +177,11 @@ func TestEncryptDecryptAWSAssumeRole(t *testing.T) { // in the AWS console, then set it up locally. // https://docs.aws.com/cli/latest/userguide/cli-configure-role.html // We only run this test if default role exists. - credentialsProvider := credentials.SharedCredentialsProvider{} - _, err := credentialsProvider.Retrieve() + ctx := context.Background() + cfg, err := config.LoadDefaultConfig(ctx, + config.WithSharedConfigProfile(config.DefaultSharedConfigProfile)) + require.NoError(t, err) + _, err = cfg.Credentials.Retrieve(ctx) if err != nil { skip.IgnoreLint(t, err) } @@ -333,7 +341,7 @@ func TestAWSKMSInaccessibleError(t *testing.T) { q2.Set(AWSSecretParam, q.Get(AWSSecretParam)+"garbage") uri := fmt.Sprintf("%s:///%s?%s", awsKMSScheme, keyID, q2.Encode()) - cloudtestutils.RequireKMSInaccessibleErrorContaining(ctx, t, uri, "status code: 400") + cloudtestutils.RequireKMSInaccessibleErrorContaining(ctx, t, uri, "StatusCode: 400") }) t.Run("incorrect-kms", func(t *testing.T) { diff --git a/pkg/cloud/amazon/s3_storage.go b/pkg/cloud/amazon/s3_storage.go index e0f984b6dbc9..50235e31119b 100644 --- a/pkg/cloud/amazon/s3_storage.go +++ b/pkg/cloud/amazon/s3_storage.go @@ -21,15 +21,18 @@ import ( "strings" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/credentials/stscreds" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/aws/aws-sdk-go/service/s3/s3manager" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/retry" + awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/credentials" + "github.com/aws/aws-sdk-go-v2/credentials/stscreds" + "github.com/aws/aws-sdk-go-v2/feature/s3/manager" + 
"github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/aws-sdk-go-v2/service/sts" + "github.com/aws/smithy-go" + "github.com/aws/smithy-go/logging" "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/cloud" "github.com/cockroachdb/cockroach/pkg/cloud/cloudpb" @@ -82,6 +85,8 @@ const ( // scheme component of an S3 URI. scheme = "s3" + + checksumAlgorithm = types.ChecksumAlgorithmSha256 ) // NightlyEnvVarS3Params maps param keys that get added to an S3 @@ -108,18 +113,15 @@ type s3Storage struct { ioConf base.ExternalIODirConfig settings *cluster.Settings prefix string + metrics *cloud.Metrics opts s3ClientConfig cached *s3Client } -var _ request.Retryer = &customRetryer{} - // customRetryer implements the `request.Retryer` interface and allows for // customization of the retry behaviour of an AWS client. -type customRetryer struct { - client.DefaultRetryer -} +type customRetryer struct{} // isErrReadConnectionReset returns true if the underlying error is a read // connection reset error. @@ -141,15 +143,17 @@ func isErrReadConnectionReset(err error) bool { return err != nil && strings.Contains(err.Error(), "read: connection reset") } -// ShouldRetry implements the request.Retryer interface. -func (sr *customRetryer) ShouldRetry(r *request.Request) bool { - return sr.DefaultRetryer.ShouldRetry(r) || isErrReadConnectionReset(r.Error) +// IsErrorRetryable implements the retry.IsErrorRetryable interface. +func (sr *customRetryer) IsErrorRetryable(e error) aws.Ternary { + return aws.BoolTernary(isErrReadConnectionReset(e)) } +var _ retry.IsErrorRetryable = &customRetryer{} + // s3Client wraps an SDK client and uploader for a given session. type s3Client struct { - client *s3.S3 - uploader *s3manager.Uploader + client *s3.Client + uploader *manager.Uploader } var reuseSession = settings.RegisterBoolSetting( @@ -451,6 +455,7 @@ func MakeS3Storage( conf: conf, ioConf: args.IOConf, prefix: conf.Prefix, + metrics: args.MetricsRecorder, settings: args.Settings, opts: clientConfig(conf), } @@ -472,7 +477,7 @@ func MakeS3Storage( // other callers from making clients in the meantime, not just to avoid making // duplicate clients in a race but also because making clients concurrently // can fail if the AWS metadata server hits its rate limit. - client, _, err := newClient(ctx, s.opts, s.settings) + client, _, err := newClient(ctx, args.MetricsRecorder, s.opts, s.settings) if err != nil { return nil, err } @@ -486,8 +491,8 @@ type awsLogAdapter struct { ctx context.Context } -func (l *awsLogAdapter) Log(vals ...interface{}) { - log.Infof(l.ctx, "s3: %s", fmt.Sprint(vals...)) +func (l *awsLogAdapter) Logf(_ logging.Classification, format string, v ...interface{}) { + log.Infof(l.ctx, format, v...) 
} func newLogAdapter(ctx context.Context) *awsLogAdapter { @@ -496,14 +501,33 @@ func newLogAdapter(ctx context.Context) *awsLogAdapter { } } -var awsVerboseLogging = aws.LogLevel(aws.LogDebugWithRequestRetries | aws.LogDebugWithRequestErrors) +var awsVerboseLogging = aws.LogRequestEventMessage | aws.LogResponseEventMessage | aws.LogRetries | aws.LogSigning + +func constructEndpointURI(endpoint string) (string, error) { + parsedURL, err := url.Parse(endpoint) + if err != nil { + return "", errors.Wrap(err, "error parsing URL") + } + + if parsedURL.Scheme != "" { + return parsedURL.String(), nil + } + // Input URL doesn't have a scheme, construct a new URL with a default + // scheme. + u := &url.URL{ + Scheme: "https", // Default scheme + Host: endpoint, + } + + return u.String(), nil +} // newClient creates a client from the passed s3ClientConfig and if the passed // config's region is empty, used the passed bucket to determine a region and // configures the client with it as well as returning it (so the caller can // remember it for future calls). func newClient( - ctx context.Context, conf s3ClientConfig, settings *cluster.Settings, + ctx context.Context, metrics *cloud.Metrics, conf s3ClientConfig, settings *cluster.Settings, ) (s3Client, string, error) { // Open a span if client creation will do IO/RPCs to find creds/bucket region. if conf.region == "" || conf.auth == cloud.AuthParamImplicit { @@ -512,99 +536,109 @@ func newClient( defer sp.Finish() } - opts := session.Options{} + var loadOptions []func(options *config.LoadOptions) error + addLoadOption := func(option config.LoadOptionsFunc) { + loadOptions = append(loadOptions, option) + } - httpClient, err := cloud.MakeHTTPClient(settings) + client, err := cloud.MakeHTTPClient(settings, metrics, "aws", conf.bucket) if err != nil { return s3Client{}, "", err } - opts.Config.HTTPClient = httpClient - - if conf.endpoint != "" { - opts.Config.Endpoint = aws.String(conf.endpoint) - opts.Config.S3ForcePathStyle = aws.Bool(true) - - if conf.region == "" { - conf.region = "default-region" - } - } + addLoadOption(config.WithHTTPClient(client)) // TODO(yevgeniy): Revisit retry logic. Retrying 10 times seems arbitrary. 
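+	// How the pieces compose (sketch of intent): retry.NewStandard supplies the
+	// SDK's default retryable checks and customRetryer is appended on top, so the
+	// "read: connection reset" case from isErrReadConnectionReset is retried in
+	// addition to the standard cases.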
- opts.Config.MaxRetries = aws.Int(10) + retryMaxAttempts := 10 + addLoadOption(config.WithRetryMaxAttempts(retryMaxAttempts)) - opts.Config.CredentialsChainVerboseErrors = aws.Bool(true) - - opts.Config.Logger = newLogAdapter(ctx) + addLoadOption(config.WithLogger(newLogAdapter(ctx))) if conf.verbose { - opts.Config.LogLevel = awsVerboseLogging + addLoadOption(config.WithClientLogMode(awsVerboseLogging)) } - retryer := &customRetryer{ - DefaultRetryer: client.DefaultRetryer{ - NumMaxRetries: *opts.Config.MaxRetries, - }, - } - opts.Config.Retryer = retryer - - var sess *session.Session + addLoadOption(config.WithRetryer(func() aws.Retryer { + return retry.NewStandard(func(opts *retry.StandardOptions) { + opts.MaxAttempts = retryMaxAttempts + opts.Retryables = append(opts.Retryables, &customRetryer{}) + }) + })) switch conf.auth { case "", cloud.AuthParamSpecified: - sess, err = session.NewSessionWithOptions(opts) - if err != nil { - return s3Client{}, "", errors.Wrap(err, "new aws session") - } - sess.Config.Credentials = credentials.NewStaticCredentials(conf.accessKey, conf.secret, conf.tempToken) + addLoadOption(config.WithCredentialsProvider( + aws.NewCredentialsCache(credentials.NewStaticCredentialsProvider(conf.accessKey, conf.secret, conf.tempToken)))) case cloud.AuthParamImplicit: - opts.SharedConfigState = session.SharedConfigEnable - sess, err = session.NewSessionWithOptions(opts) + } + + cfg, err := config.LoadDefaultConfig(ctx, loadOptions...) + if err != nil { - return s3Client{}, "", errors.Wrap(err, "new aws session") + return s3Client{}, "", errors.Wrap(err, "could not initialize an aws config") + } + + var endpointURI string + if conf.endpoint != "" { + var err error + endpointURI, err = constructEndpointURI(conf.endpoint) if err != nil { - return s3Client{}, "", errors.Wrap(err, "new aws session") + return s3Client{}, "", err } } if conf.assumeRoleProvider.roleARN != "" { for _, delegateProvider := range conf.delegateRoleProviders { - intermediateCreds := stscreds.NewCredentials(sess, delegateProvider.roleARN, withExternalID(delegateProvider.externalID)) - opts.Config.Credentials = intermediateCreds + client := sts.NewFromConfig(cfg, func(options *sts.Options) { + if endpointURI != "" { + options.BaseEndpoint = aws.String(endpointURI) + } + }) + intermediateCreds := stscreds.NewAssumeRoleProvider(client, delegateProvider.roleARN, withExternalID(delegateProvider.externalID)) + cfg.Credentials = aws.NewCredentialsCache(intermediateCreds) + - sess, err = session.NewSessionWithOptions(opts) - if err != nil { - return s3Client{}, "", errors.Wrap(err, "session with intermediate credentials") + client := sts.NewFromConfig(cfg, func(options *sts.Options) { + if endpointURI != "" { + options.BaseEndpoint = aws.String(endpointURI) } - } + }) - creds := stscreds.NewCredentials(sess, conf.assumeRoleProvider.roleARN, withExternalID(conf.assumeRoleProvider.externalID)) - opts.Config.Credentials = creds - sess, err = session.NewSessionWithOptions(opts) - if err != nil { - return s3Client{}, "", errors.Wrap(err, "session with assume role credentials") - } + creds := stscreds.NewAssumeRoleProvider(client, conf.assumeRoleProvider.roleARN, withExternalID(conf.assumeRoleProvider.externalID)) + cfg.Credentials = creds } region := conf.region if region == "" { + // Set a hint because we have no region specified, we will override this + // below once we get the actual bucket region. 
+ cfg.Region = "us-east-1" if err := cloud.DelayedRetry(ctx, "s3manager.GetBucketRegion", s3ErrDelay, func() error { - region, err = s3manager.GetBucketRegion(ctx, sess, conf.bucket, "us-east-1") + region, err = manager.GetBucketRegion(ctx, s3.NewFromConfig(cfg, func(options *s3.Options) { + if endpointURI != "" { + options.BaseEndpoint = aws.String(endpointURI) + } + }), conf.bucket) return err }); err != nil { return s3Client{}, "", errors.Wrap(err, "could not find s3 bucket's region") } } - sess.Config.Region = aws.String(region) + cfg.Region = region - c := s3.New(sess) - u := s3manager.NewUploader(sess, func(uploader *s3manager.Uploader) { + c := s3.NewFromConfig(cfg, func(options *s3.Options) { + if endpointURI != "" { + options.BaseEndpoint = aws.String(endpointURI) + } + }) + u := manager.NewUploader(c, func(uploader *manager.Uploader) { uploader.PartSize = cloud.WriteChunkSize.Get(&settings.SV) }) return s3Client{client: c, uploader: u}, region, nil } -func (s *s3Storage) getClient(ctx context.Context) (*s3.S3, error) { +func (s *s3Storage) getClient(ctx context.Context) (*s3.Client, error) { if s.cached != nil { return s.cached.client, nil } - client, region, err := newClient(ctx, s.opts, s.settings) + client, region, err := newClient(ctx, s.metrics, s.opts, s.settings) if err != nil { return nil, err } @@ -614,11 +648,11 @@ func (s *s3Storage) getClient(ctx context.Context) (*s3.S3, error) { return client.client, nil } -func (s *s3Storage) getUploader(ctx context.Context) (*s3manager.Uploader, error) { +func (s *s3Storage) getUploader(ctx context.Context) (*manager.Uploader, error) { if s.cached != nil { return s.cached.uploader, nil } - client, region, err := newClient(ctx, s.opts, s.settings) + client, region, err := newClient(ctx, s.metrics, s.opts, s.settings) if err != nil { return nil, err } @@ -647,7 +681,7 @@ func (s *s3Storage) Settings() *cluster.Settings { type putUploader struct { b *bytes.Buffer - client *s3.S3 + client *s3.Client input *s3.PutObjectInput } @@ -657,7 +691,8 @@ func (u *putUploader) Write(p []byte) (int, error) { func (u *putUploader) Close() error { u.input.Body = bytes.NewReader(u.b.Bytes()) - _, err := u.client.PutObject(u.input) + // TODO(adityamaru): plumb a ctx through to close. + _, err := u.client.PutObject(context.Background(), u.input) return err } @@ -674,9 +709,10 @@ func (s *s3Storage) putUploader(ctx context.Context, basename string) (io.WriteC input: &s3.PutObjectInput{ Bucket: s.bucket, Key: aws.String(path.Join(s.prefix, basename)), - ServerSideEncryption: nilIfEmpty(s.conf.ServerEncMode), + ServerSideEncryption: types.ServerSideEncryption(s.conf.ServerEncMode), SSEKMSKeyId: nilIfEmpty(s.conf.ServerKMSID), - StorageClass: nilIfEmpty(s.conf.StorageClass), + StorageClass: types.StorageClass(s.conf.StorageClass), + ChecksumAlgorithm: checksumAlgorithm, }, client: client, }, nil @@ -698,13 +734,14 @@ func (s *s3Storage) Writer(ctx context.Context, basename string) (io.WriteCloser defer sp.Finish() // Upload the file to S3. // TODO(dt): test and tune the uploader parameters. 
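The v1 session/s3manager pair becomes config, client, and manager in v2. A compact sketch of the same sequence used above: resolve the bucket's region first, pin the config to it, then build the client and a part-size-tuned uploader (bucket, key, and part size are illustrative; default credentials assumed):

package main

import (
	"context"
	"log"
	"strings"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/feature/s3/manager"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// Discover the bucket's region, as newClient does, before building the
	// real client against it.
	region, err := manager.GetBucketRegion(ctx, s3.NewFromConfig(cfg), "my-bucket")
	if err != nil {
		log.Fatal(err)
	}
	cfg.Region = region

	client := s3.NewFromConfig(cfg)
	uploader := manager.NewUploader(client, func(u *manager.Uploader) {
		u.PartSize = 8 << 20 // 8 MiB parts, standing in for cloud.WriteChunkSize
	})
	if _, err := uploader.Upload(ctx, &s3.PutObjectInput{
		Bucket: aws.String("my-bucket"),
		Key:    aws.String("hello.txt"),
		Body:   strings.NewReader("hello"),
	}); err != nil {
		log.Fatal(err)
	}
}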
- _, err := uploader.UploadWithContext(ctx, &s3manager.UploadInput{ + _, err := uploader.Upload(ctx, &s3.PutObjectInput{ Bucket: s.bucket, Key: aws.String(path.Join(s.prefix, basename)), Body: r, - ServerSideEncryption: nilIfEmpty(s.conf.ServerEncMode), + ServerSideEncryption: types.ServerSideEncryption(s.conf.ServerEncMode), SSEKMSKeyId: nilIfEmpty(s.conf.ServerKMSID), - StorageClass: nilIfEmpty(s.conf.StorageClass), + StorageClass: types.StorageClass(s.conf.StorageClass), + ChecksumAlgorithm: checksumAlgorithm, }) err = interpretAWSError(err) err = errors.Wrap(err, "upload failed") @@ -737,7 +774,7 @@ func (s *s3Storage) openStreamAt( req.Range = aws.String(fmt.Sprintf("bytes=%d-", pos)) } - out, err := client.GetObjectWithContext(ctx, req) + out, err := client.GetObject(ctx, req) if err != nil { err = interpretAWSError(err) if errors.Is(err, cloud.ErrFileDoesNotExist) { @@ -790,7 +827,6 @@ func (s *s3Storage) ReadFile( // so try a Size() request. x, err := s.Size(ctx, basename) if err != nil { - err = interpretAWSError(err) return nil, 0, errors.Wrap(err, "content-length missing from GetObject and Size() failed") } fileSize = x @@ -822,47 +858,44 @@ func (s *s3Storage) List(ctx context.Context, prefix, delim string, fn cloud.Lis return err } - var fnErr error - pageFn := func(page *s3.ListObjectsOutput, lastPage bool) bool { - for _, x := range page.CommonPrefixes { - if fnErr = fn(strings.TrimPrefix(*x.Prefix, dest)); fnErr != nil { - return false - } - } - for _, fileObject := range page.Contents { - if fnErr = fn(strings.TrimPrefix(*fileObject.Key, dest)); fnErr != nil { - return false - } - } - - return true - } - - var s3Input *s3.ListObjectsInput + var s3Input *s3.ListObjectsV2Input // Add an environment variable toggle for s3 storage to list prefixes with a // paging marker that's the prefix with an additional /. This allows certain // s3 clones which return s3:/// as the first result of listing // s3:// to exclude that result. if envutil.EnvOrDefaultBool("COCKROACH_S3_LIST_WITH_PREFIX_SLASH_MARKER", false) { - s3Input = &s3.ListObjectsInput{Bucket: s.bucket, Prefix: aws.String(dest), Delimiter: nilIfEmpty(delim), Marker: aws.String(dest + "/")} + s3Input = &s3.ListObjectsV2Input{Bucket: s.bucket, Prefix: aws.String(dest), Delimiter: nilIfEmpty(delim), StartAfter: aws.String(dest + "/")} } else { - s3Input = &s3.ListObjectsInput{Bucket: s.bucket, Prefix: aws.String(dest), Delimiter: nilIfEmpty(delim)} + s3Input = &s3.ListObjectsV2Input{Bucket: s.bucket, Prefix: aws.String(dest), Delimiter: nilIfEmpty(delim)} } - if err := client.ListObjectsPagesWithContext( - ctx, s3Input, pageFn, - ); err != nil { - err = interpretAWSError(err) - err = errors.Wrap(err, `failed to list s3 bucket`) - // Mark with ctx's error for upstream code to not interpret this as - // corruption. - if ctx.Err() != nil { - err = errors.Mark(err, ctx.Err()) + paginator := s3.NewListObjectsV2Paginator(client, s3Input) + for paginator.HasMorePages() { + page, err := paginator.NextPage(ctx) + if err != nil { + err = interpretAWSError(err) + err = errors.Wrap(err, `failed to list s3 bucket`) + // Mark with ctx's error for upstream code to not interpret this as + // corruption. 
+ if ctx.Err() != nil { + err = errors.Mark(err, ctx.Err()) + } + return err } - return err - } - return fnErr + for _, x := range page.CommonPrefixes { + if err := fn(strings.TrimPrefix(*x.Prefix, dest)); err != nil { + return err + } + } + + for _, fileObject := range page.Contents { + if err := fn(strings.TrimPrefix(*fileObject.Key, dest)); err != nil { + return err + } + } + } + return nil } // interpretAWSError attempts to surface safe information that otherwise would be redacted. @@ -884,16 +917,19 @@ func interpretAWSError(err error) error { err = errors.Wrap(err, "AccessDenied") } - if aerr := (awserr.Error)(nil); errors.As(err, &aerr) { - code := aerr.Code() + var apiErr smithy.APIError + if errors.As(err, &apiErr) { + code := apiErr.ErrorCode() if code != "" { // nolint:errwrap err = errors.Wrapf(err, "%v", code) + noSuchBucket := types.NoSuchBucket{} + noSuchKey := types.NoSuchKey{} switch code { // Relevant 404 errors reported by AWS. - case s3.ErrCodeNoSuchBucket, s3.ErrCodeNoSuchKey: + case noSuchBucket.ErrorCode(), noSuchKey.ErrorCode(): // nolint:errwrap err = errors.Wrapf( errors.Wrap(cloud.ErrFileDoesNotExist, "s3 object does not exist"), @@ -919,7 +955,7 @@ func (s *s3Storage) Delete(ctx context.Context, basename string) error { return timeutil.RunWithTimeout(ctx, "delete s3 object", cloud.Timeout.Get(&s.settings.SV), func(ctx context.Context) error { - _, err := client.DeleteObjectWithContext(ctx, &s3.DeleteObjectInput{ + _, err := client.DeleteObject(ctx, &s3.DeleteObjectInput{ Bucket: s.bucket, Key: aws.String(path.Join(s.prefix, basename)), }) @@ -941,7 +977,7 @@ func (s *s3Storage) Size(ctx context.Context, basename string) (int64, error) { cloud.Timeout.Get(&s.settings.SV), func(ctx context.Context) error { var err error - out, err = client.HeadObjectWithContext(ctx, &s3.HeadObjectInput{ + out, err = client.HeadObject(ctx, &s3.HeadObjectInput{ Bucket: s.bucket, Key: aws.String(path.Join(s.prefix, basename)), }) @@ -972,20 +1008,20 @@ func nilIfEmpty(s string) *string { } func s3ErrDelay(err error) time.Duration { - var s3err s3.RequestFailure - if errors.As(err, &s3err) { + var re *awshttp.ResponseError + if errors.As(err, &re) { // A 503 error could mean we need to reduce our request rate. Impose an // arbitrary slowdown in that case. 
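A runnable sketch of the paginator pattern this List refactor adopts, including the smithy.APIError unwrapping that replaces awserr.Error in v2 (bucket and prefix are illustrative):

package main

import (
	"context"
	"errors"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/smithy-go"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// v2 replaces the ListObjectsPagesWithContext callback style with an
	// explicit paginator loop over ListObjectsV2.
	paginator := s3.NewListObjectsV2Paginator(client, &s3.ListObjectsV2Input{
		Bucket:    aws.String("my-bucket"),
		Prefix:    aws.String("backups/"),
		Delimiter: aws.String("/"),
	})
	for paginator.HasMorePages() {
		page, err := paginator.NextPage(ctx)
		if err != nil {
			// Service error codes now surface via smithy.APIError rather
			// than awserr.Error.
			var apiErr smithy.APIError
			if errors.As(err, &apiErr) {
				log.Fatalf("%s: %s", apiErr.ErrorCode(), apiErr.ErrorMessage())
			}
			log.Fatal(err)
		}
		for _, p := range page.CommonPrefixes {
			fmt.Println("prefix:", *p.Prefix)
		}
		for _, obj := range page.Contents {
			fmt.Println("key:", *obj.Key)
		}
	}
}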
// See http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html - if s3err.StatusCode() == 503 { + if re.HTTPStatusCode() == 503 { return time.Second * 5 } } return 0 } -func withExternalID(externalID string) func(*stscreds.AssumeRoleProvider) { - return func(p *stscreds.AssumeRoleProvider) { +func withExternalID(externalID string) func(p *stscreds.AssumeRoleOptions) { + return func(p *stscreds.AssumeRoleOptions) { if externalID != "" { p.ExternalID = aws.String(externalID) } diff --git a/pkg/cloud/amazon/s3_storage_test.go b/pkg/cloud/amazon/s3_storage_test.go index 416f47862ba9..fc3741a82966 100644 --- a/pkg/cloud/amazon/s3_storage_test.go +++ b/pkg/cloud/amazon/s3_storage_test.go @@ -18,10 +18,11 @@ import ( "strings" "testing" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/smithy-go" "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/blobs" "github.com/cockroachdb/cockroach/pkg/cloud" @@ -60,16 +61,38 @@ func makeS3Storage( return s, nil } +// You can create an IAM that can access S3 in the AWS console, then +// set it up locally. +// https://docs.aws.com/cli/latest/userguide/cli-configure-role.html +// We only run the calling test if default role exists. +func skipIfNoDefaultConfig(t *testing.T, ctx context.Context) { + t.Helper() + const helpMsg = "we only run this test if a default role exists, " + + "refer to https://docs.aws.com/cli/latest/userguide/cli-configure-role.html" + config, err := config.LoadDefaultConfig(ctx, + config.WithSharedConfigProfile(config.DefaultSharedConfigProfile)) + if err != nil && err.Error() == "failed to get shared config profile, default" { + skip.IgnoreLintf(t, "%s: %s", helpMsg, err) + } + require.NoError(t, err) + _, err = config.Credentials.Retrieve(ctx) + if err != nil { + skip.IgnoreLintf(t, "%s: %s", helpMsg, err) + } +} + func TestPutS3(t *testing.T) { defer leaktest.AfterTest(t)() // If environment credentials are not present, we want to // skip all S3 tests, including auth-implicit, even though // it is not used in auth-implicit. - creds, err := credentials.NewEnvCredentials().Get() - if err != nil { + envConfig, err := config.NewEnvConfig() + require.NoError(t, err) + if !envConfig.Credentials.HasKeys() { skip.IgnoreLint(t, "No AWS credentials") } + envCreds := envConfig.Credentials baseBucket := os.Getenv("AWS_S3_BUCKET") if baseBucket == "" { skip.IgnoreLint(t, "AWS_S3_BUCKET env var must be set") @@ -106,17 +129,7 @@ func TestPutS3(t *testing.T) { )) }) t.Run("auth-implicit", func(t *testing.T) { - // You can create an IAM that can access S3 - // in the AWS console, then set it up locally. - // https://docs.aws.com/cli/latest/userguide/cli-configure-role.html - // We only run this test if default role exists. 
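skipIfNoDefaultConfig above amounts to asking the default chain for credentials and skipping when none resolve. The probe on its own, as a minimal sketch with no test or cluster plumbing:

package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/config"
)

// hasDefaultCredentials reports whether the default chain (env vars, shared
// config/credentials files, IMDS, ...) can produce credentials, which is the
// condition the test helper gates on.
func hasDefaultCredentials(ctx context.Context) bool {
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		return false
	}
	_, err = cfg.Credentials.Retrieve(ctx)
	return err == nil
}

func main() {
	fmt.Println(hasDefaultCredentials(context.Background()))
}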
- credentialsProvider := credentials.SharedCredentialsProvider{} - _, err := credentialsProvider.Retrieve() - if err != nil { - skip.IgnoreLintf(t, "we only run this test if a default role exists, "+ - "refer to https://docs.aws.com/cli/latest/userguide/cli-configure-role.html: %s", err) - } - + skipIfNoDefaultConfig(t, ctx) cloudtestutils.CheckExportStore(t, fmt.Sprintf( "s3://%s/%s-%d?%s=%s", bucket, "backup-test-default", testID, @@ -127,7 +140,7 @@ func TestPutS3(t *testing.T) { }) t.Run("auth-specified", func(t *testing.T) { uri := S3URI(bucket, fmt.Sprintf("backup-test-%d", testID), - &cloudpb.ExternalStorage_S3{AccessKey: creds.AccessKeyID, Secret: creds.SecretAccessKey, Region: "us-east-1"}, + &cloudpb.ExternalStorage_S3{AccessKey: envCreds.AccessKeyID, Secret: envCreds.SecretAccessKey, Region: "us-east-1"}, ) cloudtestutils.CheckExportStore( t, uri, false, user, nil /* db */, testSettings, @@ -139,17 +152,7 @@ func TestPutS3(t *testing.T) { // Tests that we can put an object with server side encryption specified. t.Run("server-side-encryption", func(t *testing.T) { - // You can create an IAM that can access S3 - // in the AWS console, then set it up locally. - // https://docs.aws.com/cli/latest/userguide/cli-configure-role.html - // We only run this test if default role exists. - credentialsProvider := credentials.SharedCredentialsProvider{} - _, err := credentialsProvider.Retrieve() - if err != nil { - skip.IgnoreLintf(t, "we only run this test if a default role exists, "+ - "refer to https://docs.aws.com/cli/latest/userguide/cli-configure-role.html: %s", err) - } - + skipIfNoDefaultConfig(t, ctx) cloudtestutils.CheckExportStore(t, fmt.Sprintf( "s3://%s/%s-%d?%s=%s&%s=%s", bucket, "backup-test-sse-256", testID, @@ -179,17 +182,7 @@ func TestPutS3(t *testing.T) { }) t.Run("server-side-encryption-invalid-params", func(t *testing.T) { - // You can create an IAM that can access S3 - // in the AWS console, then set it up locally. - // https://docs.aws.com/cli/latest/userguide/cli-configure-role.html - // We only run this test if default role exists. - credentialsProvider := credentials.SharedCredentialsProvider{} - _, err := credentialsProvider.Retrieve() - if err != nil { - skip.IgnoreLintf(t, "we only run this test if a default role exists, "+ - "refer to https://docs.aws.com/cli/latest/userguide/cli-configure-role.html: %s", err) - } - + skipIfNoDefaultConfig(t, ctx) // Unsupported server side encryption option. invalidSSEModeURI := fmt.Sprintf( "s3://%s/%s?%s=%s&%s=%s", @@ -220,10 +213,12 @@ func TestPutS3AssumeRole(t *testing.T) { // If environment credentials are not present, we want to // skip all S3 tests, including auth-implicit, even though // it is not used in auth-implicit. 
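For the server-side-encryption cases above: v2 models the SSE mode and storage class as typed enums where v1 used *string, which is what the types.ServerSideEncryption and types.StorageClass conversions elsewhere in this diff are about. A sketch of a PutObject with SSE-KMS under the new types (bucket, key, and KMS key ARN are illustrative):

package main

import (
	"context"
	"log"
	"strings"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// ServerSideEncryption and StorageClass take enum values rather than
	// *string in v2.
	if _, err := client.PutObject(ctx, &s3.PutObjectInput{
		Bucket:               aws.String("my-bucket"),
		Key:                  aws.String("enc.txt"),
		Body:                 strings.NewReader("secret"),
		ServerSideEncryption: types.ServerSideEncryptionAwsKms,
		SSEKMSKeyId:          aws.String("arn:aws:kms:us-east-1:123456789012:key/abc"),
		StorageClass:         types.StorageClassStandard,
	}); err != nil {
		log.Fatal(err)
	}
}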
- creds, err := credentials.NewEnvCredentials().Get() - if err != nil { + envConfig, err := config.NewEnvConfig() + require.NoError(t, err) + if !envConfig.Credentials.HasKeys() { skip.IgnoreLint(t, "No AWS credentials") } + creds := envConfig.Credentials bucket := os.Getenv("AWS_S3_BUCKET") if bucket == "" { skip.IgnoreLint(t, "AWS_S3_BUCKET env var must be set") @@ -239,13 +234,9 @@ func TestPutS3AssumeRole(t *testing.T) { if roleArn == "" { skip.IgnoreLint(t, "AWS_ASSUME_ROLE env var must be set") } + ctx := context.Background() t.Run("auth-implicit", func(t *testing.T) { - credentialsProvider := credentials.SharedCredentialsProvider{} - _, err := credentialsProvider.Retrieve() - if err != nil { - skip.IgnoreLintf(t, "we only run this test if a default role exists, "+ - "refer to https://docs.aws.com/cli/latest/userguide/cli-configure-role.html: %s", err) - } + skipIfNoDefaultConfig(t, ctx) uri := S3URI(bucket, testPath, &cloudpb.ExternalStorage_S3{Auth: cloud.AuthParamImplicit, RoleARN: roleArn, Region: "us-east-1"}, ) @@ -389,8 +380,9 @@ func TestS3DisallowCustomEndpoints(t *testing.T) { // including auth-implicit, even though it is not used in auth-implicit. // Without credentials, it's unclear if we can even communicate with an s3 // endpoint. - _, err := credentials.NewEnvCredentials().Get() - if err != nil { + envConfig, err := config.NewEnvConfig() + require.NoError(t, err) + if !envConfig.Credentials.HasKeys() { skip.IgnoreLint(t, "No AWS credentials") } @@ -412,8 +404,9 @@ func TestS3DisallowImplicitCredentials(t *testing.T) { // including auth-implicit, even though it is not used in auth-implicit. // Without credentials, it's unclear if we can even communicate with an s3 // endpoint. - _, err := credentials.NewEnvCredentials().Get() - if err != nil { + envConfig, err := config.NewEnvConfig() + require.NoError(t, err) + if !envConfig.Credentials.HasKeys() { skip.IgnoreLint(t, "No AWS credentials") } @@ -435,35 +428,36 @@ func TestS3DisallowImplicitCredentials(t *testing.T) { type awserror struct { error - orig error code, message string + fault smithy.ErrorFault } -var _ awserr.Error = awserror{} +var _ smithy.APIError = awserror{} -func (a awserror) Code() string { +func (a awserror) ErrorCode() string { return a.code } -func (a awserror) Message() string { +func (a awserror) ErrorMessage() string { return a.message } -func (a awserror) OrigErr() error { - return a.orig +func (a awserror) ErrorFault() smithy.ErrorFault { + return a.fault } func TestInterpretAWSCode(t *testing.T) { { // with code + err := types.BucketAlreadyOwnedByYou{} input := awserror{ error: errors.New("hello"), - code: s3.ErrCodeBucketAlreadyOwnedByYou, + code: err.ErrorCode(), } got := interpretAWSError(input) require.NotNil(t, got, "expected tryAWSCode to recognize an awserr.Error type") require.False(t, errors.Is(got, cloud.ErrFileDoesNotExist), "should not include cloud.ErrFileDoesNotExist in the error chain") - require.True(t, strings.Contains(got.Error(), s3.ErrCodeBucketAlreadyOwnedByYou), "aws error code should be in the error chain") + require.True(t, strings.Contains(got.Error(), err.ErrorCode()), "aws error code should be in the error chain") } { @@ -479,34 +473,36 @@ func TestInterpretAWSCode(t *testing.T) { { // with particular code + err := types.NoSuchBucket{} input := awserror{ error: errors.New("hello"), - code: s3.ErrCodeNoSuchBucket, + code: err.ErrorCode(), } got := interpretAWSError(input) require.NotNil(t, got, "expected tryAWSCode to regognize awserr.Error") require.True(t, 
errors.Is(got, cloud.ErrFileDoesNotExist), "expected cloud.ErrFileDoesNotExist in the error chain") - require.True(t, strings.Contains(got.Error(), s3.ErrCodeNoSuchBucket), "aws error code should be in the error chain") + require.True(t, strings.Contains(got.Error(), err.ErrorCode()), "aws error code should be in the error chain") } { // with keywords and code + err := types.ObjectNotInActiveTierError{} input := awserror{ error: errors.New("‹AccessDenied: User: arn:aws:sts::12345:assumed-role/12345 is not authorized to perform: sts:AssumeRole on resource: arn:aws:iam::12345›"), - code: s3.ErrCodeObjectAlreadyInActiveTierError, + code: err.ErrorCode(), } got := interpretAWSError(input) require.NotNil(t, got, "expected interpretAWSError to recognize keywords") require.True(t, strings.Contains(got.Error(), "AccessDenied"), "expected to see AccessDenied in error chain") require.True(t, strings.Contains(got.Error(), "AssumeRole"), "expected to see AssumeRole in error chain") - require.True(t, strings.Contains(got.Error(), s3.ErrCodeObjectAlreadyInActiveTierError), "aws error code should be in the error chain") + require.True(t, strings.Contains(got.Error(), err.ErrorCode()), "aws error code should be in the error chain") require.True(t, strings.Contains(got.Error(), "12345"), "SDK error should appear in the error chain") // the keywords and code should come through while the original got redacted redacted := errors.Redact(got) require.True(t, strings.Contains(got.Error(), "AccessDenied"), "expected to see AccessDenied in error chain after redaction") require.True(t, strings.Contains(got.Error(), "AssumeRole"), "expected to see AssumeRole in error chain after redaction") - require.True(t, strings.Contains(got.Error(), s3.ErrCodeObjectAlreadyInActiveTierError), "aws error code should be in the error chain after redaction") + require.True(t, strings.Contains(got.Error(), err.ErrorCode()), "aws error code should be in the error chain after redaction") require.False(t, strings.Contains(redacted, "12345"), "SDK error should have been redacted") } @@ -535,12 +531,9 @@ func TestS3BucketDoesNotExist(t *testing.T) { testSettings := cluster.MakeTestingClusterSettings() - credentialsProvider := credentials.SharedCredentialsProvider{} - _, err := credentialsProvider.Retrieve() - if err != nil { - skip.IgnoreLintf(t, "we only run this test if a default role exists, "+ - "refer to https://docs.aws.com/cli/latest/userguide/cli-configure-role.html: %s", err) - } + ctx := context.Background() + skipIfNoDefaultConfig(t, ctx) + q := make(url.Values) q.Add(cloud.AuthParam, cloud.AuthParamImplicit) q.Add(S3RegionParam, "us-east-1") @@ -554,7 +547,6 @@ func TestS3BucketDoesNotExist(t *testing.T) { RawQuery: q.Encode(), } - ctx := context.Background() user := username.RootUserName() conf, err := cloud.ExternalStorageConfFromURI(u.String(), user) @@ -591,8 +583,9 @@ func TestAntagonisticS3Read(t *testing.T) { // including auth-implicit, even though it is not used in auth-implicit. // Without credentials, it's unclear if we can even communicate with an s3 // endpoint. - _, err := credentials.NewEnvCredentials().Get() - if err != nil { + envConfig, err := config.NewEnvConfig() + require.NoError(t, err) + if !envConfig.Credentials.HasKeys() { skip.IgnoreLint(t, "No AWS credentials") } @@ -619,8 +612,9 @@ func TestNewClientErrorsOnBucketRegion(t *testing.T) { // including auth-implicit, even though it is not used in auth-implicit. // Without credentials, it's unclear if we can even communicate with an s3 // endpoint. 
- _, err := credentials.NewEnvCredentials().Get() - if err != nil { + envConfig, err := config.NewEnvConfig() + require.NoError(t, err) + if !envConfig.Credentials.HasKeys() { skip.IgnoreLint(t, "No AWS credentials") } @@ -630,7 +624,7 @@ func TestNewClientErrorsOnBucketRegion(t *testing.T) { bucket: "bucket-does-not-exist-v1i3m", auth: cloud.AuthParamImplicit, } - _, _, err = newClient(ctx, cfg, testSettings) + _, _, err = newClient(ctx, cloud.NilMetrics, cfg, testSettings) require.Regexp(t, "could not find s3 bucket's region", err) } @@ -639,8 +633,9 @@ func TestNewClientErrorsOnBucketRegion(t *testing.T) { func TestReadFileAtReturnsSize(t *testing.T) { defer leaktest.AfterTest(t)() - _, err := credentials.NewEnvCredentials().Get() - if err != nil { + envConfig, err := config.NewEnvConfig() + require.NoError(t, err) + if !envConfig.Credentials.HasKeys() { skip.IgnoreLint(t, "No AWS credentials") } diff --git a/pkg/cloud/azure/azure_storage.go b/pkg/cloud/azure/azure_storage.go index d18e5470ff7d..bce1b02129d6 100644 --- a/pkg/cloud/azure/azure_storage.go +++ b/pkg/cloud/azure/azure_storage.go @@ -225,6 +225,13 @@ func makeAzureStorage( return nil, errors.Wrap(err, "azure: account name is not valid") } + t, err := cloud.MakeHTTPClient(args.Settings, args.MetricsRecorder, "azure", dest.AzureConfig.Container) + if err != nil { + return nil, errors.Wrap(err, "azure: unable to create transport") + } + var opts service.ClientOptions + opts.Transport = t + var azClient *service.Client switch conf.Auth { case cloudpb.AzureAuth_LEGACY: @@ -232,7 +239,7 @@ func makeAzureStorage( if err != nil { return nil, errors.Wrap(err, "azure shared key credential") } - azClient, err = service.NewClientWithSharedKeyCredential(u.String(), credential, nil) + azClient, err = service.NewClientWithSharedKeyCredential(u.String(), credential, &opts) if err != nil { return nil, err } @@ -241,7 +248,7 @@ func makeAzureStorage( if err != nil { return nil, errors.Wrap(err, "azure client secret credential") } - azClient, err = service.NewClient(u.String(), credential, nil) + azClient, err = service.NewClient(u.String(), credential, &opts) if err != nil { return nil, err } @@ -265,7 +272,7 @@ func makeAzureStorage( if err != nil { return nil, errors.Wrap(err, "azure default credential") } - azClient, err = service.NewClient(u.String(), credential, nil) + azClient, err = service.NewClient(u.String(), credential, &opts) if err != nil { return nil, err } diff --git a/pkg/cloud/azure/azure_storage_test.go b/pkg/cloud/azure/azure_storage_test.go index 5bd4ee8c7bf6..9cd660f81e1e 100644 --- a/pkg/cloud/azure/azure_storage_test.go +++ b/pkg/cloud/azure/azure_storage_test.go @@ -259,7 +259,8 @@ func TestMakeAzureStorageURLFromEnvironment(t *testing.T) { {environment: azure.USGovernmentCloud.Name, expected: "https://account.blob.core.usgovcloudapi.net/container"}, } { t.Run(tt.environment, func(t *testing.T) { - sut, err := makeAzureStorage(context.Background(), cloud.EarlyBootExternalStorageContext{}, cloudpb.ExternalStorage{ + testSettings := cluster.MakeTestingClusterSettings() + sut, err := makeAzureStorage(context.Background(), cloud.EarlyBootExternalStorageContext{Settings: testSettings}, cloudpb.ExternalStorage{ AzureConfig: &cloudpb.ExternalStorage_Azure{ Container: "container", Prefix: "path", diff --git a/pkg/cloud/cloud_io.go b/pkg/cloud/cloud_io.go index d64ecd9d5700..883cfc986965 100644 --- a/pkg/cloud/cloud_io.go +++ b/pkg/cloud/cloud_io.go @@ -81,8 +81,10 @@ var httpMetrics = settings.RegisterBoolSetting( // 
MakeHTTPClient makes an http client configured with the common settings used // for interacting with cloud storage (timeouts, retries, CA certs, etc). -func MakeHTTPClient(settings *cluster.Settings) (*http.Client, error) { - t, err := MakeTransport(settings) +func MakeHTTPClient( + settings *cluster.Settings, metrics *Metrics, cloud, bucket string, +) (*http.Client, error) { + t, err := MakeTransport(settings, metrics, cloud, bucket) if err != nil { return nil, err } @@ -98,10 +100,12 @@ func MakeHTTPClientForTransport(t http.RoundTripper) (*http.Client, error) { return &http.Client{Transport: t}, nil } -// MakeHTTPClient makes an http transport configured with the common -// settings used for interacting with cloud storage (timeouts, -// retries, CA certs, etc). Prefer MakeHTTPClient where possible. -func MakeTransport(settings *cluster.Settings) (*http.Transport, error) { +// MakeTransport makes an http transport configured with the common settings +// used for interacting with cloud storage (timeouts, retries, CA certs, etc). +// Prefer MakeHTTPClient where possible. +func MakeTransport( + settings *cluster.Settings, metrics *Metrics, cloud, bucket string, +) (*http.Transport, error) { var tlsConf *tls.Config if pem := httpCustomCA.Get(&settings.SV); pem != "" { roots, err := x509.SystemCertPool() @@ -121,6 +125,9 @@ func MakeTransport(settings *cluster.Settings) (*http.Transport, error) { // Bump up the default idle conn pool size as we have many parallel workers in // most bulk jobs. t.MaxIdleConnsPerHost = 64 + if metrics != nil { + t.DialContext = metrics.NetMetrics.Wrap(t.DialContext, cloud, bucket) + } return t, nil } diff --git a/pkg/cloud/external_storage.go b/pkg/cloud/external_storage.go index 8852a6b69751..9332e390db7b 100644 --- a/pkg/cloud/external_storage.go +++ b/pkg/cloud/external_storage.go @@ -22,6 +22,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/isql" + "github.com/cockroachdb/cockroach/pkg/util/cidr" "github.com/cockroachdb/cockroach/pkg/util/ioctx" "github.com/cockroachdb/cockroach/pkg/util/metric" "github.com/cockroachdb/errors" @@ -170,7 +171,6 @@ type ExternalStorageContext struct { BlobClientFactory blobs.BlobClientFactory DB isql.DB - MetricsRecorder *Metrics } // ExternalStorageContext contains the dependencies passed to external storage @@ -181,9 +181,10 @@ type EarlyBootExternalStorageContext struct { // storage, but I am rather uncertain it is a good idea. We // may be using this provider before we've even read our // cached settings. 
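The cidr.NetMetrics wrapper installed on t.DialContext above is CockroachDB-internal, but the underlying pattern is a dial decorator that returns byte-counting connections. A generic stdlib-only sketch of that idea (the target URL is illustrative):

package main

import (
	"context"
	"fmt"
	"net"
	"net/http"
	"sync/atomic"
	"time"
)

var readBytes, writeBytes atomic.Int64

// countingConn tallies traffic per connection, the way the wrapped
// DialContext feeds the cloud read/write byte metrics.
type countingConn struct{ net.Conn }

func (c countingConn) Read(p []byte) (int, error) {
	n, err := c.Conn.Read(p)
	readBytes.Add(int64(n))
	return n, err
}

func (c countingConn) Write(p []byte) (int, error) {
	n, err := c.Conn.Write(p)
	writeBytes.Add(int64(n))
	return n, err
}

func main() {
	d := &net.Dialer{Timeout: 10 * time.Second}
	t := http.DefaultTransport.(*http.Transport).Clone()
	t.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) {
		conn, err := d.DialContext(ctx, network, addr)
		if err != nil {
			return nil, err
		}
		return countingConn{Conn: conn}, nil
	}
	client := &http.Client{Transport: t}
	if resp, err := client.Get("https://example.com"); err == nil {
		resp.Body.Close()
	}
	fmt.Println("read:", readBytes.Load(), "written:", writeBytes.Load())
}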
- Settings *cluster.Settings - Options []ExternalStorageOption - Limiters Limiters + Settings *cluster.Settings + Options []ExternalStorageOption + Limiters Limiters + MetricsRecorder *Metrics } // ExternalStorageOptions holds dependencies and values that can be @@ -207,13 +208,13 @@ type EarlyBootExternalStorageConstructor func( // NewEarlyBootExternalStorageAccessor creates an // EarlyBootExternalStorageAccessor func NewEarlyBootExternalStorageAccessor( - st *cluster.Settings, conf base.ExternalIODirConfig, + st *cluster.Settings, conf base.ExternalIODirConfig, lookup *cidr.Lookup, ) *EarlyBootExternalStorageAccessor { return &EarlyBootExternalStorageAccessor{ conf: conf, settings: st, limiters: MakeLimiters(&st.SV), - metrics: MakeMetrics(), + metrics: MakeMetrics(lookup), } } diff --git a/pkg/cloud/externalconn/record.go b/pkg/cloud/externalconn/record.go index d7e8882cea68..85912f1b1767 100644 --- a/pkg/cloud/externalconn/record.go +++ b/pkg/cloud/externalconn/record.go @@ -197,6 +197,11 @@ func (e *MutableExternalConnection) RedactedConnectionURI() string { if err == nil { return redactedURI } + case connectionpb.TypeForeignData.String(): + redactedURI, err := cloud.SanitizeExternalStorageURI(unredactedURI, nil) + if err == nil { + return redactedURI + } default: err = fmt.Errorf("cannot redact URI for unknown connection type: %s", e.rec.ConnectionType) } diff --git a/pkg/cloud/gcp/gcs_storage.go b/pkg/cloud/gcp/gcs_storage.go index 851c0cd6bf53..51cd30327389 100644 --- a/pkg/cloud/gcp/gcs_storage.go +++ b/pkg/cloud/gcp/gcs_storage.go @@ -190,7 +190,7 @@ func makeGCSStorage( opts = append(opts, assumeOpt) } - baseTransport, err := cloud.MakeTransport(args.Settings) + baseTransport, err := cloud.MakeTransport(args.Settings, args.MetricsRecorder, "gcs", conf.Bucket) if err != nil { return nil, errors.Wrap(err, "failed to create http transport") } diff --git a/pkg/cloud/httpsink/http_storage.go b/pkg/cloud/httpsink/http_storage.go index a7573fad02fc..fff83033f9e8 100644 --- a/pkg/cloud/httpsink/http_storage.go +++ b/pkg/cloud/httpsink/http_storage.go @@ -71,7 +71,7 @@ func MakeHTTPStorage( return nil, errors.Errorf("HTTP storage requested but prefix path not provided") } - client, err := cloud.MakeHTTPClient(args.Settings) + client, err := cloud.MakeHTTPClient(args.Settings, args.MetricsRecorder, "http", base) if err != nil { return nil, err } diff --git a/pkg/cloud/impl_registry.go b/pkg/cloud/impl_registry.go index 263e8bb228f7..42ee44646361 100644 --- a/pkg/cloud/impl_registry.go +++ b/pkg/cloud/impl_registry.go @@ -248,14 +248,14 @@ func MakeExternalStorage( } args := ExternalStorageContext{ EarlyBootExternalStorageContext: EarlyBootExternalStorageContext{ - IOConf: conf, - Settings: settings, - Options: opts, - Limiters: limiters, + IOConf: conf, + Settings: settings, + Options: opts, + Limiters: limiters, + MetricsRecorder: cloudMetrics, }, BlobClientFactory: blobClientFactory, DB: db, - MetricsRecorder: cloudMetrics, } return makeExternalStorage[ExternalStorageContext](ctx, dest, conf, limiters, metrics, settings, args, getImpl, opts...) 
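Moving MetricsRecorder into the embedded EarlyBootExternalStorageContext keeps args.MetricsRecorder call sites compiling unchanged, because Go promotes fields of embedded structs. A toy illustration (the type names merely mirror the shapes above):

package main

import "fmt"

type Metrics struct{ readers int }

// earlyBootContext plays the role of EarlyBootExternalStorageContext; the
// field now lives here.
type earlyBootContext struct {
	MetricsRecorder *Metrics
}

// fullContext plays the role of ExternalStorageContext, embedding the
// early-boot context.
type fullContext struct {
	earlyBootContext
	DB string
}

func main() {
	args := fullContext{earlyBootContext: earlyBootContext{MetricsRecorder: &Metrics{}}}
	// Promotion: resolves to args.earlyBootContext.MetricsRecorder.
	fmt.Println(args.MetricsRecorder != nil)
}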
diff --git a/pkg/cloud/metrics.go b/pkg/cloud/metrics.go index ff7a7a4338d4..79e53d38156a 100644 --- a/pkg/cloud/metrics.go +++ b/pkg/cloud/metrics.go @@ -14,6 +14,7 @@ import ( "context" "io" + "github.com/cockroachdb/cockroach/pkg/util/cidr" "github.com/cockroachdb/cockroach/pkg/util/ioctx" "github.com/cockroachdb/cockroach/pkg/util/metric" io_prometheus_client "github.com/prometheus/client_model/go" @@ -29,15 +30,11 @@ type Metrics struct { CreatedReaders *metric.Counter // OpenReaders is the number of currently open cloud readers. OpenReaders *metric.Gauge - // ReadBytes counts the bytes read from cloud storage. - ReadBytes *metric.Counter // Writers counts the cloud storage writers opened. CreatedWriters *metric.Counter // OpenReaders is the number of currently open cloud writers. OpenWriters *metric.Gauge - // WriteBytes counts the bytes written to cloud storage. - WriteBytes *metric.Counter // Listings counts the listing calls made to cloud storage. Listings *metric.Counter @@ -47,10 +44,13 @@ type Metrics struct { // ConnsOpened, ConnsReused and TLSHandhakes track connection http info for cloud // storage when collecting this info is enabled. ConnsOpened, ConnsReused, TLSHandhakes *metric.Counter + + // NetMetrics tracks connection level metrics. + NetMetrics *cidr.NetMetrics } // MakeMetrics returns a new instance of Metrics. -func MakeMetrics() metric.Struct { +func MakeMetrics(cidrLookup *cidr.Lookup) metric.Struct { cloudReaders := metric.Metadata{ Name: "cloud.readers_opened", Help: "Readers opened by all cloud operations", @@ -131,15 +131,14 @@ func MakeMetrics() metric.Struct { return &Metrics{ CreatedReaders: metric.NewCounter(cloudReaders), OpenReaders: metric.NewGauge(cloudOpenReaders), - ReadBytes: metric.NewCounter(cloudReadBytes), CreatedWriters: metric.NewCounter(cloudWriters), OpenWriters: metric.NewGauge(cloudOpenWriters), - WriteBytes: metric.NewCounter(cloudWriteBytes), Listings: metric.NewCounter(listings), ListingResults: metric.NewCounter(listingResults), ConnsOpened: metric.NewCounter(connsOpened), ConnsReused: metric.NewCounter(connsReused), TLSHandhakes: metric.NewCounter(tlsHandhakes), + NetMetrics: cidrLookup.MakeNetMetrics(cloudWriteBytes, cloudReadBytes, "cloud", "bucket"), } } @@ -158,8 +157,8 @@ func (m *Metrics) Reader( m.CreatedReaders.Inc(1) m.OpenReaders.Inc(1) return &metricsReader{ - inner: r, - m: m, + ReadCloserCtx: r, + m: m, } } @@ -171,24 +170,17 @@ func (m *Metrics) Writer(_ context.Context, _ ExternalStorage, w io.WriteCloser) m.CreatedWriters.Inc(1) m.OpenWriters.Inc(1) return &metricsWriter{ - w: w, - m: m, + WriteCloser: w, + m: m, } } type metricsReader struct { - inner ioctx.ReadCloserCtx + ioctx.ReadCloserCtx m *Metrics closed bool } -// Read implements the ioctx.ReadCloserCtx interface. -func (mr *metricsReader) Read(ctx context.Context, p []byte) (int, error) { - n, err := mr.inner.Read(ctx, p) - mr.m.ReadBytes.Inc(int64(n)) - return n, err -} - // Close implements the ioctx.ReadCloserCtx interface. func (mr *metricsReader) Close(ctx context.Context) error { if !mr.closed { @@ -196,29 +188,20 @@ func (mr *metricsReader) Close(ctx context.Context) error { mr.closed = true } - return mr.inner.Close(ctx) + return mr.ReadCloserCtx.Close(ctx) } type metricsWriter struct { - w io.WriteCloser + io.WriteCloser m *Metrics closed bool } -// Write implements the WriteCloser interface. 
-func (mw *metricsWriter) Write(p []byte) (int, error) { - n, err := mw.w.Write(p) - mw.m.WriteBytes.Inc(int64(n)) - return n, err -} - // Close implements the WriteCloser interface. func (mw *metricsWriter) Close() error { if !mw.closed { mw.m.OpenWriters.Dec(1) mw.closed = true } - return mw.w.Close() + return mw.WriteCloser.Close() } - -var _ io.WriteCloser = &metricsWriter{} diff --git a/pkg/clusterversion/clusterversion.go b/pkg/clusterversion/clusterversion.go index 974063a9fdc0..b1622f34a27c 100644 --- a/pkg/clusterversion/clusterversion.go +++ b/pkg/clusterversion/clusterversion.go @@ -270,6 +270,24 @@ func (cv ClusterVersion) Encode() []byte { return encoded } +// FenceVersion is the fence version -- the internal immediately prior -- for +// the given version. +// +// Fence versions allow the upgrades infrastructure to safely step through +// consecutive cluster versions in the presence of Nodes (running any binary +// version) being added to the cluster. See the upgrademanager package for +// intended usage. +// +// Fence versions (and the upgrades infrastructure entirely) were introduced in +// the 21.1 release cycle. In the same release cycle, we introduced the +// invariant that new user-defined versions (users being crdb engineers) must +// always have even-numbered Internal versions, thus reserving the odd numbers +// to slot in fence versions for each cluster version. See top-level +// documentation in the clusterversion package for more details. +func (cv ClusterVersion) FenceVersion() ClusterVersion { + return ClusterVersion{Version: cv.Version.FenceVersion()} +} + var _ settings.ClusterVersionImpl = ClusterVersion{} // EncodingFromVersionStr is a shorthand to generate an encoded cluster version diff --git a/pkg/clusterversion/cockroach_versions.go b/pkg/clusterversion/cockroach_versions.go index 442457c01ace..5e7f20fe25f3 100644 --- a/pkg/clusterversion/cockroach_versions.go +++ b/pkg/clusterversion/cockroach_versions.go @@ -224,6 +224,23 @@ const ( // config for the timeseries range if one does not exist currently. V24_3_AddTimeseriesZoneConfig + // V24_3_TableMetadata is the migration to add the table_metadata table + // to the system tenant. + V24_3_TableMetadata + + // V24_3_TenantExcludeDataFromBackup is the migration to add + // `exclude_data_from_backup` on certain system tables with low GC + // TTL to mirror the behaviour on the system tenant. + V24_3_TenantExcludeDataFromBackup + + // V24_3_AdvanceCommitIndexViaMsgApps is the version that makes the commit + // index advancement using MsgApps only, and not MsgHeartbeat. + V24_3_AdvanceCommitIndexViaMsgApps + + // V24_3_SQLInstancesAddDraining is the migration to add the `is_draining` + // column to the system.sql_instances table. + V24_3_SQLInstancesAddDraining + // ************************************************* // Step (1) Add new versions above this comment. // Do not add new versions to a patch release. @@ -272,9 +289,12 @@ var versionTable = [numKeys]roachpb.Version{ // v24.3 versions. Internal versions must be even. 
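A worked example of the fence-version convention described in the new FenceVersion doc comment, on an illustrative mirror of roachpb.Version (per the delegation above, the real stepping logic lives on roachpb.Version):

package main

import "fmt"

// version mirrors the shape of roachpb.Version for illustration only.
type version struct {
	Major, Minor, Internal int32
}

// fenceVersion steps back to the odd-numbered internal slot reserved just
// below each even-numbered user-defined cluster version.
func (v version) fenceVersion() version {
	if v.Internal > 0 {
		v.Internal--
	}
	return v
}

func main() {
	v := version{Major: 24, Minor: 2, Internal: 8} // e.g. V24_3_TableMetadata
	fmt.Println(v.fenceVersion())                  // {24 2 7}: its fence version
}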
V24_3_Start: {Major: 24, Minor: 2, Internal: 2}, - V24_3_StoreLivenessEnabled: {Major: 24, Minor: 2, Internal: 4}, - - V24_3_AddTimeseriesZoneConfig: {Major: 24, Minor: 2, Internal: 6}, + V24_3_StoreLivenessEnabled: {Major: 24, Minor: 2, Internal: 4}, + V24_3_AddTimeseriesZoneConfig: {Major: 24, Minor: 2, Internal: 6}, + V24_3_TableMetadata: {Major: 24, Minor: 2, Internal: 8}, + V24_3_TenantExcludeDataFromBackup: {Major: 24, Minor: 2, Internal: 10}, + V24_3_AdvanceCommitIndexViaMsgApps: {Major: 24, Minor: 2, Internal: 12}, + V24_3_SQLInstancesAddDraining: {Major: 24, Minor: 2, Internal: 14}, // ************************************************* // Step (2): Add new versions above this comment. @@ -328,16 +348,6 @@ func (k Key) Version() roachpb.Version { return maybeApplyDevOffset(k, version) } -// FenceVersion is the fence version -- the internal immediately prior -- for -// the named version, if it is Internal. -func (k Key) FenceVersion() roachpb.Version { - v := k.Version() - if v.Internal > 0 { - v.Internal -= 1 - } - return v -} - // IsFinal returns true if the key corresponds to a final version (as opposed to // a transitional internal version during upgrade). func (k Key) IsFinal() bool { diff --git a/pkg/cmd/dev/compose.go b/pkg/cmd/dev/compose.go index 6b9890fd6530..21256048eab7 100644 --- a/pkg/cmd/dev/compose.go +++ b/pkg/cmd/dev/compose.go @@ -110,6 +110,7 @@ func (d *dev) compose(cmd *cobra.Command, _ []string) error { args = append(args, "--test_output", "all") args = append(args, "--test_env", "COCKROACH_DEV_LICENSE") args = append(args, "--test_env", "COCKROACH_RUN_COMPOSE=true") + args = append(args, "--sandbox_add_mount_pair", os.TempDir()) logCommand("bazel", args...) return d.exec.CommandContextInheritingStdStreams(ctx, "bazel", args...) diff --git a/pkg/cmd/docs-issue-generation/jira.go b/pkg/cmd/docs-issue-generation/jira.go index 7501cb82bca4..ded597e3d868 100644 --- a/pkg/cmd/docs-issue-generation/jira.go +++ b/pkg/cmd/docs-issue-generation/jira.go @@ -78,7 +78,8 @@ func searchJiraDocsIssuesSingle( for _, issue := range search.Issues { prNumber, commitSha, err := extractPRNumberCommitFromDocsIssueBody(issue.RenderedFields.Description) if err != nil { - return 0, 0, err + fmt.Printf("Error processing issue %s: %v", issue.Key, err) + continue // Skip this issue and continue with the next one } if prNumber != 0 && commitSha != "" { _, ok := m[prNumber] diff --git a/pkg/cmd/generate-bazel-extra/main.go b/pkg/cmd/generate-bazel-extra/main.go index f49729701bb2..203af349e28e 100644 --- a/pkg/cmd/generate-bazel-extra/main.go +++ b/pkg/cmd/generate-bazel-extra/main.go @@ -13,7 +13,6 @@ package main import ( "bufio" "errors" - "flag" "fmt" "log" "os" @@ -44,15 +43,12 @@ func getPackagesToQuery() string { return strings.Join(packagesToQuery, "+") } -func generateTestSuites() { - cmd := exec.Command( - "bazel", "query", - fmt.Sprintf(`kind("((go|sh)_(binary|library|test|transition_binary|transition_test))", %s)`, getPackagesToQuery()), - "--output=label_kind", - ) +func runCmd(name string, args ...string) string { + cmd := exec.Command(name, args...) 
+ buf, err := cmd.Output() if err != nil { - log.Printf("Could not query Bazel tests: got error %v", err) + log.Printf("bazel command failed: got error %v", err) var cmderr *exec.ExitError if errors.As(err, &cmderr) { log.Printf("Got error output: %s", string(cmderr.Stderr)) @@ -61,8 +57,20 @@ func generateTestSuites() { } os.Exit(1) } + + return string(buf[:]) +} + +// generatePkgBuild generates pkg/BUILD.bazel and its test_suite target. +func generatePkgBuild() { + buf := runCmd( + "bazel", "query", + fmt.Sprintf(`kind("((go|sh)_(binary|library|test|transition_binary|transition_test))", %s)`, getPackagesToQuery()), + "--output=label_kind", + ) + var goLabels, testLabels []string - for _, line := range strings.Split(string(buf[:]), "\n") { + for _, line := range strings.Split(buf, "\n") { fields := strings.Fields(line) if len(fields) != 3 { continue @@ -123,10 +131,68 @@ test_suite( } } -func main() { - doTestSuites := flag.Bool("gen_test_suites", false, "generate test suites") - flag.Parse() - if *doTestSuites { - generateTestSuites() +// generateProtos generates the file pkg/protos.bzl. +func generateProtos() { + buf := runCmd( + "bazel", "query", "deps(//pkg/server/serverpb:serverpb_proto)", + "--output=label_kind", + ) + + var protoLabels, protoFiles []string + for _, line := range strings.Split(buf, "\n") { + fields := strings.Fields(line) + if len(fields) != 3 { + continue + } + kind := fields[0] + label := fields[2] + if kind == "proto_library" { + protoLabels = append(protoLabels, label) + } else if kind == "source" && (strings.HasPrefix(label, "//") || + strings.HasPrefix(label, "@com_github_prometheus_client_model//io/prometheus/client:") || + strings.HasPrefix(label, "@com_github_cockroachdb_errors//errorspb:")) { + protoFiles = append(protoFiles, label) + } + } + sort.Strings(protoLabels) + sort.Strings(protoFiles) + + f, err := os.Create("pkg/protos.bzl") + if err != nil { + log.Fatalf("Failed to open file `pkg/protos.bzl` - %v", err) } + w := bufio.NewWriter(f) + + fmt.Fprintln(w, `# Code generated by generate-bazel-extra, DO NOT EDIT. + +# This lists all the direct and indirect proto_library dependencies of +# //pkg/server/serverpb:serverpb_proto. +SERVER_PROTOS = [`) + for _, label := range protoLabels { + fmt.Fprintf(w, " %q,\n", label) + } + fmt.Fprintln(w, `] + +# This lists all the in-tree .proto files required to build serverpb_proto. +PROTO_FILES = [`) + for _, label := range protoFiles { + fmt.Fprintf(w, " %q,\n", label) + } + fmt.Fprintln(w, "]") + + if err := w.Flush(); err != nil { + log.Fatal(err) + } + if err := f.Close(); err != nil { + log.Fatal(err) + } +} + +func generate() { + generatePkgBuild() + generateProtos() +} + +func main() { + generate() } diff --git a/pkg/cmd/generate-binary/main.go b/pkg/cmd/generate-binary/main.go index 7f878112946c..fa1e5922addc 100644 --- a/pkg/cmd/generate-binary/main.go +++ b/pkg/cmd/generate-binary/main.go @@ -17,9 +17,22 @@ // The target postgres server must accept plaintext (non-ssl) connections from // the postgres:postgres account. A suitable server can be started with: // -// `docker run -p 127.0.0.1:5432:5432 postgres:11` +// Start a postgres14 server with postgis extension: // -// The output of this file generates pkg/sql/pgwire/testdata/encodings.json. 
+// docker run --name postgres \ +// -e POSTGRES_DB=db \ +// -e POSTGRES_HOST_AUTH_METHOD=trust \ +// -p 127.0.0.1:5432:5432 \ +// postgis/postgis:14-3.4 +// +// docker exec -it postgres psql -U postgres -c "CREATE EXTENSION postgis;" +// +// TODO(xiaochen): figure out where the `"Text": "9E+4"` in encodings.json comes from +// and fix it. (postgres 9 ~ 14 all return "90000" for `SELECT '9E+4'::decimal;`) +// +// Generate file "encodings.json": +// +// bazel run pkg/cmd/generate-binary > pkg/sql/pgwire/testdata/encodings.json package main import ( @@ -313,6 +326,8 @@ var inputs = map[string][]string{ "0004-10-19 10:23:54 BC", "4004-10-19 10:23:54", "9004-10-19 10:23:54", + "infinity", + "-infinity", }, "'%s'::timestamptz": { diff --git a/pkg/cmd/generate-logictest/main.go b/pkg/cmd/generate-logictest/main.go index 21eaaaaa4ef0..cadadf0a8004 100644 --- a/pkg/cmd/generate-logictest/main.go +++ b/pkg/cmd/generate-logictest/main.go @@ -366,6 +366,22 @@ func generate() error { if err != nil { return err } + err = t.addLogicTests("TestReadCommittedLogic", readCommittedCalc) + if err != nil { + return err + } + repeatableReadCalc := logictestbase.ConfigCalculator{ + ConfigOverrides: []string{"local-repeatable-read"}, + RunCCLConfigs: true, + } + err = t.addCclLogicTests("TestRepeatableReadLogicCCL", repeatableReadCalc) + if err != nil { + return err + } + err = t.addLogicTests("TestRepeatableReadLogic", repeatableReadCalc) + if err != nil { + return err + } tenantCalc := logictestbase.ConfigCalculator{ ConfigOverrides: []string{"3node-tenant"}, ConfigFilterOverrides: []string{"3node-tenant-multiregion"}, @@ -383,6 +399,10 @@ func generate() error { if err != nil { return err } + err = t.addExecBuildLogicTests("TestRepeatableReadExecBuild", repeatableReadCalc) + if err != nil { + return err + } err = t.addExecBuildLogicTests("TestTenantExecBuild", tenantCalc) if err != nil { return err diff --git a/pkg/cmd/release/sender_test.go b/pkg/cmd/release/sender_test.go index bcce201f31b5..5bfa13ae65c1 100644 --- a/pkg/cmd/release/sender_test.go +++ b/pkg/cmd/release/sender_test.go @@ -193,7 +193,7 @@ func TestPostBlockers(t *testing.T) { } for _, test := range tests { - t.Run(fmt.Sprintf(test.testCase), func(t *testing.T) { + t.Run(test.testCase, func(t *testing.T) { var expectedMessage *message defer hookGlobal( &sendmail, diff --git a/pkg/cmd/release/update_versions.go b/pkg/cmd/release/update_versions.go index bc234d82adf3..5f2c16356cae 100644 --- a/pkg/cmd/release/update_versions.go +++ b/pkg/cmd/release/update_versions.go @@ -12,6 +12,7 @@ package main import ( "encoding/json" + "errors" "fmt" "html/template" "log" @@ -102,6 +103,8 @@ type prRepo struct { githubUsername string prBranch string fn func(gitDir string) error + // workOnRepoError is set to workOnRepo() result + workOnRepoError error } func (r prRepo) String() string { @@ -282,33 +285,31 @@ func updateVersions(_ *cobra.Command, _ []string) error { // This way we can fail early and avoid unnecessary work closing the PRs we were able to create. 
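The refactor below trades early returns for collected errors, so one failing repo no longer aborts work on the rest, and everything is surfaced at the end via errors.Join. The shape of that pattern, reduced to a sketch (the item names and process function are illustrative):

package main

import (
	"errors"
	"fmt"
)

// processAll keeps going after individual failures and reports them all at
// once, mirroring the workOnRepoErrors accumulation below.
func processAll(items []string, process func(string) error) error {
	var errs []error
	for _, it := range items {
		if err := process(it); err != nil {
			errs = append(errs, fmt.Errorf("processing %s: %w", it, err))
		}
	}
	// errors.Join returns nil when no errors were collected.
	return errors.Join(errs...)
}

func main() {
	err := processAll([]string{"a", "b", "c"}, func(s string) error {
		if s == "b" {
			return errors.New("boom")
		}
		return nil
	})
	fmt.Println(err) // processing b: boom
}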
log.Printf("repos to work on: %s\n", reposToWorkOn) var prs []string + var workOnRepoErrors []error for _, repo := range reposToWorkOn { - log.Printf("Cloning repo %s", repo.name()) - if err := repo.clone(); err != nil { - return fmt.Errorf("cannot clone %s: %w", repo.name(), err) - } - log.Printf("Branching repo %s", repo.name()) - if err := repo.checkout(); err != nil { - return fmt.Errorf("cannot create branch %s: %w", repo.name(), err) - } - log.Printf("Munging repo %s", repo.name()) - if err := repo.apply(); err != nil { - return fmt.Errorf("cannot mutate repo %s: %w", repo.name(), err) - } - log.Printf("commiting changes to repo %s", repo.name()) - if err := repo.commit(); err != nil { - return fmt.Errorf("cannot commit changes in repo %s: %w", repo.name(), err) + repo.workOnRepoError = workOnRepo(repo) + if repo.workOnRepoError != nil { + err = fmt.Errorf("workOnRepo: error occurred while working on repo %s: %w", repo.name(), err) + workOnRepoErrors = append(workOnRepoErrors, err) + log.Printf("%s", err) } } // Now that our local changes are staged, we can try and publish them. for _, repo := range reposToWorkOn { + if repo.workOnRepoError != nil { + log.Printf("PR creation skipped due to previous errors while working on %s: %s", repo.name(), repo.workOnRepoError) + continue + } dest := path.Join(globalWorkDir, repo.checkoutDir()) // We avoid creating duplicated PRs to allow this command to be // run multiple times. prDesc, err := repo.prExists() if err != nil { - return fmt.Errorf("checking pr: %w", err) + err = fmt.Errorf("error while checking if pull request exists for repo %s: %w", repo.name(), err) + workOnRepoErrors = append(workOnRepoErrors, err) + log.Printf("%s", err) + continue } if prDesc != "" { log.Printf("pull request for %s already exists: %s", repo.name(), prDesc) @@ -316,19 +317,30 @@ func updateVersions(_ *cobra.Command, _ []string) error { } log.Printf("pushing changes to repo %s in %s", repo.name(), dest) if err := repo.push(); err != nil { - return fmt.Errorf("cannot push changes for %s: %w", repo.name(), err) + err = fmt.Errorf("error while pushing changes to repo %s: %w", repo.name(), err) + workOnRepoErrors = append(workOnRepoErrors, err) + log.Printf("%s", err) + continue } log.Printf("creating pull request for %s in %s", repo.name(), dest) pr, err := repo.createPullRequest() if err != nil { - return fmt.Errorf("cannot create pull request for %s: %w", repo.name(), err) + err = fmt.Errorf("error creating pull request for %s: %w", repo.name(), err) + workOnRepoErrors = append(workOnRepoErrors, err) + log.Printf("%s", err) + continue } log.Printf("Created PR: %s\n", pr) prs = append(prs, pr) } if err := sendPrReport(releasedVersion, prs, smtpPassword); err != nil { - return fmt.Errorf("cannot send email: %w", err) + err = fmt.Errorf("error sending email: %w", err) + workOnRepoErrors = append(workOnRepoErrors, err) + log.Printf("%s", err) + } + if len(workOnRepoErrors) > 0 { + return errors.Join(workOnRepoErrors...) 
} return nil } @@ -552,6 +564,27 @@ func generateRepoList( return reposToWorkOn, nil } +func workOnRepo(repo prRepo) error { + log.Printf("Cloning repo %s", repo.name()) + if err := repo.clone(); err != nil { + return fmt.Errorf("cannot clone %s: %w", repo.name(), err) + } + log.Printf("Branching repo %s", repo.name()) + if err := repo.checkout(); err != nil { + return fmt.Errorf("cannot create branch %s: %w", repo.name(), err) + } + log.Printf("Munging repo %s", repo.name()) + if err := repo.apply(); err != nil { + return fmt.Errorf("cannot mutate repo %s: %w", repo.name(), err) + } + log.Printf("committing changes to repo %s", repo.name()) + if err := repo.commit(); err != nil { + return fmt.Errorf("cannot commit changes in repo %s: %w", repo.name(), err) + } + + return nil +} + func isLatestStableBranch(version *semver.Version) (bool, error) { // Here we ignore pre-releases (alphas and betas), because we still want to run these operations. // This way we exclude greater pre-release versions from this decision. diff --git a/pkg/cmd/roachprod-microbench/BUILD.bazel b/pkg/cmd/roachprod-microbench/BUILD.bazel index a8f2d80c3d33..9f42f94dfb6b 100644 --- a/pkg/cmd/roachprod-microbench/BUILD.bazel +++ b/pkg/cmd/roachprod-microbench/BUILD.bazel @@ -30,11 +30,13 @@ go_library( "//pkg/util/timeutil", "@com_github_cockroachdb_errors//:errors", "@com_github_cockroachdb_errors//oserror", + "@com_github_influxdata_influxdb_client_go_v2//:influxdb-client-go", "@com_github_klauspost_pgzip//:pgzip", "@com_github_slack_go_slack//:slack", "@com_github_spf13_cobra//:cobra", "@org_golang_x_exp//maps", "@org_golang_x_perf//benchfmt", + "@org_golang_x_perf//benchseries", ], ) diff --git a/pkg/cmd/roachprod-microbench/compare.go b/pkg/cmd/roachprod-microbench/compare.go index f413e6548b0f..d8a9846db947 100644 --- a/pkg/cmd/roachprod-microbench/compare.go +++ b/pkg/cmd/roachprod-microbench/compare.go @@ -11,6 +11,7 @@ package main import ( + "bytes" "context" "fmt" "log" @@ -20,25 +21,39 @@ import ( "sort" "strings" "text/template" + "time" "github.com/cockroachdb/cockroach/pkg/cmd/roachprod-microbench/google" "github.com/cockroachdb/cockroach/pkg/cmd/roachprod-microbench/model" + "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/errors" "github.com/cockroachdb/errors/oserror" + influxdb2 "github.com/influxdata/influxdb-client-go/v2" "github.com/slack-go/slack" "golang.org/x/exp/maps" "golang.org/x/perf/benchfmt" + "golang.org/x/perf/benchseries" ) type compareConfig struct { - newDir string - oldDir string - sheetDesc string - slackUser string - slackChannel string - slackToken string - threshold float64 - publishGoogleSheet bool + slackConfig slackConfig + influxConfig influxConfig + experimentDir string + baselineDir string + sheetDesc string + threshold float64 } +type slackConfig struct { + user string + channel string + token string +} + +type influxConfig struct { + host string + token string + metadata map[string]string } type compare struct { @@ -48,6 +63,16 @@ type compare struct { ctx context.Context } +var defaultInfluxMetadata = map[string]string{ + "branch": "master", + "machine": "n2-standard-32", + "goarch": "amd64", + "goos": "linux", + "repository": "cockroach", + "run-time": timeutil.Now().Format(time.RFC3339), + "upload-time": timeutil.Now().Format(time.RFC3339), +} + const ( packageSeparator = "→" slackPercentageThreshold = 20.0 @@ -63,14 +88,21 @@ const slackCompareTemplateScript = ` ` func 
newCompare(config compareConfig) (*compare, error) { - // Use the old directory to infer package info. - packages, err := getPackagesFromLogs(config.oldDir) + // Use the baseline directory to infer package info. + packages, err := getPackagesFromLogs(config.baselineDir) if err != nil { return nil, err } + // Add default metadata values to the influx config for any missing keys. + for k, v := range defaultInfluxMetadata { + if _, ok := config.influxConfig.metadata[k]; !ok { + config.influxConfig.metadata[k] = v + } + } + ctx := context.Background() var service *google.Service - if config.publishGoogleSheet { + if config.sheetDesc != "" { service, err = google.New(ctx) if err != nil { return nil, err @@ -81,10 +113,15 @@ func newCompare(config compareConfig) (*compare, error) { func defaultCompareConfig() compareConfig { return compareConfig{ - threshold: skipComparison, // Skip comparison by default - slackUser: "microbench", - slackChannel: "perf-ops", - publishGoogleSheet: true, + threshold: skipComparison, // Skip comparison by default + slackConfig: slackConfig{ + user: "microbench", + channel: "perf-ops", + }, + influxConfig: influxConfig{ + host: "http://localhost:8086", + metadata: make(map[string]string), + }, } } @@ -101,12 +138,12 @@ func (c *compare) readMetrics() (map[string]*model.MetricMap, error) { // Read the previous and current results. If either is missing, we'll just // skip it. if err := processReportFile(results, "baseline", pkg, - filepath.Join(c.oldDir, getReportLogName(reportLogName, pkg))); err != nil { + filepath.Join(c.baselineDir, getReportLogName(reportLogName, pkg))); err != nil { return nil, err } if err := processReportFile(results, "experiment", pkg, - filepath.Join(c.newDir, getReportLogName(reportLogName, pkg))); err != nil { + filepath.Join(c.experimentDir, getReportLogName(reportLogName, pkg))); err != nil { log.Printf("failed to add report for %s: %s", pkg, err) return nil, err } @@ -278,7 +315,7 @@ func (c *compare) postToSlack( } - s := newSlackClient(c.slackUser, c.slackChannel, c.slackToken) + s := newSlackClient(c.slackConfig.user, c.slackConfig.channel, c.slackConfig.token) return s.Post( slack.MsgOptionText(fmt.Sprintf("Microbenchmark comparison summary: %s", c.sheetDesc), false), slack.MsgOptionAttachments(attachments...), @@ -318,6 +355,171 @@ func (c *compare) compareUsingThreshold(comparisonResultsMap model.ComparisonRes return nil } +func (c *compare) createBenchSeries() ([]*benchseries.ComparisonSeries, error) { + opts := benchseries.DefaultBuilderOptions() + opts.Experiment = "run-time" + opts.Compare = "cockroach" + builder, err := benchseries.NewBuilder(opts) + if err != nil { + return nil, err + } + + var benchBuf bytes.Buffer + readFileFn := func(filePath string, required bool) error { + data, err := os.ReadFile(filePath) + if err != nil { + if !required && oserror.IsNotExist(err) { + return nil + } + return errors.Wrapf(err, "failed to read file %s", filePath) + } + benchBuf.Write(data) + benchBuf.WriteString("\n") + return nil + } + + for k, v := range c.influxConfig.metadata { + benchBuf.WriteString(fmt.Sprintf("%s: %s\n", k, v)) + } + + logPaths := map[string]string{ + "experiment": c.experimentDir, + "baseline": c.baselineDir, + } + for logType, dir := range logPaths { + benchBuf.WriteString(fmt.Sprintf("cockroach: %s\n", logType)) + logPath := filepath.Join(dir, "metadata.log") + if err = readFileFn(logPath, true); err != nil { + return nil, err + } + for _, pkg := range c.packages { + benchBuf.WriteString(fmt.Sprintf("pkg: 
%s\n", pkg)) + logPath = filepath.Join(dir, getReportLogName(reportLogName, pkg)) + if err = readFileFn(logPath, false); err != nil { + return nil, err + } + } + } + + benchReader := benchfmt.NewReader(bytes.NewReader(benchBuf.Bytes()), "buffer") + recordsMap := make(map[string][]*benchfmt.Result) + seen := make(map[string]map[string]struct{}) + for benchReader.Scan() { + switch rec := benchReader.Result().(type) { + case *benchfmt.SyntaxError: + // In case the benchmark log is corrupted or contains a syntax error, we + // want to return an error to the caller. + return nil, fmt.Errorf("syntax error: %v", rec) + case *benchfmt.Result: + var cmp, pkg string + for _, config := range rec.Config { + if config.Key == "pkg" { + pkg = string(config.Value) + } + if config.Key == opts.Compare { + cmp = string(config.Value) + } + } + key := pkg + packageSeparator + string(rec.Name) + // Update the name to include the package name. This is a workaround for + // `benchseries`, that currently does not support package names. + rec.Name = benchfmt.Name(key) + recordsMap[key] = append(recordsMap[key], rec.Clone()) + // Determine if we've seen this package/benchmark combination for both + // the baseline and experiment run. + if _, ok := seen[key]; !ok { + seen[key] = make(map[string]struct{}) + } + seen[key][cmp] = struct{}{} + } + } + + // Add only the benchmarks that have been seen in both the baseline and + // experiment run. + for key, records := range recordsMap { + if len(seen[key]) != 2 { + continue + } + for _, rec := range records { + builder.Add(rec) + } + } + + comparisons, err := builder.AllComparisonSeries(nil, benchseries.DUPE_REPLACE) + if err != nil { + return nil, errors.Wrap(err, "failed to create comparison series") + } + return comparisons, nil +} + +func (c *compare) pushToInfluxDB() error { + client := influxdb2.NewClient(c.influxConfig.host, c.influxConfig.token) + defer client.Close() + writeAPI := client.WriteAPI("cockroach", "microbench") + errorChan := writeAPI.Errors() + + comparisons, err := c.createBenchSeries() + if err != nil { + return err + } + + for _, cs := range comparisons { + cs.AddSummaries(0.95, 1000) + residues := make(map[string]string) + for _, r := range cs.Residues { + residues[r.S] = r.Slice[0] + } + + for idx, benchmarkName := range cs.Benchmarks { + sum := cs.Summaries[0][idx] + if !sum.Defined() { + continue + } + + experimentTime := cs.Series[0] + ts, err := benchseries.ParseNormalizedDateString(experimentTime) + if err != nil { + return errors.Wrap(err, "error parsing experiment commit date") + } + fields := map[string]interface{}{ + "low": sum.Low, + "center": sum.Center, + "high": sum.High, + "upload-time": residues["upload-time"], + "baseline-commit": cs.HashPairs[experimentTime].DenHash, + "experiment-commit": cs.HashPairs[experimentTime].NumHash, + "benchmarks-commit": residues["benchmarks-commit"], + } + pkg := strings.Split(benchmarkName, packageSeparator)[0] + benchmarkName = strings.Split(benchmarkName, packageSeparator)[1] + tags := map[string]string{ + "name": benchmarkName, + "unit": cs.Unit, + "pkg": pkg, + "repository": "cockroach", + "branch": residues["branch"], + "goarch": residues["goarch"], + "goos": residues["goos"], + "machine-type": residues["machine-type"], + } + p := influxdb2.NewPoint("benchmark-result", tags, fields, ts) + writeAPI.WritePoint(p) + } + } + done := make(chan struct{}) + go func() { + defer close(done) + writeAPI.Flush() + }() + + select { + case err = <-errorChan: + return errors.Wrap(err, "failed to write to 
InfluxDB") + case <-done: + return nil + } +} + func processReportFile(builder *model.Builder, id, pkg, path string) error { file, err := os.Open(path) if err != nil { diff --git a/pkg/cmd/roachprod-microbench/compare_test.go b/pkg/cmd/roachprod-microbench/compare_test.go index 07bf4bacfdf2..4e5bd6781200 100644 --- a/pkg/cmd/roachprod-microbench/compare_test.go +++ b/pkg/cmd/roachprod-microbench/compare_test.go @@ -71,8 +71,8 @@ func TestCompareBenchmarks(t *testing.T) { require.NoError(t, err) c := &compare{ compareConfig: compareConfig{ - newDir: newDir, - oldDir: oldDir, + experimentDir: newDir, + baselineDir: oldDir, }, packages: packages, } diff --git a/pkg/cmd/roachprod-microbench/main.go b/pkg/cmd/roachprod-microbench/main.go index e08e8cb17f13..683937bb7e1b 100644 --- a/pkg/cmd/roachprod-microbench/main.go +++ b/pkg/cmd/roachprod-microbench/main.go @@ -148,8 +148,8 @@ The source can be a local path or a GCS URI.`, func makeCompareCommand() *cobra.Command { config := defaultCompareConfig() runCmdFunc := func(cmd *cobra.Command, args []string) error { - config.newDir = args[0] - config.oldDir = args[1] + config.experimentDir = args[0] + config.baselineDir = args[1] c, err := newCompare(config) if err != nil { return err @@ -163,40 +163,51 @@ func makeCompareCommand() *cobra.Command { comparisonResult := c.createComparisons(metricMaps, "baseline", "experiment") var links map[string]string - if config.publishGoogleSheet { + if c.sheetDesc != "" { links, err = c.publishToGoogleSheets(comparisonResult) if err != nil { return err } + if c.slackConfig.token != "" { + err = c.postToSlack(links, comparisonResult) + if err != nil { + return err + } + } } - if config.slackToken != "" { - err = c.postToSlack(links, comparisonResult) + if c.influxConfig.token != "" { + err = c.pushToInfluxDB() if err != nil { return err } } // if the threshold is set, we want to compare and fail the job in case of perf regressions - if config.threshold != skipComparison { + if c.threshold != skipComparison { return c.compareUsingThreshold(comparisonResult) } return nil } cmd := &cobra.Command{ - Use: "compare ", + Use: "compare ", Short: "Compare two sets of microbenchmark results.", - Long: `Compare two sets of microbenchmark results.`, - Args: cobra.ExactArgs(2), - RunE: runCmdFunc, + Long: `Compare two sets of microbenchmark results. + +- experiment and baseline directories should contain the results of running microbenchmarks using the run command. 
+- experiment generally contains the results from a new version of the code, and baseline the results from a stable version.`,
+		Args: cobra.ExactArgs(2),
+		RunE: runCmdFunc,
 	}
-	cmd.Flags().StringVar(&config.sheetDesc, "sheet-desc", config.sheetDesc, "append a description to the sheet title when doing a comparison")
-	cmd.Flags().StringVar(&config.slackToken, "slack-token", config.slackToken, "pass a slack token to post the results to a slack channel")
-	cmd.Flags().StringVar(&config.slackUser, "slack-user", config.slackUser, "slack user to post the results as")
-	cmd.Flags().StringVar(&config.slackChannel, "slack-channel", config.slackChannel, "slack channel to post the results to")
+	cmd.Flags().StringVar(&config.sheetDesc, "sheet-desc", config.sheetDesc, "set a sheet description to publish the results to Google Sheets")
+	cmd.Flags().StringVar(&config.slackConfig.token, "slack-token", config.slackConfig.token, "pass a slack token to post the results to a slack channel")
+	cmd.Flags().StringVar(&config.slackConfig.user, "slack-user", config.slackConfig.user, "slack user to post the results as")
+	cmd.Flags().StringVar(&config.slackConfig.channel, "slack-channel", config.slackConfig.channel, "slack channel to post the results to")
+	cmd.Flags().StringVar(&config.influxConfig.host, "influx-host", config.influxConfig.host, "InfluxDB host to push the results to")
+	cmd.Flags().StringVar(&config.influxConfig.token, "influx-token", config.influxConfig.token, "pass an InfluxDB auth token to push the results to InfluxDB")
+	cmd.Flags().StringToStringVar(&config.influxConfig.metadata, "influx-metadata", config.influxConfig.metadata, "pass metadata to add to the InfluxDB measurement")
 	cmd.Flags().Float64Var(&config.threshold, "threshold", config.threshold, "threshold in percentage value for detecting perf regression ")
-	cmd.Flags().BoolVar(&config.publishGoogleSheet, "publish-sheets", config.publishGoogleSheet, "flag to make the command create a google sheet of the benchmark results and publish it")
 	return cmd
 }
diff --git a/pkg/cmd/roachprod/flags.go b/pkg/cmd/roachprod/flags.go
index d859343c40a0..b6fc5c18cd5e 100644
--- a/pkg/cmd/roachprod/flags.go
+++ b/pkg/cmd/roachprod/flags.go
@@ -47,6 +47,7 @@ var (
 	grafanaArch       string
 	grafanaDumpDir    string
 	jaegerConfigNodes string
+	listCost          bool
 	listDetails       bool
 	listJSON          bool
 	listMine          bool
@@ -68,10 +69,10 @@ var (
 	useTreeDist       = true
 	sig               = 9
 	waitFlag          = false
-	maxWait           = 0
+	gracePeriod       = 0
 	deploySig         = 15
 	deployWaitFlag    = true
-	deployMaxWait     = 300
+	deployGracePeriod = 300
 	pause             = time.Duration(0)
 	createVMOpts      = vm.DefaultCreateOpts()
 	startOpts         = roachprod.DefaultStartOpts()
@@ -179,6 +180,10 @@ func initFlags() {
 	extendCmd.Flags().DurationVarP(&extendLifetime,
 		"lifetime", "l", 12*time.Hour, "Lifetime of the cluster")
+	listCmd.Flags().BoolVarP(&listCost,
+		"cost", "c", os.Getenv("ROACHPROD_NO_COST_ESTIMATES") != "true",
+		"Show cost estimates",
+	)
 	listCmd.Flags().BoolVarP(&listDetails,
 		"details", "d", false, "Show cluster details")
 	listCmd.Flags().BoolVar(&listJSON,
@@ -258,21 +263,23 @@ func initFlags() {
 		// See: https://github.com/spf13/cobra/issues/1398
 		sigPtr := &sig
 		waitPtr := &waitFlag
-		maxWaitPtr := &maxWait
+		gracePeriodPtr := &gracePeriod
 		// deployCmd is a special case, because it is used to stop processes in a
 		// rolling restart, and we want to drain the nodes by default.
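 		// Draining maps to the gentler defaults above: SIGTERM (deploySig = 15)
 		// rather than SIGKILL, waiting for processes to exit, and a five-minute
 		// grace period (deployGracePeriod = 300) rather than none.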
if stopProcessesCmd == deployCmd { sigPtr = &deploySig waitPtr = &deployWaitFlag - maxWaitPtr = &deployMaxWait + gracePeriodPtr = &deployGracePeriod } stopProcessesCmd.Flags().IntVar(sigPtr, "sig", *sigPtr, "signal to pass to kill") stopProcessesCmd.Flags().BoolVar(waitPtr, "wait", *waitPtr, "wait for processes to exit") - stopProcessesCmd.Flags().IntVar(maxWaitPtr, "max-wait", *maxWaitPtr, "approx number of seconds to wait for processes to exit") + stopProcessesCmd.Flags().IntVar(gracePeriodPtr, "grace-period", *gracePeriodPtr, "approx number of seconds to wait for processes to exit, before a forceful shutdown (SIGKILL) is performed") } deployCmd.Flags().DurationVar(&pause, "pause", pause, "duration to pause between node restarts") syncCmd.Flags().BoolVar(&listOpts.IncludeVolumes, "include-volumes", false, "Include volumes when syncing") + syncCmd.Flags().StringArrayVarP(&listOpts.IncludeProviders, "clouds", "c", + make([]string, 0), "Specify the cloud providers when syncing. Important: Use this flag only if you are certain that you want to sync with a specific cloud. All DNS host entries for other clouds will be erased from the DNS zone.") wipeCmd.Flags().BoolVar(&wipePreserveCerts, "preserve-certs", false, "do not wipe certificates") @@ -403,7 +410,7 @@ func initFlags() { cmd.Flags().BoolVar(&urlOpen, "open", false, "Open the url in a browser") } - for _, cmd := range []*cobra.Command{createCmd, destroyCmd, extendCmd, logsCmd} { + for _, cmd := range []*cobra.Command{createCmd, listCmd, destroyCmd, extendCmd, logsCmd} { cmd.Flags().StringVarP(&username, "username", "u", os.Getenv("ROACHPROD_USER"), "Username to run under, detect if blank") } diff --git a/pkg/cmd/roachprod/main.go b/pkg/cmd/roachprod/main.go index 4468cb5f3ecc..cd7b07ee66c3 100644 --- a/pkg/cmd/roachprod/main.go +++ b/pkg/cmd/roachprod/main.go @@ -230,7 +230,7 @@ directories inside ${HOME}/local directory are removed. `, Args: cobra.ArbitraryArgs, Run: wrap(func(cmd *cobra.Command, args []string) error { - return roachprod.Destroy(config.Logger, destroyAllMine, destroyAllLocal, args...) + return roachprod.Destroy(config.Logger, username, destroyAllMine, destroyAllLocal, args...) }), } @@ -307,7 +307,11 @@ hosts file. if listJSON && listDetails { return errors.New("'json' option cannot be combined with 'details' option") } - filteredCloud, err := roachprod.List(config.Logger, listMine, listPattern, vm.ListOptions{ComputeEstimatedCost: true}) + filteredCloud, err := roachprod.List(config.Logger, listMine, listPattern, + vm.ListOptions{ + Username: username, + ComputeEstimatedCost: listCost, + }) if err != nil { return err @@ -463,12 +467,12 @@ var bashCompletion = os.ExpandEnv("$HOME/.roachprod/bash-completion.sh") // a side-effect. If you don't care about the list output, just "roachprod list // &>/dev/null". var syncCmd = &cobra.Command{ - Use: "sync", + Use: "sync [flags]", Short: "sync ssh keys/config and hosts files", Long: ``, Args: cobra.NoArgs, Run: wrap(func(cmd *cobra.Command, args []string) error { - _, err := roachprod.Sync(config.Logger, vm.ListOptions{IncludeVolumes: listOpts.IncludeVolumes}) + _, err := roachprod.Sync(config.Logger, listOpts) _ = rootCmd.GenBashCompletionFile(bashCompletion) return err }), @@ -674,7 +678,7 @@ SIGHUP), unless you also configure --max-wait. 
if sig == 9 /* SIGKILL */ && !cmd.Flags().Changed("wait") { wait = true } - stopOpts := roachprod.StopOpts{Wait: wait, MaxWait: maxWait, ProcessTag: tag, Sig: sig} + stopOpts := roachprod.StopOpts{Wait: wait, GracePeriod: gracePeriod, ProcessTag: tag, Sig: sig} return roachprod.Stop(context.Background(), config.Logger, args[0], stopOpts) }), } @@ -759,7 +763,7 @@ non-terminating signal (e.g. SIGHUP), unless you also configure --max-wait. } stopOpts := roachprod.StopOpts{ Wait: wait, - MaxWait: maxWait, + GracePeriod: gracePeriod, Sig: sig, VirtualClusterName: virtualClusterName, SQLInstance: sqlInstance, @@ -803,7 +807,7 @@ Currently available application options are: versionArg = args[2] } return roachprod.Deploy(context.Background(), config.Logger, args[0], args[1], - versionArg, pause, deploySig, deployWaitFlag, deployMaxWait, secure) + versionArg, pause, deploySig, deployWaitFlag, deployGracePeriod, secure) }), } diff --git a/pkg/cmd/roachtest/BUILD.bazel b/pkg/cmd/roachtest/BUILD.bazel index d1901d83da7d..14962aad3cbe 100644 --- a/pkg/cmd/roachtest/BUILD.bazel +++ b/pkg/cmd/roachtest/BUILD.bazel @@ -74,6 +74,7 @@ go_library( "@com_github_spf13_cobra//:cobra", "@in_gopkg_yaml_v2//:yaml_v2", "@org_golang_x_sync//errgroup", + "@org_golang_x_sys//unix", ], ) @@ -115,6 +116,7 @@ go_test( "//pkg/roachprod/logger", "//pkg/roachprod/vm", "//pkg/roachprod/vm/azure", + "//pkg/roachprod/vm/gce", "//pkg/testutils", "//pkg/testutils/datapathutils", "//pkg/testutils/echotest", diff --git a/pkg/cmd/roachtest/README.md b/pkg/cmd/roachtest/README.md index fa9635cdd9a9..a2d225e96e3c 100644 --- a/pkg/cmd/roachtest/README.md +++ b/pkg/cmd/roachtest/README.md @@ -88,8 +88,10 @@ Especially for OSX users, building a linux CockroachDB binary is an extremely ti ``` # Make a single-node local cluster. roachprod create -n 1 local -# Tell roachprod to place a CockroachDB 21.1.5 Linux binary onto node 1. -roachprod stage local release v21.1.5 --os linux +# Tell roachprod to place a CockroachDB 23.1.12 Linux binary onto node 1. +# Alternatively you can choose any other recent release from the +# releases page: https://www.cockroachlabs.com/docs/releases/ +roachprod stage local release v23.1.12 --os linux --arch amd64 # Copy it from (local) node 1 to the current directory mv ~/local/1/cockroach cockroach # Can now use roachtest with the `--cockroach=./cockroach` flag. diff --git a/pkg/cmd/roachtest/cluster.go b/pkg/cmd/roachtest/cluster.go index 34dfb09de3aa..ca21d7759f2f 100644 --- a/pkg/cmd/roachtest/cluster.go +++ b/pkg/cmd/roachtest/cluster.go @@ -43,7 +43,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/roachtestutil" "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/spec" "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/test" - "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/tests" "github.com/cockroachdb/cockroach/pkg/roachprod" "github.com/cockroachdb/cockroach/pkg/roachprod/cloud" "github.com/cockroachdb/cockroach/pkg/roachprod/config" @@ -58,6 +57,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/errors" _ "github.com/lib/pq" + "golang.org/x/sys/unix" ) func init() { @@ -665,6 +665,10 @@ type clusterImpl struct { seed *int64 } + // defaultVirtualCluster, when set, changes the default virtual + // cluster tests connect to by default. + defaultVirtualCluster string + // destroyState contains state related to the cluster's destruction. 
destroyState destroyState @@ -936,10 +940,6 @@ func (f *clusterFactory) newCluster( if err != nil { return nil, nil, err } - if clusterCloud != spec.Local { - providerOptsContainer.SetProviderOpts(clusterCloud.String(), providerOpts) - workloadProviderOptsContainer.SetProviderOpts(clusterCloud.String(), workloadProviderOpts) - } createFlagsOverride(&createVMOpts) // Make sure expiration is changed if --lifetime override flag @@ -962,6 +962,15 @@ func (f *clusterFactory) newCluster( // // https://github.com/cockroachdb/cockroach/issues/67906#issuecomment-887477675 genName := f.genName(cfg) + + // Set the zones used for the cluster. We call this in the loop as the default GCE zone + // is randomized to avoid zone exhaustion errors. + providerOpts, workloadProviderOpts = cfg.spec.SetRoachprodOptsZones(providerOpts, workloadProviderOpts, params, string(selectedArch)) + if clusterCloud != spec.Local { + providerOptsContainer.SetProviderOpts(clusterCloud.String(), providerOpts) + workloadProviderOptsContainer.SetProviderOpts(clusterCloud.String(), workloadProviderOpts) + } + // Logs for creating a new cluster go to a dedicated log file. var retryStr string if i > 1 { @@ -1144,16 +1153,21 @@ func (c *clusterImpl) validate( } if cpus := nodes.CPUs; cpus != 0 { for i, vm := range cDetails.VMs { + nodeID := i + 1 + // If we are using a workload node, the last node may have a different cpu count. + if len(cDetails.VMs) == nodeID && c.spec.WorkloadNode { + cpus = c.spec.WorkloadNodeCPUs + } vmCPUs := MachineTypeToCPUs(vm.MachineType) // vmCPUs will be negative if the machine type is unknown. Give unknown // machine types the benefit of the doubt. if vmCPUs > 0 && vmCPUs < cpus { - return fmt.Errorf("node %d has %d CPUs, test requires %d", i, vmCPUs, cpus) + return fmt.Errorf("node %d has %d CPUs, test requires %d", nodeID, vmCPUs, cpus) } // Clouds typically don't support odd numbers of vCPUs; they can result in subtle performance issues. // N.B. Some machine families, e.g., n2 in GCE, do not support 1 vCPU. (See AWSMachineType and GCEMachineType.) if vmCPUs > 1 && vmCPUs&1 == 1 { - return fmt.Errorf("node %d has an _odd_ number of CPUs (%d)", i, vmCPUs) + return fmt.Errorf("node %d has an _odd_ number of CPUs (%d)", nodeID, vmCPUs) } } } @@ -1834,7 +1848,9 @@ func (c *clusterImpl) doDestroy(ctx context.Context, l *logger.Logger) <-chan st // We use a non-cancelable context for running this command. Once we got // here, the cluster cannot be destroyed again, so we really want this // command to succeed. - if err := roachprod.Destroy(l, false /* destroyAllMine */, false /* destroyAllLocal */, c.name); err != nil { + if err := roachprod.Destroy(l, "" /* optionalUsername */, false, /* destroyAllMine */ + false, /* destroyAllLocal */ + c.name); err != nil { l.ErrorfCtx(ctx, "error destroying cluster %s: %s", c, err) } else { l.PrintfCtx(ctx, "destroying cluster %s... done", c) @@ -1925,25 +1941,11 @@ func (c *clusterImpl) PutE( return errors.Wrap(roachprod.Put(ctx, l, c.MakeNodes(nodes...), src, dest, true /* useTreeDist */), "cluster.PutE") } -// PutCockroach checks if a test specifies a cockroach binary to upload to all -// nodes in the cluster. By default, we randomly upload a binary with or without -// runtime assertions enabled. Note that we upload to all nodes even if they +// PutCockroach uploads a binary with or without runtime assertions enabled, +// as determined by t.Cockroach(). 
Note that we upload to all nodes even if they // don't use the binary, so that the test runner can always fetch logs. func (c *clusterImpl) PutCockroach(ctx context.Context, l *logger.Logger, t *testImpl) error { - switch t.spec.CockroachBinary { - case registry.RandomizedCockroach: - if tests.UsingRuntimeAssertions(t) { - t.l.Printf("To reproduce the same set of metamorphic constants, run this test with %s=%d", test.EnvAssertionsEnabledSeed, c.cockroachRandomSeed()) - } - return c.PutE(ctx, l, t.Cockroach(), test.DefaultCockroachPath, c.All()) - case registry.StandardCockroach: - return c.PutE(ctx, l, t.StandardCockroach(), test.DefaultCockroachPath, c.All()) - case registry.RuntimeAssertionsCockroach: - t.l.Printf("To reproduce the same set of metamorphic constants, run this test with %s=%d", test.EnvAssertionsEnabledSeed, c.cockroachRandomSeed()) - return c.PutE(ctx, l, t.RuntimeAssertionsCockroach(), test.DefaultCockroachPath, c.All()) - default: - return errors.Errorf("Specified cockroach binary does not exist.") - } + return c.PutE(ctx, l, t.Cockroach(), test.DefaultCockroachPath, c.All()) } // PutLibraries inserts the specified libraries, by name, into all nodes on the cluster @@ -2294,6 +2296,10 @@ func (c *clusterImpl) RefetchCertsFromNode(ctx context.Context, node int) error }) } +func (c *clusterImpl) SetDefaultVirtualCluster(name string) { + c.defaultVirtualCluster = name +} + // SetRandomSeed sets the random seed to be used by the cluster. If // not called, clusters generate a random seed from the global // generator in the `rand` package. This function must be called @@ -2312,13 +2318,7 @@ func (c *clusterImpl) cockroachRandomSeed() int64 { // If the user provided a seed via environment variable, always use // that, even if the test attempts to set a different seed. - if seedStr := os.Getenv(test.EnvAssertionsEnabledSeed); seedStr != "" { - seedOverride, err := strconv.ParseInt(seedStr, 0, 64) - if err != nil { - panic(fmt.Sprintf("error parsing %s: %s", test.EnvAssertionsEnabledSeed, err)) - } - c.randomSeed.seed = &seedOverride - } else if c.randomSeed.seed == nil { + if c.randomSeed.seed == nil { seed := rand.Int63() c.randomSeed.seed = &seed } @@ -2362,16 +2362,15 @@ func (c *clusterImpl) StopE( c.setStatusForClusterOpt("stopping", stopOpts.RoachtestOpts.Worker, nodes...) defer c.clearStatusForClusterOpt(stopOpts.RoachtestOpts.Worker) - if c.goCoverDir != "" && stopOpts.RoachprodOpts.Sig == 9 /* SIGKILL */ { - // If we are trying to collect coverage, we don't want to kill processes; - // use SIGUSR1 which dumps coverage data and exits. Note that Cockroach - // v23.1 and earlier ignore SIGUSR1, so we still want to send SIGKILL. + if c.goCoverDir != "" && stopOpts.RoachprodOpts.Sig == int(unix.SIGKILL) { + // If we are trying to collect coverage, we first send a SIGUSR1 + // which dumps coverage data and exits. Note that Cockroach v23.1 + // and earlier ignore SIGUSR1, so we still want to send SIGKILL, + // and that's the underlying behaviour of `Stop`. 
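+		// With Wait set and a GracePeriod of 10 seconds, Stop escalates to a
+		// forceful SIGKILL on its own if the process has not exited in time.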
l.Printf("coverage mode: first trying to stop using SIGUSR1") - opts := stopOpts.RoachprodOpts - opts.Sig = 10 // SIGUSR1 - opts.Wait = true - opts.MaxWait = 10 - _ = roachprod.Stop(ctx, l, c.MakeNodes(nodes...), opts) + stopOpts.RoachprodOpts.Sig = 10 // SIGUSR1 + stopOpts.RoachprodOpts.Wait = true + stopOpts.RoachprodOpts.GracePeriod = 10 } return errors.Wrap(roachprod.Stop(ctx, l, c.MakeNodes(nodes...), stopOpts.RoachprodOpts), "cluster.StopE") } @@ -2474,9 +2473,12 @@ func (c *clusterImpl) RunE(ctx context.Context, options install.RunOptions, args c.f.L().Printf("details in %s.log", logFile) } l.Printf("> %s", cmd) + expanderCfg := install.ExpanderConfig{ + DefaultVirtualCluster: c.defaultVirtualCluster, + } if err := roachprod.Run( ctx, l, c.MakeNodes(nodes), "", "", c.IsSecure(), - l.Stdout, l.Stderr, args, options, + l.Stdout, l.Stderr, args, options.WithExpanderConfig(expanderCfg), ); err != nil { if err := ctx.Err(); err != nil { l.Printf("(note: incoming context was canceled: %s)", err) @@ -2647,6 +2649,7 @@ func (c *clusterImpl) pgURLErr( if opts.External { certsDir = c.localCertsDir } + opts.VirtualClusterName = c.virtualCluster(opts.VirtualClusterName) urls, err := roachprod.PgURL(ctx, l, c.MakeNodes(nodes), certsDir, opts) if err != nil { return nil, err @@ -2718,17 +2721,27 @@ func addrToHostPort(addr string) (string, int, error) { // InternalAdminUIAddr returns the internal Admin UI address in the form host:port // for the specified nodes. func (c *clusterImpl) InternalAdminUIAddr( - ctx context.Context, l *logger.Logger, nodes option.NodeListOption, + ctx context.Context, l *logger.Logger, nodes option.NodeListOption, opts ...option.OptionFunc, ) ([]string, error) { - return c.adminUIAddr(ctx, l, nodes, false) + var virtualClusterOptions option.VirtualClusterOptions + if err := option.Apply(&virtualClusterOptions, opts...); err != nil { + return nil, err + } + + return c.adminUIAddr(ctx, l, nodes, virtualClusterOptions, false /* external */) } // ExternalAdminUIAddr returns the external Admin UI address in the form host:port // for the specified nodes. func (c *clusterImpl) ExternalAdminUIAddr( - ctx context.Context, l *logger.Logger, nodes option.NodeListOption, + ctx context.Context, l *logger.Logger, nodes option.NodeListOption, opts ...option.OptionFunc, ) ([]string, error) { - return c.adminUIAddr(ctx, l, nodes, true) + var virtualClusterOptions option.VirtualClusterOptions + if err := option.Apply(&virtualClusterOptions, opts...); err != nil { + return nil, err + } + + return c.adminUIAddr(ctx, l, nodes, virtualClusterOptions, true /* external */) } func (c *clusterImpl) SQLPorts( @@ -2738,7 +2751,9 @@ func (c *clusterImpl) SQLPorts( tenant string, sqlInstance int, ) ([]int, error) { - return roachprod.SQLPorts(ctx, l, c.MakeNodes(nodes), c.IsSecure(), tenant, sqlInstance) + return roachprod.SQLPorts( + ctx, l, c.MakeNodes(nodes), c.IsSecure(), c.virtualCluster(tenant), sqlInstance, + ) } func (c *clusterImpl) AdminUIPorts( @@ -2748,15 +2763,43 @@ func (c *clusterImpl) AdminUIPorts( tenant string, sqlInstance int, ) ([]int, error) { - return roachprod.AdminPorts(ctx, l, c.MakeNodes(nodes), c.IsSecure(), tenant, sqlInstance) + return roachprod.AdminPorts( + ctx, l, c.MakeNodes(nodes), c.IsSecure(), c.virtualCluster(tenant), sqlInstance, + ) +} + +// virtualCluster returns the name of the virtual cluster that we +// should use when the requested `tenant` name was passed by the +// user. When a specific virtual cluster was required, we use +// it. 
Otherwise, we fallback to the cluster's default virtual +// cluster, if any. +func (c *clusterImpl) virtualCluster(name string) string { + if name != "" { + return name + } + + return c.defaultVirtualCluster } func (c *clusterImpl) adminUIAddr( - ctx context.Context, l *logger.Logger, nodes option.NodeListOption, external bool, + ctx context.Context, + l *logger.Logger, + nodes option.NodeListOption, + opts option.VirtualClusterOptions, + external bool, ) ([]string, error) { var addrs []string - adminURLs, err := roachprod.AdminURL(ctx, l, c.MakeNodes(nodes), "", 0, "", - external, false, false) + adminURLs, err := roachprod.AdminURL( + ctx, + l, + c.MakeNodes(nodes), + c.virtualCluster(opts.VirtualClusterName), + opts.SQLInstance, + "", /* path */ + external, + false, + false, + ) if err != nil { return nil, err } @@ -2839,7 +2882,7 @@ var _ = (&clusterImpl{}).ExternalIP // Conn returns a SQL connection to the specified node. func (c *clusterImpl) Conn( - ctx context.Context, l *logger.Logger, node int, opts ...func(*option.ConnOption), + ctx context.Context, l *logger.Logger, node int, opts ...option.OptionFunc, ) *gosql.DB { db, err := c.ConnE(ctx, l, node, opts...) if err != nil { @@ -2850,15 +2893,16 @@ func (c *clusterImpl) Conn( // ConnE returns a SQL connection to the specified node. func (c *clusterImpl) ConnE( - ctx context.Context, l *logger.Logger, node int, opts ...func(*option.ConnOption), + ctx context.Context, l *logger.Logger, node int, opts ...option.OptionFunc, ) (_ *gosql.DB, retErr error) { // NB: errors.Wrap returns nil if err is nil. defer func() { retErr = errors.Wrapf(retErr, "connecting to node %d", node) }() - connOptions := &option.ConnOption{} - for _, opt := range opts { - opt(connOptions) + var connOptions option.ConnOptions + if err := option.Apply(&connOptions, opts...); err != nil { + return nil, err } + urls, err := c.ExternalPGUrl(ctx, l, c.Node(node), roachprod.PGURLOptions{ VirtualClusterName: connOptions.VirtualClusterName, SQLInstance: connOptions.SQLInstance, @@ -2882,16 +2926,19 @@ func (c *clusterImpl) ConnE( } dataSourceName := u.String() - if len(connOptions.Options) > 0 { - vals := make(url.Values) - for k, v := range connOptions.Options { - vals.Add(k, v) - } - // connect_timeout is a libpq-specific parameter for the maximum wait for - // connection, in seconds. + vals := make(url.Values) + for k, v := range connOptions.ConnectionOptions { + vals.Add(k, v) + } + + if _, ok := vals["connect_timeout"]; !ok { + // connect_timeout is a libpq-specific parameter for the maximum + // wait for connection, in seconds. If the caller did not specify + // a connection timeout, we set a default. vals.Add("connect_timeout", "60") - dataSourceName = dataSourceName + "&" + vals.Encode() } + + dataSourceName = dataSourceName + "&" + vals.Encode() db, err := gosql.Open("postgres", dataSourceName) if err != nil { return nil, err diff --git a/pkg/cmd/roachtest/cluster/cluster_interface.go b/pkg/cmd/roachtest/cluster/cluster_interface.go index 2f22e93063c5..a11b45ad24b1 100644 --- a/pkg/cmd/roachtest/cluster/cluster_interface.go +++ b/pkg/cmd/roachtest/cluster/cluster_interface.go @@ -57,6 +57,10 @@ type Cluster interface { // used by builds with runtime assertions enabled. SetRandomSeed(seed int64) + // SetDefaultVirtualCluster changes the virtual cluster tests + // connect to by default. + SetDefaultVirtualCluster(string) + // Starting and stopping CockroachDB. 
StartE(ctx context.Context, l *logger.Logger, startOpts option.StartOpts, settings install.ClusterSettings, opts ...option.Option) error @@ -92,13 +96,13 @@ type Cluster interface { // SQL clients to nodes. - Conn(ctx context.Context, l *logger.Logger, node int, opts ...func(*option.ConnOption)) *gosql.DB - ConnE(ctx context.Context, l *logger.Logger, node int, opts ...func(*option.ConnOption)) (*gosql.DB, error) + Conn(ctx context.Context, l *logger.Logger, node int, opts ...option.OptionFunc) *gosql.DB + ConnE(ctx context.Context, l *logger.Logger, node int, opts ...option.OptionFunc) (*gosql.DB, error) // URLs and Ports for the Admin UI. - InternalAdminUIAddr(ctx context.Context, l *logger.Logger, node option.NodeListOption) ([]string, error) - ExternalAdminUIAddr(ctx context.Context, l *logger.Logger, node option.NodeListOption) ([]string, error) + InternalAdminUIAddr(ctx context.Context, l *logger.Logger, node option.NodeListOption, opts ...option.OptionFunc) ([]string, error) + ExternalAdminUIAddr(ctx context.Context, l *logger.Logger, node option.NodeListOption, opts ...option.OptionFunc) ([]string, error) AdminUIPorts(ctx context.Context, l *logger.Logger, node option.NodeListOption, tenant string, sqlInstance int) ([]int, error) // Running commands on nodes. diff --git a/pkg/cmd/roachtest/github.go b/pkg/cmd/roachtest/github.go index ffef330121ec..c38b1cceb148 100644 --- a/pkg/cmd/roachtest/github.go +++ b/pkg/cmd/roachtest/github.go @@ -161,7 +161,7 @@ func (g *githubIssues) createPostRequest( failures []failure, message string, sideEyeTimeoutSnapshotURL string, - metamorphicBuild bool, + runtimeAssertionsBuild bool, coverageBuild bool, ) (issues.PostRequest, error) { var mention []string @@ -199,7 +199,7 @@ func (g *githubIssues) createPostRequest( // Issues posted from roachtest are identifiable as such, and they are also release blockers // (this label may be removed by a human upon closer investigation). const infraFlakeLabel = "X-infra-flake" - const metamorphicLabel = "B-metamorphic-enabled" + const runtimeAssertionsLabel = "B-runtime-assertions-enabled" const coverageLabel = "B-coverage-enabled" labels := []string{"O-roachtest"} if infraFlake { @@ -208,12 +208,12 @@ func (g *githubIssues) createPostRequest( labels = append(labels, issues.TestFailureLabel) if !spec.NonReleaseBlocker { // TODO(radu): remove this check once these build types are stabilized. 
-		if !metamorphicBuild && !coverageBuild {
+		if !coverageBuild && !runtimeAssertionsBuild {
 			labels = append(labels, issues.ReleaseBlockerLabel)
 		}
 	}
-	if metamorphicBuild {
-		labels = append(labels, metamorphicLabel)
+	if runtimeAssertionsBuild {
+		labels = append(labels, runtimeAssertionsLabel)
 	}
 	if coverageBuild {
 		labels = append(labels, coverageLabel)
@@ -247,11 +247,11 @@ func (g *githubIssues) createPostRequest(
 	artifacts := fmt.Sprintf("/%s", testName)
 	clusterParams := map[string]string{
-		roachtestPrefix("cloud"):            roachtestflags.Cloud.String(),
-		roachtestPrefix("cpu"):              fmt.Sprintf("%d", spec.Cluster.CPUs),
-		roachtestPrefix("ssd"):              fmt.Sprintf("%d", spec.Cluster.SSDs),
-		roachtestPrefix("metamorphicBuild"): fmt.Sprintf("%t", metamorphicBuild),
-		roachtestPrefix("coverageBuild"):    fmt.Sprintf("%t", coverageBuild),
+		roachtestPrefix("cloud"):                  roachtestflags.Cloud.String(),
+		roachtestPrefix("cpu"):                    fmt.Sprintf("%d", spec.Cluster.CPUs),
+		roachtestPrefix("ssd"):                    fmt.Sprintf("%d", spec.Cluster.SSDs),
+		roachtestPrefix("runtimeAssertionsBuild"): fmt.Sprintf("%t", runtimeAssertionsBuild),
+		roachtestPrefix("coverageBuild"):          fmt.Sprintf("%t", coverageBuild),
 	}
 	// Emit CPU architecture only if it was specified; otherwise, it's captured below, assuming cluster was created.
 	if spec.Cluster.Arch != "" {
@@ -286,11 +286,11 @@ func (g *githubIssues) createPostRequest(
 			"there should be a similar issue without the "+coverageLabel+" label. If there isn't one, it is "+
 			"possible that this failure is related to the code coverage infrastructure or overhead.")
 	}
-	if metamorphicBuild {
+	if runtimeAssertionsBuild {
 		topLevelNotes = append(topLevelNotes,
-			"This build has metamorphic test constants enabled. If the same failure was hit in a "+
-				"non-metamorphic run, there should be a similar issue without the "+metamorphicLabel+" label. If there "+
-				"isn't one, it is possible that this failure is caused by a metamorphic constant.")
+			"This build has runtime assertions enabled. If the same failure was hit in a run without assertions "+
+				"enabled, there should be a similar issue without the "+runtimeAssertionsLabel+" label. If there "+
+				"isn't one, then this failure is likely due to an assertion violation or (assertion) timeout.")
 	}
 	sideEyeMsg := ""
@@ -305,7 +305,7 @@ func (g *githubIssues) createPostRequest(
 		TestName: issueName,
 		Labels:   labels,
 		// Keep issues separate unless these labels match.
- AdoptIssueLabelMatchSet: []string{infraFlakeLabel, coverageLabel, metamorphicLabel}, + AdoptIssueLabelMatchSet: []string{infraFlakeLabel, coverageLabel, runtimeAssertionsLabel}, TopLevelNotes: topLevelNotes, Message: issueMessage, Artifacts: artifacts, @@ -325,19 +325,11 @@ func (g *githubIssues) MaybePost( return nil, nil } - var metamorphicBuild bool - switch t.spec.CockroachBinary { - case registry.StandardCockroach: - metamorphicBuild = false - case registry.RuntimeAssertionsCockroach: - metamorphicBuild = true - default: - metamorphicBuild = tests.UsingRuntimeAssertions(t) - } postRequest, err := g.createPostRequest( t.Name(), t.start, t.end, t.spec, t.failures(), message, sideEyeTimeoutSnapshotURL, - metamorphicBuild, t.goCoverEnabled) + tests.UsingRuntimeAssertions(t), t.goCoverEnabled) + if err != nil { return nil, err } diff --git a/pkg/cmd/roachtest/github_test.go b/pkg/cmd/roachtest/github_test.go index ce8568f1ec7c..c6cd50ea43fa 100644 --- a/pkg/cmd/roachtest/github_test.go +++ b/pkg/cmd/roachtest/github_test.go @@ -136,7 +136,7 @@ func TestCreatePostRequest(t *testing.T) { clusterCreationFailed bool loadTeamsFailed bool localSSD bool - metamorphicBuild bool + runtimeAssertionsBuild bool coverageBuild bool extraLabels []string arch vm.CPUArch @@ -161,22 +161,21 @@ func TestCreatePostRequest(t *testing.T) { expectedTeam: "@cockroachdb/unowned", expectedName: testName, expectedParams: prefixAll(map[string]string{ - "cloud": "gce", - "encrypted": "false", - "fs": "ext4", - "ssd": "0", - "cpu": "4", - "arch": "amd64", - "localSSD": "false", - "metamorphicBuild": "false", - "coverageBuild": "false", + "cloud": "gce", + "encrypted": "false", + "fs": "ext4", + "ssd": "0", + "cpu": "4", + "arch": "amd64", + "localSSD": "false", + "runtimeAssertionsBuild": "false", + "coverageBuild": "false", }), }, // 2. { - localSSD: true, - metamorphicBuild: true, - arch: vm.ArchARM64, + localSSD: true, + arch: vm.ArchARM64, failures: []failure{ createFailure(errClusterProvisioningFailed(errors.New("gcloud error"))), }, @@ -186,15 +185,15 @@ func TestCreatePostRequest(t *testing.T) { expectedName: "cluster_creation", expectedMessagePrefix: testName + " failed", expectedParams: prefixAll(map[string]string{ - "cloud": "gce", - "encrypted": "false", - "fs": "ext4", - "ssd": "0", - "cpu": "4", - "arch": "arm64", - "localSSD": "true", - "metamorphicBuild": "true", - "coverageBuild": "false", + "cloud": "gce", + "encrypted": "false", + "fs": "ext4", + "ssd": "0", + "cpu": "4", + "arch": "arm64", + "localSSD": "true", + "runtimeAssertionsBuild": "false", + "coverageBuild": "false", }), }, // 3. Assert that release-blocker label doesn't exist when @@ -210,11 +209,11 @@ func TestCreatePostRequest(t *testing.T) { expectedName: "ssh_problem", expectedMessagePrefix: testName + " failed", expectedParams: prefixAll(map[string]string{ - "cloud": "gce", - "ssd": "0", - "cpu": "4", - "metamorphicBuild": "false", - "coverageBuild": "false", + "cloud": "gce", + "ssd": "0", + "cpu": "4", + "runtimeAssertionsBuild": "false", + "coverageBuild": "false", }), }, // 4. 
Simulate failure loading TEAMS.yaml @@ -243,36 +242,36 @@ func TestCreatePostRequest(t *testing.T) { expectedTeam: "@cockroachdb/unowned", expectedName: testName, expectedParams: prefixAll(map[string]string{ - "cloud": "gce", - "encrypted": "false", - "fs": "ext4", - "ssd": "0", - "cpu": "4", - "arch": "amd64", - "localSSD": "false", - "metamorphicBuild": "false", - "coverageBuild": "false", + "cloud": "gce", + "encrypted": "false", + "fs": "ext4", + "ssd": "0", + "cpu": "4", + "arch": "amd64", + "localSSD": "false", + "runtimeAssertionsBuild": "false", + "coverageBuild": "false", }), }, - // 7. Verify that release-blocker label is not applied on metamorphic builds + // 7. Verify that release-blocker label is not applied on runtime assertion builds // (for now). { - metamorphicBuild: true, - failures: []failure{createFailure(errors.New("other"))}, - expectedPost: true, - expectedLabels: []string{"C-test-failure", "B-metamorphic-enabled"}, - expectedTeam: "@cockroachdb/unowned", - expectedName: testName, + runtimeAssertionsBuild: true, + failures: []failure{createFailure(errors.New("other"))}, + expectedPost: true, + expectedLabels: []string{"C-test-failure", "B-runtime-assertions-enabled"}, + expectedTeam: "@cockroachdb/unowned", + expectedName: testName, expectedParams: prefixAll(map[string]string{ - "cloud": "gce", - "encrypted": "false", - "fs": "ext4", - "ssd": "0", - "cpu": "4", - "arch": "amd64", - "localSSD": "false", - "metamorphicBuild": "true", - "coverageBuild": "false", + "cloud": "gce", + "encrypted": "false", + "fs": "ext4", + "ssd": "0", + "cpu": "4", + "arch": "amd64", + "localSSD": "false", + "runtimeAssertionsBuild": "true", + "coverageBuild": "false", }), }, // 8. Verify that release-blocker label is not applied on coverage builds (for @@ -286,15 +285,15 @@ func TestCreatePostRequest(t *testing.T) { expectedName: testName, expectedLabels: []string{"C-test-failure", "B-coverage-enabled", "foo-label"}, expectedParams: prefixAll(map[string]string{ - "cloud": "gce", - "encrypted": "false", - "fs": "ext4", - "ssd": "0", - "cpu": "4", - "arch": "amd64", - "localSSD": "false", - "metamorphicBuild": "false", - "coverageBuild": "true", + "cloud": "gce", + "encrypted": "false", + "fs": "ext4", + "ssd": "0", + "cpu": "4", + "arch": "amd64", + "localSSD": "false", + "runtimeAssertionsBuild": "false", + "coverageBuild": "true", }), }, // 9. Verify preemption failure are routed to test-eng and marked as infra-flake, when the @@ -446,7 +445,7 @@ func TestCreatePostRequest(t *testing.T) { req, err := github.createPostRequest( testName, ti.start, ti.end, testSpec, testCase.failures, - testCase.message, testCase.sideEyeURL, testCase.metamorphicBuild, testCase.coverageBuild, + testCase.message, testCase.sideEyeURL, testCase.runtimeAssertionsBuild, testCase.coverageBuild, ) if testCase.loadTeamsFailed { // Assert that if TEAMS.yaml cannot be loaded then function errors. 
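For context on the `AdoptIssueLabelMatchSet` change above: that match set decides whether a new failure may be filed onto an existing open issue. The sketch below illustrates the matching rule implied by the field and by the test cases above; it is not the actual `issues` package implementation, and the helper name `adoptable` is invented for the example.

func adoptable(matchSet, issueLabels, failureLabels []string) bool {
	has := func(labels []string, label string) bool {
		for _, l := range labels {
			if l == label {
				return true
			}
		}
		return false
	}
	// Adoption requires the existing issue and the new failure to agree on
	// every label in the match set: both carry it, or both lack it. This is
	// what keeps, e.g., B-runtime-assertions-enabled failures off of issues
	// filed for builds without runtime assertions.
	for _, label := range matchSet {
		if has(issueLabels, label) != has(failureLabels, label) {
			return false
		}
	}
	return true
}

Under this rule, a coverage-build failure can only adopt an issue that already carries B-coverage-enabled, which is exactly the separation the test cases above exercise.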
diff --git a/pkg/cmd/roachtest/grafana/configs/changefeed-roachtest-grafana-dashboard.json b/pkg/cmd/roachtest/grafana/configs/changefeed-roachtest-grafana-dashboard.json index d8b89e1200e0..9107399e5364 100644 --- a/pkg/cmd/roachtest/grafana/configs/changefeed-roachtest-grafana-dashboard.json +++ b/pkg/cmd/roachtest/grafana/configs/changefeed-roachtest-grafana-dashboard.json @@ -1141,7 +1141,7 @@ "h": 7, "w": 6, "x": 0, - "y": 40 + "y": 56 }, "id": 18, "options": { @@ -1260,7 +1260,7 @@ "h": 7, "w": 6, "x": 6, - "y": 40 + "y": 56 }, "id": 57, "options": { @@ -1353,7 +1353,7 @@ "h": 7, "w": 5, "x": 12, - "y": 40 + "y": 56 }, "id": 72, "options": { @@ -1459,7 +1459,7 @@ "h": 7, "w": 6, "x": 0, - "y": 43 + "y": 59 }, "id": 35, "options": { @@ -1564,7 +1564,7 @@ "h": 7, "w": 6, "x": 6, - "y": 43 + "y": 59 }, "id": 33, "options": { @@ -1657,7 +1657,7 @@ "h": 7, "w": 6, "x": 12, - "y": 43 + "y": 59 }, "id": 39, "options": { @@ -1764,7 +1764,7 @@ "h": 6, "w": 4, "x": 0, - "y": 36 + "y": 52 }, "id": 63, "options": { @@ -1883,7 +1883,7 @@ "h": 6, "w": 4, "x": 4, - "y": 36 + "y": 52 }, "id": 64, "options": { @@ -2002,7 +2002,7 @@ "h": 6, "w": 4, "x": 8, - "y": 36 + "y": 52 }, "id": 65, "options": { @@ -2097,7 +2097,7 @@ "h": 6, "w": 4, "x": 12, - "y": 36 + "y": 52 }, "id": 75, "options": { @@ -2191,7 +2191,7 @@ "h": 6, "w": 4, "x": 16, - "y": 36 + "y": 52 }, "id": 19, "options": { @@ -2963,7 +2963,7 @@ "h": 8, "w": 8, "x": 0, - "y": 38 + "y": 54 }, "id": 22, "options": { @@ -3056,7 +3056,7 @@ "h": 8, "w": 10, "x": 8, - "y": 38 + "y": 54 }, "id": 24, "options": { @@ -3149,7 +3149,7 @@ "h": 8, "w": 8, "x": 0, - "y": 46 + "y": 62 }, "id": 23, "options": { @@ -3242,7 +3242,7 @@ "h": 8, "w": 5, "x": 8, - "y": 46 + "y": 62 }, "id": 50, "options": { @@ -3334,7 +3334,7 @@ "h": 8, "w": 5, "x": 13, - "y": 46 + "y": 62 }, "id": 51, "options": { @@ -3453,7 +3453,7 @@ "h": 7, "w": 8, "x": 0, - "y": 40 + "y": 56 }, "id": 31, "options": { @@ -3499,6 +3499,442 @@ ], "title": "Other", "type": "row" + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 37 + }, + "id": 82, + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "localprom" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 38 + }, + "id": 84, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "localprom" + }, + "editorMode": "code", + "expr": "sum(rate(sql_select_count[$__rate_interval]))", + "legendFormat": "Selects", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + 
"uid": "localprom" + }, + "editorMode": "code", + "expr": "sum(rate(sql_update_count[$__rate_interval]))", + "hide": false, + "legendFormat": "Updates", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "localprom" + }, + "editorMode": "code", + "expr": "sum(rate(sql_insert_count[$__rate_interval]))", + "hide": false, + "interval": "", + "legendFormat": "Inserts", + "range": true, + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "localprom" + }, + "editorMode": "code", + "expr": "sum(rate(sql_delete_count[$__rate_interval]))", + "hide": false, + "legendFormat": "Deletes", + "range": true, + "refId": "D" + } + ], + "title": "SQL Statements", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "localprom" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 38 + }, + "id": 86, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "localprom" + }, + "editorMode": "code", + "expr": "sum(rate(sql_bytesin[$__rate_interval]))", + "legendFormat": "Bytes In", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "localprom" + }, + "editorMode": "code", + "expr": "sum(rate(sql_bytesout[$__rate_interval]))", + "hide": false, + "legendFormat": "Bytes Out", + "range": true, + "refId": "B" + } + ], + "title": "SQL Byte Traffic", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "localprom" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "ns" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 46 + }, + "id": 88, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + 
"tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "localprom" + }, + "editorMode": "code", + "exemplar": false, + "expr": "histogram_quantile(0.99, rate(sql_service_latency_bucket[$__rate_interval]))", + "legendFormat": "node={{node}}", + "range": true, + "refId": "A" + } + ], + "title": "Service Latency: SQL, 99th percentile", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "localprom" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "ns" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 46 + }, + "id": 89, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "localprom" + }, + "editorMode": "code", + "exemplar": false, + "expr": "histogram_quantile(0.90, rate(sql_service_latency_bucket[$__rate_interval]))", + "legendFormat": "node={{node}}", + "range": true, + "refId": "A" + } + ], + "title": "Service Latency: SQL, 90th percentile", + "type": "timeseries" + } + ], + "title": "SQL", + "type": "row" } ], "refresh": "5s", @@ -3516,6 +3952,6 @@ "timezone": "", "title": "CDC", "uid": "928XNlN4k", - "version": 2, + "version": 3, "weekStart": "" } diff --git a/pkg/cmd/roachtest/main.go b/pkg/cmd/roachtest/main.go index 978230094394..c00b1f7df1f5 100644 --- a/pkg/cmd/roachtest/main.go +++ b/pkg/cmd/roachtest/main.go @@ -13,6 +13,7 @@ package main import ( "context" "fmt" + "math" "math/rand" "os" "os/user" @@ -302,25 +303,44 @@ func testsToRun( // based on the test categorization criteria. func updateSpecForSelectiveTests(ctx context.Context, specs []registry.TestSpec) { selectedTestsCount := 0 - // run and select 35% of successful tests which gives a window of 3 days for all tests to run - selectedTests, err := testselector.CategoriseTests(ctx, - testselector.NewDefaultSelectTestsReq(35, roachtestflags.Cloud, roachtestflags.Suite)) + allTests, err := testselector.CategoriseTests(ctx, + testselector.NewDefaultSelectTestsReq(roachtestflags.Cloud, roachtestflags.Suite)) if err != nil { fmt.Printf("running all tests! error selecting tests: %v\n", err) return } - tdMap := make(map[string]*testselector.TestDetails) - for _, td := range selectedTests { - tdMap[td.Name] = td + + // successfulTests are the tests considered by snowflake to not run, but, part of the testSpecs. + // So, it is an intersection of all tests that are part of the run and all tests that are returned + // by snowflake as successful. 
+	// This is why we need the intersection:
+	// - testSpec contains all the tests that are currently part of the run.
+	// - The list of tests returned by the selector can contain tests that may not be part of the test spec. This can
+	//   happen when tests have been decommissioned.
+	// Now, we want to take the tests common to both. These are the tests from which we need to select
+	// "successfulTestsSelectPct" percent of tests to run.
+	successfulTests := make([]*testselector.TestDetails, 0)
+	for _, spec := range specs {
+		if td, ok := allTests[spec.Name]; ok && !td.Selected {
+			// Add only the unselected tests that are part of the specs.
+			// These are tests that have been running successfully.
+			successfulTests = append(successfulTests, td)
+		}
+	}
+	// numberOfTestsToSelect is the number of tests to select from successfulTests, based on the selection percentage.
+	numberOfTestsToSelect := int(math.Ceil(float64(len(successfulTests)) * roachtestflags.SuccessfulTestsSelectPct))
+	for i := 0; i < numberOfTestsToSelect; i++ {
+		successfulTests[i].Selected = true
 	}
+	fmt.Printf("%d selected out of %d successful tests.\n", numberOfTestsToSelect, len(successfulTests))
 	for i := range specs {
-		if testShouldBeSkipped(tdMap, specs[i], roachtestflags.Suite) {
+		if testShouldBeSkipped(allTests, specs[i], roachtestflags.Suite) {
 			specs[i].Skip = "test selector"
 			specs[i].SkipDetails = "test skipped because it is stable and selective-tests is set."
 		} else {
 			selectedTestsCount++
 		}
-		if td, ok := tdMap[specs[i].Name]; ok {
+		if td, ok := allTests[specs[i].Name]; ok {
 			// populate the stats as obtained from the test selector
 			specs[i].SetStats(td.AvgDurationInMillis, td.LastFailureIsPreempt)
 		}
diff --git a/pkg/cmd/roachtest/operations/BUILD.bazel b/pkg/cmd/roachtest/operations/BUILD.bazel
index 39fc2129f3f4..280287b7d127 100644
--- a/pkg/cmd/roachtest/operations/BUILD.bazel
+++ b/pkg/cmd/roachtest/operations/BUILD.bazel
@@ -11,6 +11,7 @@ go_library(
         "manual_compaction.go",
         "network_partition.go",
         "node_kill.go",
+        "pause_job.go",
         "register.go",
         "resize.go",
         "utils.go",
diff --git a/pkg/cmd/roachtest/operations/pause_job.go b/pkg/cmd/roachtest/operations/pause_job.go
new file mode 100644
index 000000000000..699119367863
--- /dev/null
+++ b/pkg/cmd/roachtest/operations/pause_job.go
@@ -0,0 +1,88 @@
+// Copyright 2024 The Cockroach Authors.
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+ +package operations + +import ( + "context" + "fmt" + "time" + + "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/cluster" + "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/operation" + "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/option" + "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/registry" + "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/roachtestflags" + "github.com/cockroachdb/cockroach/pkg/util/randutil" +) + +type resumePausedJob struct { + jobId string +} + +func (r *resumePausedJob) Cleanup(ctx context.Context, o operation.Operation, c cluster.Cluster) { + conn := c.Conn(ctx, o.L(), 1, option.VirtualClusterName(roachtestflags.VirtualCluster)) + defer conn.Close() + + resumeJobStmt := fmt.Sprintf("RESUME JOB %s", r.jobId) + _, err := conn.ExecContext(ctx, resumeJobStmt) + if err != nil { + o.Fatal(err) + } +} + +func pauseLDRJob( + ctx context.Context, o operation.Operation, c cluster.Cluster, +) registry.OperationCleanup { + conn := c.Conn(ctx, o.L(), 1, option.VirtualClusterName(roachtestflags.VirtualCluster)) + defer conn.Close() + + //fetch running ldr jobs + jobs, err := conn.QueryContext(ctx, "(WITH x AS (SHOW JOBS) SELECT job_id FROM x WHERE job_type = 'LOGICAL REPLICATION' AND status = 'running')") + if err != nil { + o.Fatal(err) + } + + var jobIds []string + for jobs.Next() { + var jobId string + if err := jobs.Scan(&jobId); err != nil { + o.Fatal(err) + } + jobIds = append(jobIds, jobId) + } + + //pick a random ldr job + rng, _ := randutil.NewPseudoRand() + jobId := jobIds[rng.Intn(len(jobIds))] + + o.Status(fmt.Sprintf("pausing LDR job %s", jobId)) + pauseJobStmt := fmt.Sprintf("PAUSE JOB %s WITH REASON = 'roachtest operation'", jobId) + _, err = conn.ExecContext(ctx, pauseJobStmt) + if err != nil { + o.Fatal(err) + } + + o.Status(fmt.Sprintf("paused LDR job %s", jobId)) + return &resumePausedJob{ + jobId: jobId, + } +} + +func registerPauseLDRJob(r registry.Registry) { + r.AddOperation(registry.OperationSpec{ + Name: "pause-ldr", + Owner: registry.OwnerDisasterRecovery, + Timeout: 15 * time.Minute, + CompatibleClouds: registry.AllClouds, + Dependencies: []registry.OperationDependency{registry.OperationRequiresLDRJobRunning}, + Run: pauseLDRJob, + }) +} diff --git a/pkg/cmd/roachtest/operations/register.go b/pkg/cmd/roachtest/operations/register.go index 7ba9d91b9e64..712f1664028c 100644 --- a/pkg/cmd/roachtest/operations/register.go +++ b/pkg/cmd/roachtest/operations/register.go @@ -23,4 +23,5 @@ func RegisterOperations(r registry.Registry) { registerBackupRestore(r) registerManualCompaction(r) registerResize(r) + registerPauseLDRJob(r) } diff --git a/pkg/cmd/roachtest/option/BUILD.bazel b/pkg/cmd/roachtest/option/BUILD.bazel index 1c7bb2c2da2b..00e6c8f080f2 100644 --- a/pkg/cmd/roachtest/option/BUILD.bazel +++ b/pkg/cmd/roachtest/option/BUILD.bazel @@ -3,7 +3,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "option", srcs = [ - "connection_options.go", + "cluster_options.go", "node_list_option.go", "node_lister.go", "option_interface.go", @@ -19,7 +19,7 @@ go_library( go_test( name = "option_test", - srcs = ["connection_options_test.go"], + srcs = ["cluster_options_test.go"], embed = [":option"], deps = ["@com_github_stretchr_testify//require"], ) diff --git a/pkg/cmd/roachtest/option/cluster_options.go b/pkg/cmd/roachtest/option/cluster_options.go new file mode 100644 index 000000000000..65d922ec6790 --- /dev/null +++ 
b/pkg/cmd/roachtest/option/cluster_options.go
@@ -0,0 +1,202 @@
+// Copyright 2023 The Cockroach Authors.
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package option
+
+import (
+	"fmt"
+	"reflect"
+	"time"
+
+	"github.com/cockroachdb/cockroach/pkg/roachprod/install"
+)
+
+// GlobalOptions is the set of all options that cluster functions can
+// take. Specific functions should define their own options struct and
+// call the `Apply` function to get the options that apply to
+// them. Every options struct should contain, by necessity, a subset
+// of the fields defined here.
+type GlobalOptions struct {
+	VirtualClusterOptions
+	User              string
+	DBName            string
+	AuthMode          install.PGAuthMode
+	ConnectionOptions map[string]string
+}
+
+type OptionFunc func(*GlobalOptions)
+
+// User allows the customization of the user to use when connecting to
+// crdb.
+func User(user string) OptionFunc {
+	return func(o *GlobalOptions) {
+		o.User = user
+	}
+}
+
+// VirtualClusterName allows the customization of the virtual cluster
+// to connect to. If not provided, it will default to the cluster's
+// default virtual cluster (or `system` if that's not set).
+func VirtualClusterName(name string) OptionFunc {
+	return func(o *GlobalOptions) {
+		o.VirtualClusterName = name
+	}
+}
+
+// SQLInstance allows the caller to indicate which sql instance to
+// use. Only applicable in separate-process virtual clusters when more
+// than one instance is running on the same node.
+func SQLInstance(sqlInstance int) OptionFunc {
+	return func(o *GlobalOptions) {
+		o.SQLInstance = sqlInstance
+	}
+}
+
+// ConnectionOption allows the caller to provide a custom connection
+// option to be included in the pgurl.
+func ConnectionOption(key, value string) OptionFunc {
+	return func(o *GlobalOptions) {
+		if o.ConnectionOptions == nil {
+			o.ConnectionOptions = make(map[string]string)
+		}
+
+		o.ConnectionOptions[key] = value
+	}
+}
+
+// ConnectTimeout allows callers to set a connection timeout.
+func ConnectTimeout(t time.Duration) OptionFunc {
+	sec := int64(t.Seconds())
+	if sec < 1 {
+		sec = 1
+	}
+	return ConnectionOption("connect_timeout", fmt.Sprintf("%d", sec))
+}
+
+// DBName changes the database name used when connecting to crdb.
+func DBName(dbName string) OptionFunc {
+	return func(o *GlobalOptions) {
+		o.DBName = dbName
+	}
+}
+
+// AuthMode allows the callers to change the authentication mode used
+// when connecting to crdb.
+func AuthMode(authMode install.PGAuthMode) OptionFunc {
+	return func(o *GlobalOptions) {
+		o.AuthMode = authMode
+	}
+}
+
+// Apply takes in an options struct and a list of user-provided
+// options, and applies those options to the container. The container
+// should be a pointer to a struct containing the relevant fields --
+// in other words, the struct actually defines the custom options a
+// function can take. The struct is expected to have a field for each
+// custom option passed. An error is returned if an unrecognized
+// option is passed.
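+//
+// To illustrate (a sketch mirroring the tests in cluster_options_test.go
+// below; the struct name connectOptions is invented for the example), a
+// function-specific struct picks up exactly the options it declares:
+//
+//	type connectOptions struct {
+//		VirtualClusterOptions
+//		DBName string
+//	}
+//
+//	var o connectOptions
+//	err := Apply(&o, VirtualClusterName("app"), DBName("mydb"))
+//	// o.VirtualClusterName == "app", o.DBName == "mydb". Passing
+//	// User("bob") instead would return a "non-applicable option"
+//	// error, since connectOptions declares no User field.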
+func Apply(container any, opts ...OptionFunc) (retErr error) {
+	var globalOptions GlobalOptions
+	for _, opt := range opts {
+		opt(&globalOptions)
+	}
+
+	// Many functions in the `reflect` package can panic if their
+	// parameters are of unexpected types, so we wrap these panics as
+	// errors to be returned to the caller.
+	var currentField string
+	defer func() {
+		if r := recover(); r != nil {
+			if currentField == "" {
+				retErr = fmt.Errorf("option.Apply failed: %v", r)
+			} else {
+				retErr = fmt.Errorf("failed to set %q on %T: %v", currentField, container, r)
+			}
+		}
+	}()
+
+	isEmbeddedStruct := func(f reflect.StructField) bool {
+		return f.Type.Kind() == reflect.Struct && f.Anonymous
+	}
+
+	// Build a mapping from option name to the corresponding
+	// `reflect.Value`. We skip embedded struct fields because we are
+	// only interested in the "flattened" view of the options
+	// structs. This allows us to reuse options structs across multiple
+	// functions (for instance, multiple functions need to take options
+	// related to which virtual cluster to connect to).
+	globalOptionsValue := reflect.ValueOf(globalOptions)
+	globalFields := make(map[string]reflect.Value)
+	for _, f := range reflect.VisibleFields(reflect.TypeOf(globalOptions)) {
+		if isEmbeddedStruct(f) {
+			continue
+		}
+
+		globalFields[f.Name] = globalOptionsValue.FieldByName(f.Name)
+	}
+
+	// We keep a set of fields from `globalOptions` that are actually
+	// used by the container struct so that we can validate that the
+	// caller didn't pass any options that are not applicable.
+	containerFields := make(map[string]struct{})
+
+	containerStruct := reflect.ValueOf(container).Elem()
+	containerType := reflect.TypeOf(containerStruct.Interface())
+	for _, structField := range reflect.VisibleFields(containerType) {
+		if isEmbeddedStruct(structField) {
+			continue
+		}
+
+		currentField = structField.Name
+		f := containerStruct.FieldByName(currentField)
+
+		// It is an error for the container struct to have fields that are
+		// not present in the `GlobalOptions` struct.
+		globalField, ok := globalFields[currentField]
+		if !ok {
+			return fmt.Errorf("options struct %T has unknown option %q", container, currentField)
+		}
+
+		// Update the field in the container struct with the value in
+		// `GlobalOptions`.
+		f.Set(globalField)
+		containerFields[currentField] = struct{}{}
+	}
+
+	// Validate that the caller did not pass any options that do not
+	// apply to the `container` struct.
+	for name, f := range globalFields {
+		if _, ok := containerFields[name]; ok {
+			continue
+		}
+
+		if !f.IsZero() {
+			return fmt.Errorf("non-applicable option %q for %T", name, container)
+		}
+	}
+
+	return nil
+}
+
+// VirtualClusterOptions contains the options to be accepted by
+// functions that can be applied on a specific virtual cluster.
+type VirtualClusterOptions struct {
+	VirtualClusterName string
+	SQLInstance        int
+}
+
+// ConnOptions contains connection-related options.
+type ConnOptions struct {
+	VirtualClusterOptions
+	User              string
+	DBName            string
+	AuthMode          install.PGAuthMode
+	ConnectionOptions map[string]string
+}
diff --git a/pkg/cmd/roachtest/option/cluster_options_test.go b/pkg/cmd/roachtest/option/cluster_options_test.go
new file mode 100644
index 000000000000..0e60b11fbc48
--- /dev/null
+++ b/pkg/cmd/roachtest/option/cluster_options_test.go
@@ -0,0 +1,163 @@
+// Copyright 2023 The Cockroach Authors.
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package option + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestApply(t *testing.T) { + type validContainer struct { + VirtualClusterOptions + ConnectionOptions map[string]string + } + + type wrongTypeContainer struct { + DBName int + } + + type invalidOptionContainer struct { + DBName string + InvalidOption string + } + + testCases := []struct { + name string + optionsStruct string + options []OptionFunc + expected any + expectedErr string + }{ + { + name: "no custom option", + optionsStruct: "valid_container", + expected: validContainer{}, + }, + { + name: "setting a custom virtual cluster name", + optionsStruct: "valid_container", + options: []OptionFunc{VirtualClusterName("app")}, + expected: validContainer{ + VirtualClusterOptions: VirtualClusterOptions{VirtualClusterName: "app"}, + }, + }, + { + name: "setting a single connection option", + optionsStruct: "valid_container", + options: []OptionFunc{ConnectionOption("name", "val")}, + expected: validContainer{ConnectionOptions: map[string]string{"name": "val"}}, + }, + { + name: "setting multiple connection options", + optionsStruct: "valid_container", + options: []OptionFunc{ConnectionOption("name", "val"), ConnectionOption("name2", "val2")}, + expected: validContainer{ConnectionOptions: map[string]string{"name": "val", "name2": "val2"}}, + }, + { + name: "setting a connection option and a virtual cluster", + optionsStruct: "valid_container", + options: []OptionFunc{ConnectionOption("name", "val"), VirtualClusterName("app")}, + expected: validContainer{ + VirtualClusterOptions: VirtualClusterOptions{VirtualClusterName: "app"}, + ConnectionOptions: map[string]string{"name": "val"}, + }, + }, + { + name: "using VirtualClusterOptions directly", + optionsStruct: "virtual_cluster_options", + options: []OptionFunc{VirtualClusterName("app")}, + expected: VirtualClusterOptions{VirtualClusterName: "app"}, + }, + { + name: "setting a non-applicable option", + optionsStruct: "valid_container", + options: []OptionFunc{User("user")}, + expectedErr: `non-applicable option "User" for *option.validContainer`, + }, + { + name: "using a wrong type for a supported option", + optionsStruct: "wrong_type", + options: []OptionFunc{DBName("hello")}, + expectedErr: `failed to set "DBName" on *option.wrongTypeContainer: reflect.Set: value of type string is not assignable to type int`, + }, + { + name: "using an unsupported option", + optionsStruct: "invalid_option", + options: []OptionFunc{DBName("mydb")}, + expectedErr: `option.invalidOptionContainer has unknown option "InvalidOption"`, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + var err error + var container any + switch tc.optionsStruct { + case "valid_container": + var c validContainer + err = Apply(&c, tc.options...) + container = c + case "virtual_cluster_options": + var c VirtualClusterOptions + err = Apply(&c, tc.options...) + container = c + case "wrong_type": + var c wrongTypeContainer + err = Apply(&c, tc.options...) + container = c + case "invalid_option": + var c invalidOptionContainer + err = Apply(&c, tc.options...) 
+ container = c + default: + t.Fatalf("invalid optionsStruct %s", tc.optionsStruct) + } + + if tc.expectedErr == "" { + require.NoError(t, err) + require.Equal(t, tc.expected, container) + } else { + require.Error(t, err) + require.Contains(t, err.Error(), tc.expectedErr) + } + }) + } +} + +func TestTimeoutCalculation(t *testing.T) { + var opts ConnOptions + for _, d := range []struct { + t time.Duration + o string + }{ + { + t: time.Second, + o: "1", + }, + { + t: time.Millisecond, + o: "1", + }, + { + t: time.Minute, + o: "60", + }, + } { + t.Run(d.t.String(), func(t *testing.T) { + o := ConnectTimeout(d.t) + require.NoError(t, Apply(&opts, o)) + require.Equal(t, d.o, opts.ConnectionOptions["connect_timeout"]) + }) + } +} diff --git a/pkg/cmd/roachtest/option/connection_options.go b/pkg/cmd/roachtest/option/connection_options.go deleted file mode 100644 index 587746320791..000000000000 --- a/pkg/cmd/roachtest/option/connection_options.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2023 The Cockroach Authors. -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package option - -import ( - "fmt" - "time" - - "github.com/cockroachdb/cockroach/pkg/roachprod/install" -) - -type ConnOption struct { - User string - DBName string - VirtualClusterName string - SQLInstance int - AuthMode install.PGAuthMode - Options map[string]string -} - -func User(user string) func(*ConnOption) { - return func(option *ConnOption) { - option.User = user - } -} - -func VirtualClusterName(name string) func(*ConnOption) { - return func(option *ConnOption) { - option.VirtualClusterName = name - } -} - -func SQLInstance(sqlInstance int) func(*ConnOption) { - return func(option *ConnOption) { - option.SQLInstance = sqlInstance - } -} - -func ConnectionOption(key, value string) func(*ConnOption) { - return func(option *ConnOption) { - if len(option.Options) == 0 { - option.Options = make(map[string]string) - } - option.Options[key] = value - } -} - -func ConnectTimeout(t time.Duration) func(*ConnOption) { - sec := int64(t.Seconds()) - if sec < 1 { - sec = 1 - } - return ConnectionOption("connect_timeout", fmt.Sprintf("%d", sec)) -} - -func DBName(dbName string) func(*ConnOption) { - return func(option *ConnOption) { - option.DBName = dbName - } -} - -func AuthMode(authMode install.PGAuthMode) func(*ConnOption) { - return func(option *ConnOption) { - option.AuthMode = authMode - } -} diff --git a/pkg/cmd/roachtest/option/connection_options_test.go b/pkg/cmd/roachtest/option/connection_options_test.go deleted file mode 100644 index c853374954e0..000000000000 --- a/pkg/cmd/roachtest/option/connection_options_test.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2023 The Cockroach Authors. -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. 
- -package option - -import ( - "testing" - "time" - - "github.com/stretchr/testify/require" -) - -func TestFirstOptionCreatesMap(t *testing.T) { - var opts ConnOption - o := ConnectionOption("a", "b") - o(&opts) - require.NotNil(t, opts.Options) -} - -func TestTimeoutCalculation(t *testing.T) { - var opts ConnOption - for _, d := range []struct { - t time.Duration - o string - }{ - { - t: time.Second, - o: "1", - }, - { - t: time.Millisecond, - o: "1", - }, - { - t: time.Minute, - o: "60", - }, - } { - t.Run(d.t.String(), func(t *testing.T) { - o := ConnectTimeout(d.t) - o(&opts) - require.Equal(t, d.o, opts.Options["connect_timeout"]) - }) - } -} diff --git a/pkg/cmd/roachtest/option/options.go b/pkg/cmd/roachtest/option/options.go index 2ef558405a94..dafb726ecf13 100644 --- a/pkg/cmd/roachtest/option/options.go +++ b/pkg/cmd/roachtest/option/options.go @@ -224,13 +224,13 @@ func NoBackupSchedule(opts interface{}) { } // Graceful performs a graceful stop of the cockroach process. -func Graceful(maxWaitSeconds int) func(interface{}) { +func Graceful(gracePeriodSeconds int) func(interface{}) { return func(opts interface{}) { switch opts := opts.(type) { case *StopOpts: opts.RoachprodOpts.Sig = 15 // SIGTERM opts.RoachprodOpts.Wait = true - opts.RoachprodOpts.MaxWait = maxWaitSeconds + opts.RoachprodOpts.GracePeriod = gracePeriodSeconds } } } diff --git a/pkg/cmd/roachtest/registry/filter_test.go b/pkg/cmd/roachtest/registry/filter_test.go index 0f7abd8fee13..a59f8415586c 100644 --- a/pkg/cmd/roachtest/registry/filter_test.go +++ b/pkg/cmd/roachtest/registry/filter_test.go @@ -22,9 +22,8 @@ import ( func init() { OverrideTeams(team.Map{ - OwnerCDC.ToTeamAlias(): {}, - OwnerKV.ToTeamAlias(): {}, - OwnerReplication.ToTeamAlias(): {}, + OwnerCDC.ToTeamAlias(): {}, + OwnerKV.ToTeamAlias(): {}, }) } diff --git a/pkg/cmd/roachtest/registry/operation_spec.go b/pkg/cmd/roachtest/registry/operation_spec.go index d83da9a2bf03..dc37b6d710c3 100644 --- a/pkg/cmd/roachtest/registry/operation_spec.go +++ b/pkg/cmd/roachtest/registry/operation_spec.go @@ -30,6 +30,7 @@ const ( OperationRequiresPopulatedDatabase OperationRequiresZeroUnavailableRanges OperationRequiresZeroUnderreplicatedRanges + OperationRequiresLDRJobRunning ) // OperationCleanup specifies an operation that diff --git a/pkg/cmd/roachtest/registry/owners.go b/pkg/cmd/roachtest/registry/owners.go index 5498007125d3..b7d2372a53bd 100644 --- a/pkg/cmd/roachtest/registry/owners.go +++ b/pkg/cmd/roachtest/registry/owners.go @@ -27,7 +27,6 @@ const ( OwnerCDC Owner = `cdc` OwnerDisasterRecovery Owner = `disaster-recovery` OwnerKV Owner = `kv` - OwnerReplication Owner = `replication` OwnerAdmissionControl Owner = `admission-control` OwnerObservability Owner = `obs-prs` OwnerObservabilityIndia Owner = `obs-india-prs` diff --git a/pkg/cmd/roachtest/registry/testdata/filter/describe b/pkg/cmd/roachtest/registry/testdata/filter/describe index 147c9ac402fa..7cfac9b6ecad 100644 --- a/pkg/cmd/roachtest/registry/testdata/filter/describe +++ b/pkg/cmd/roachtest/registry/testdata/filter/describe @@ -24,9 +24,9 @@ describe cloud=gce suite=nightly ---- tests which are compatible with cloud "gce" and are part of the "nightly" suite -describe cloud=local owner=replication benchmarks +describe cloud=local owner=kv benchmarks ---- -benchmarks which are compatible with cloud "local" and have owner "replication" +benchmarks which are compatible with cloud "local" and have owner "kv" describe cloud=gce suite=nightly foo diff --git 
a/pkg/cmd/roachtest/registry/testdata/filter/errors b/pkg/cmd/roachtest/registry/testdata/filter/errors
index 4fc727872f7a..c0daa7db7d38 100644
--- a/pkg/cmd/roachtest/registry/testdata/filter/errors
+++ b/pkg/cmd/roachtest/registry/testdata/filter/errors
@@ -23,10 +23,6 @@ filter suite=orm
 ----
 error: no tests in suite "orm"
 
-filter owner=replication
-----
-error: no tests with owner "replication"
-
 filter benchmarks component_blargle
 ----
diff --git a/pkg/cmd/roachtest/roachtestflags/flags.go b/pkg/cmd/roachtest/roachtestflags/flags.go
index a301811f6762..33e5afb9b777 100644
--- a/pkg/cmd/roachtest/roachtestflags/flags.go
+++ b/pkg/cmd/roachtest/roachtestflags/flags.go
@@ -94,6 +94,12 @@ var (
 		Usage: `Use selective tests to run based on previous test execution. this is considered only if the select-probability is 1.0`,
 	})
 
+	SuccessfulTestsSelectPct = 0.35
+	_ = registerRunFlag(&SuccessfulTestsSelectPct, FlagInfo{
+		Name: "successful-test-select-pct",
+		Usage: `The percent of tests to select from those that have been running successfully, as per test selection. Default is 0.35`,
+	})
+
 	Username string = os.Getenv("ROACHPROD_USER")
 	_ = registerRunFlag(&Username, FlagInfo{
 		Name: "user",
@@ -185,6 +191,15 @@ var (
 	for tests that support 'arm64' (default 0)`,
 	})
 
+	CockroachEAProbability float64 = defaultCockroachEAProbability
+	_ = registerRunFlag(&CockroachEAProbability, FlagInfo{
+		Name: "metamorphic-cockroach-ea-probability",
+		Usage: `
+	Probability that tests will be run with assertions enabled. A cockroach
+	binary built with the --crdb_test flag must be passed to --cockroach-ea
+	for assertions to be enabled.`,
+	})
+
 	// ArtifactsDir is a path to a local dir where the test logs and artifacts
 	// collected from cluster will be placed.
 	ArtifactsDir string = "artifacts"
@@ -473,12 +488,13 @@ var (
 )
 
 const (
-	defaultEncryptionProbability = 1
-	defaultFIPSProbability       = 0
-	defaultARM64Probability      = 0
-	NeverUseSpot                 = "never"
-	AlwaysUseSpot                = "always"
-	AutoUseSpot                  = "auto"
+	defaultEncryptionProbability  = 1
+	defaultFIPSProbability        = 0
+	defaultARM64Probability       = 0
+	defaultCockroachEAProbability = 0
+	NeverUseSpot                  = "never"
+	AlwaysUseSpot                 = "always"
+	AutoUseSpot                   = "auto"
 )
 
 // FlagInfo contains the name and usage of a flag.
Used to make the code diff --git a/pkg/cmd/roachtest/roachtestutil/BUILD.bazel b/pkg/cmd/roachtest/roachtestutil/BUILD.bazel index d9cfa98fd7b9..644e35eea6e4 100644 --- a/pkg/cmd/roachtest/roachtestutil/BUILD.bazel +++ b/pkg/cmd/roachtest/roachtestutil/BUILD.bazel @@ -17,6 +17,7 @@ go_library( deps = [ "//pkg/cmd/roachtest/cluster", "//pkg/cmd/roachtest/option", + "//pkg/cmd/roachtest/spec", "//pkg/cmd/roachtest/test", "//pkg/kv/kvpb", "//pkg/roachprod/config", diff --git a/pkg/cmd/roachtest/roachtestutil/clusterupgrade/clusterupgrade.go b/pkg/cmd/roachtest/roachtestutil/clusterupgrade/clusterupgrade.go index ea3da3bfdbbf..05bcd89a6ffc 100644 --- a/pkg/cmd/roachtest/roachtestutil/clusterupgrade/clusterupgrade.go +++ b/pkg/cmd/roachtest/roachtestutil/clusterupgrade/clusterupgrade.go @@ -356,7 +356,7 @@ func RestartNodesWithNewBinary( newVersion *Version, settings ...install.ClusterSettingOption, ) error { - const maxWait = 300 // 5 minutes + const gracePeriod = 300 // 5 minutes // NB: We could technically stage the binary on all nodes before // restarting each one, but on Unix it's invalid to write to an @@ -379,7 +379,7 @@ func RestartNodesWithNewBinary( // TODO(yuzefovich): ideally, we would also check that the drain was // successful since if it wasn't, then we might see flakes too. if err := c.StopE( - ctx, l, option.NewStopOpts(option.Graceful(maxWait)), c.Node(node), + ctx, l, option.NewStopOpts(option.Graceful(gracePeriod)), c.Node(node), ); err != nil { return err } diff --git a/pkg/cmd/roachtest/roachtestutil/disk_stall.go b/pkg/cmd/roachtest/roachtestutil/disk_stall.go index 73c55c1afa97..56b076bbf5ce 100644 --- a/pkg/cmd/roachtest/roachtestutil/disk_stall.go +++ b/pkg/cmd/roachtest/roachtestutil/disk_stall.go @@ -20,6 +20,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/cluster" "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/option" + "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/spec" "github.com/cockroachdb/cockroach/pkg/roachprod/logger" ) @@ -60,6 +61,10 @@ func (s *cgroupDiskStaller) LogDir() string { return "logs" } func (s *cgroupDiskStaller) Setup(ctx context.Context) { + if _, ok := s.c.Spec().ReusePolicy.(spec.ReusePolicyNone); !ok { + // Safety measure. + s.f.Fatalf("cluster needs ReusePolicyNone to support disk stalls") + } if s.logsToo { s.c.Run(ctx, option.WithNodes(s.c.All()), "mkdir -p {store-dir}/logs") s.c.Run(ctx, option.WithNodes(s.c.All()), "rm -f logs && ln -s {store-dir}/logs logs || true") @@ -176,6 +181,8 @@ func GetDiskDevice(f Fataler, c cluster.Cluster, nodes option.NodeListOption) st type dmsetupDiskStaller struct { f Fataler c cluster.Cluster + + dev string // set in Setup; s.device() doesn't work when volume is not set up } var _ DiskStaller = (*dmsetupDiskStaller)(nil) @@ -185,14 +192,20 @@ func (s *dmsetupDiskStaller) device(nodes option.NodeListOption) string { } func (s *dmsetupDiskStaller) Setup(ctx context.Context) { - dev := s.device(s.c.All()) + if _, ok := s.c.Spec().ReusePolicy.(spec.ReusePolicyNone); !ok { + // We disable journaling and do all kinds of things below. + s.f.Fatalf("cluster needs ReusePolicyNone to support disk stalls") + } + s.dev = s.device(s.c.All()) // snapd will run "snapd auto-import /dev/dm-0" via udev triggers when // /dev/dm-0 is created. This possibly interferes with the dmsetup create // reload, so uninstall snapd. 
s.c.Run(ctx, option.WithNodes(s.c.All()), `sudo apt-get purge -y snapd`) s.c.Run(ctx, option.WithNodes(s.c.All()), `sudo umount -f /mnt/data1 || true`) s.c.Run(ctx, option.WithNodes(s.c.All()), `sudo dmsetup remove_all`) - err := s.c.RunE(ctx, option.WithNodes(s.c.All()), `echo "0 $(sudo blockdev --getsz `+dev+`) linear `+dev+` 0" | `+ + // See https://github.com/cockroachdb/cockroach/issues/129619#issuecomment-2316147244. + s.c.Run(ctx, option.WithNodes(s.c.All()), `sudo tune2fs -O ^has_journal `+s.dev) + err := s.c.RunE(ctx, option.WithNodes(s.c.All()), `echo "0 $(sudo blockdev --getsz `+s.dev+`) linear `+s.dev+` 0" | `+ `sudo dmsetup create data1`) if err != nil { // This has occasionally been seen to fail with "Device or resource busy", @@ -207,6 +220,7 @@ func (s *dmsetupDiskStaller) Cleanup(ctx context.Context) { s.c.Run(ctx, option.WithNodes(s.c.All()), `sudo dmsetup resume data1`) s.c.Run(ctx, option.WithNodes(s.c.All()), `sudo umount /mnt/data1`) s.c.Run(ctx, option.WithNodes(s.c.All()), `sudo dmsetup remove_all`) + s.c.Run(ctx, option.WithNodes(s.c.All()), `sudo tune2fs -O has_journal `+s.dev) s.c.Run(ctx, option.WithNodes(s.c.All()), `sudo mount /mnt/data1`) // Reinstall snapd in case subsequent tests need it. s.c.Run(ctx, option.WithNodes(s.c.All()), `sudo apt-get install -y snapd`) diff --git a/pkg/cmd/roachtest/roachtestutil/mixedversion/helper.go b/pkg/cmd/roachtest/roachtestutil/mixedversion/helper.go index cee2d0ad7c1c..3217285282b7 100644 --- a/pkg/cmd/roachtest/roachtestutil/mixedversion/helper.go +++ b/pkg/cmd/roachtest/roachtestutil/mixedversion/helper.go @@ -72,10 +72,17 @@ type ( } ) +// Connect returns a connection pool to the given node. Note that +// these connection pools are managed by the framework and therefore +// *must not* be closed. They are closed automatically when the test +// finishes. func (s *Service) Connect(node int) *gosql.DB { return s.connFunc(node) } +// RandomDB returns a connection pool to a random node in the +// cluster. Do *not* call `Close` on the pool returned (see comment on +// `Connect` function). func (s *Service) RandomDB(rng *rand.Rand) (int, *gosql.DB) { node := s.Descriptor.Nodes.SeededRandNode(rng)[0] return node, s.Connect(node) diff --git a/pkg/cmd/roachtest/roachtestutil/mixedversion/mixedversion.go b/pkg/cmd/roachtest/roachtestutil/mixedversion/mixedversion.go index 82f8f8742696..a5523ba75ce4 100644 --- a/pkg/cmd/roachtest/roachtestutil/mixedversion/mixedversion.go +++ b/pkg/cmd/roachtest/roachtestutil/mixedversion/mixedversion.go @@ -573,8 +573,23 @@ func (t *Test) RNG() *rand.Rand { // InMixedVersion adds a new mixed-version hook to the test. The // functionality in the function passed as argument to this function -// will be tested in arbitrary mixed-version states. If multiple -// InMixedVersion hooks are passed, they may be executed +// will be tested in arbitrary mixed-version states; specifically, it +// can be called up to four times during each major upgrade +// performed: +// +// 1. when the cluster upgrades to the new binary (`preserve_downgrade_option` set) +// 2. when the cluster downgrades to the old binary +// 3. when the cluster upgrades to the new binary again +// 4. when the cluster is finalizing +// +// Note that not every major upgrade performs a downgrade. In those +// cases, the InMixedVersion hook would only be called up to two times +// (when the cluster upgrades to the new binary, and when the cluster +// is finalizing the upgrade). 
Callers can use `h.Context().Stage` to
+// find out the stage of the upgrade in which the hook is being
+// called.
+//
+// If multiple InMixedVersion hooks are passed, they may be executed
 // concurrently.
 func (t *Test) InMixedVersion(desc string, fn stepFunc) {
 	var prevUpgradeStage UpgradeStage
@@ -613,10 +628,10 @@ func (t *Test) OnStartup(desc string, fn stepFunc) {
 	t.hooks.AddStartup(versionUpgradeHook{name: desc, fn: fn})
 }
 
-// AfterUpgradeFinalized registers a callback that is run once the
-// mixed-version test has brought the cluster to the latest version,
-// and allowed the upgrade to finalize successfully. If multiple such
-// hooks are passed, they will be executed concurrently.
+// AfterUpgradeFinalized registers a callback that is run once per
+// major upgrade performed in a test, after the upgrade is finalized
+// successfully. If multiple such hooks are passed, they may be
+// executed concurrently.
 func (t *Test) AfterUpgradeFinalized(desc string, fn stepFunc) {
 	t.hooks.AddAfterUpgradeFinalized(versionUpgradeHook{name: desc, fn: fn})
 }
@@ -1037,12 +1052,22 @@ func newSingleStep(context *Context, impl singleStepProtocol, rng *rand.Rand) *s
 }
 
 // prefixedLogger returns a logger instance off of the given `l`
-// parameter, and adds a prefix to everything logged by the retured
-// logger.
+// parameter. The path and prefix are the same.
 func prefixedLogger(l *logger.Logger, prefix string) (*logger.Logger, error) {
-	fileName := strings.ReplaceAll(prefix, " ", "-")
-	formattedPrefix := fmt.Sprintf("[%s] ", fileName)
-	return l.ChildLogger(fileName, logger.LogPrefix(formattedPrefix))
+	filename := sanitizePath(prefix)
+	return prefixedLoggerWithFilename(l, filename, filename)
+}
+
+// prefixedLoggerWithFilename returns a logger instance with the given
+// prefix. The logger will write to a file at the given `path`,
+// relative to the logger `l`'s location.
+func prefixedLoggerWithFilename(l *logger.Logger, prefix, path string) (*logger.Logger, error) {
+	formattedPrefix := fmt.Sprintf("[%s] ", prefix)
+	return l.ChildLogger(path, logger.LogPrefix(formattedPrefix))
+}
+
+func sanitizePath(s string) string {
+	return strings.ReplaceAll(s, " ", "-")
+}
 
 func (h hooks) Filter(testContext Context) hooks {
diff --git a/pkg/cmd/roachtest/roachtestutil/mixedversion/runner.go b/pkg/cmd/roachtest/roachtestutil/mixedversion/runner.go
index 99c6aaeeb61f..6fdbd844a619 100644
--- a/pkg/cmd/roachtest/roachtestutil/mixedversion/runner.go
+++ b/pkg/cmd/roachtest/roachtestutil/mixedversion/runner.go
@@ -195,11 +195,19 @@ func (tr *testRunner) run() (retErr error) {
 	stepsErr := make(chan error)
 	defer func() { tr.teardown(stepsErr, retErr != nil) }()
 	defer func() {
-		// If the test failed an we haven't run any user hooks up to this
-		// point, redirect the failure to Test Eng, as this indicates a
-		// setup problem that should be investigated separately.
-		if retErr != nil && !tr.ranUserHooks.Load() {
-			retErr = registry.ErrorWithOwner(registry.OwnerTestEng, retErr)
+		if retErr != nil {
+			// If the test failed and we haven't run any user hooks up to this
+			// point, redirect the failure to Test Eng, as this indicates a
+			// setup problem that should be investigated separately.
+			if !tr.ranUserHooks.Load() {
+				retErr = registry.ErrorWithOwner(registry.OwnerTestEng, retErr)
+			}
+
+			// If this test run had a tag assigned, wrap the error with that
+			// tag to make it more immediately clear which run failed.
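+			// With errors.Wrapf, the resulting failure reads
+			// "<tag>: <original error>".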
+			if tr.tag != "" {
+				retErr = errors.Wrapf(retErr, "%s", tr.tag)
+			}
 		}
 	}()
@@ -240,13 +248,13 @@ func (tr *testRunner) runStep(ctx context.Context, step testStep) error {
 	if ss, ok := step.(*singleStep); ok {
 		if ss.ID > tr.plan.startSystemID {
 			if err := tr.refreshServiceData(ctx, tr.systemService); err != nil {
-				return err
+				return errors.Wrapf(err, "preparing to run step %d", ss.ID)
 			}
 		}
 
 		if ss.ID > tr.plan.startTenantID && tr.tenantService != nil {
 			if err := tr.refreshServiceData(ctx, tr.tenantService); err != nil {
-				return err
+				return errors.Wrapf(err, "preparing to run step %d", ss.ID)
 			}
 		}
 	}
@@ -525,9 +533,7 @@ func (tr *testRunner) loggerFor(step *singleStep) (*logger.Logger, error) {
 	name = fmt.Sprintf("%d_%s", step.ID, name)
 
 	prefix := filepath.Join(tr.tag, logPrefix, name)
-	// Use the root logger here as the `prefix` passed will already
-	// include the full path from the root, including the tag.
-	return prefixedLogger(tr.logger.RootLogger(), prefix)
+	return prefixedLoggerWithFilename(tr.logger, prefix, filepath.Join(logPrefix, name))
 }
 
 // refreshBinaryVersions updates the `binaryVersions` field for every
diff --git a/pkg/cmd/roachtest/roachtestutil/operations/dependency.go b/pkg/cmd/roachtest/roachtestutil/operations/dependency.go
index 07a273b0a4d8..b503ab77218d 100644
--- a/pkg/cmd/roachtest/roachtestutil/operations/dependency.go
+++ b/pkg/cmd/roachtest/roachtestutil/operations/dependency.go
@@ -80,6 +80,20 @@ func CheckDependencies(
 		if count != 0 {
 			return false, nil
 		}
+	case registry.OperationRequiresLDRJobRunning:
+		conn := c.Conn(ctx, l, 1, option.VirtualClusterName("system"))
+		defer conn.Close()
+
+		jobsCur, err := conn.QueryContext(ctx, "(WITH x AS (SHOW JOBS) SELECT job_id FROM x WHERE job_type = 'LOGICAL REPLICATION' AND status = 'running' LIMIT 1)")
+		if err != nil {
+			return false, err
+		}
+		defer jobsCur.Close()
+		var jobId string
+		if jobsCur.Next() {
+			if err := jobsCur.Scan(&jobId); err != nil {
+				return false, err
+			}
+		}
+		if jobId == "" {
+			return false, nil
+		}
 	default:
 		panic(fmt.Sprintf("unknown operation dependency %d", dep))
 	}
diff --git a/pkg/cmd/roachtest/run.go b/pkg/cmd/roachtest/run.go
index 92ca1762e71a..951bfcf5b58c 100644
--- a/pkg/cmd/roachtest/run.go
+++ b/pkg/cmd/roachtest/run.go
@@ -272,6 +272,9 @@ func initRunFlagsBinariesAndLibraries(cmd *cobra.Command) error {
 	if !(0 <= roachtestflags.FIPSProbability && roachtestflags.FIPSProbability <= 1) {
 		return fmt.Errorf("'metamorphic-fips-probability' must be in [0,1]")
 	}
+	if !(0 <= roachtestflags.CockroachEAProbability && roachtestflags.CockroachEAProbability <= 1) {
+		return fmt.Errorf("'metamorphic-cockroach-ea-probability' must be in [0,1]")
+	}
 	if roachtestflags.ARM64Probability == 1 && roachtestflags.FIPSProbability != 0 {
 		return fmt.Errorf("'metamorphic-fips-probability' must be 0 when 'metamorphic-arm64-probability' is 1")
 	}
@@ -319,7 +322,11 @@ func CtrlC(ctx context.Context, l *logger.Logger, cancel func(), cr *clusterRegi
 	sig := make(chan os.Signal, 1)
 	signal.Notify(sig, os.Interrupt)
 	go func() {
-		<-sig
+		select {
+		case <-sig:
+		case <-ctx.Done():
+			return
+		}
 		shout(ctx, l, os.Stderr, "Signaled received. Canceling workers and waiting up to 5s for them.")
 		// Signal runner.Run() to stop.
@@ -507,7 +514,7 @@ func maybeEmitDatadogEvent( _, _, _ = datadogEventsAPI.CreateEvent(ctx, datadogV1.EventCreateRequest{ AggregationKey: datadog.PtrString(fmt.Sprintf("operation-%d", operationID)), AlertType: &alertType, - DateHappened: datadog.PtrInt64(timeutil.Now().UnixNano()), + DateHappened: datadog.PtrInt64(timeutil.Now().Unix()), Host: &hostname, SourceTypeName: datadog.PtrString("roachtest"), Tags: append(datadogTags, @@ -670,7 +677,7 @@ func runOperation(register func(registry.Registry), filter string, clusterName s } op.spec = opSpec - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(ctx) defer cancel() // Cancel this context if we get an interrupt. CtrlC(ctx, l, cancel, nil /* registry */) diff --git a/pkg/cmd/roachtest/spec/cluster_spec.go b/pkg/cmd/roachtest/spec/cluster_spec.go index a7301dd23387..1e3409e8b45f 100644 --- a/pkg/cmd/roachtest/spec/cluster_spec.go +++ b/pkg/cmd/roachtest/spec/cluster_spec.go @@ -212,7 +212,7 @@ func awsMachineSupportsSSD(machineType string) bool { } func getAWSOpts( - machineType string, zones []string, volumeSize, ebsThroughput int, localSSD bool, useSpotVMs bool, + machineType string, volumeSize, ebsThroughput int, localSSD bool, useSpotVMs bool, ) vm.ProviderOpts { opts := aws.DefaultProviderOpts() if volumeSize != 0 { @@ -229,16 +229,12 @@ func getAWSOpts( } else { opts.MachineType = machineType } - if len(zones) != 0 { - opts.CreateZones = zones - } opts.UseSpot = useSpotVMs return opts } func getGCEOpts( machineType string, - zones []string, volumeSize, localSSDCount int, localSSD bool, RAID0 bool, @@ -259,9 +255,6 @@ func getGCEOpts( if volumeSize != 0 { opts.PDVolumeSize = volumeSize } - if len(zones) != 0 { - opts.Zones = zones - } opts.SSDCount = localSSDCount if localSSD && localSSDCount > 0 { // NB: As the default behavior for _roachprod_ (at least in AWS/GCP) is @@ -279,12 +272,9 @@ func getGCEOpts( return opts } -func getAzureOpts(machineType string, zones []string, volumeSize int) vm.ProviderOpts { +func getAzureOpts(machineType string, volumeSize int) vm.ProviderOpts { opts := azure.DefaultProviderOpts() opts.MachineType = machineType - if len(zones) != 0 { - opts.Locations = zones - } if volumeSize != 0 { opts.NetworkDiskSize = int32(volumeSize) } @@ -446,38 +436,15 @@ func (s *ClusterSpec) RoachprodOpts( } } - zonesStr := params.Defaults.Zones - switch cloud { - case AWS: - if s.AWS.Zones != "" { - zonesStr = s.AWS.Zones - } - case GCE: - if s.GCE.Zones != "" { - zonesStr = s.GCE.Zones - } - case Azure: - if s.Azure.Zones != "" { - zonesStr = s.Azure.Zones - } - } - var zones []string - if zonesStr != "" { - zones = strings.Split(zonesStr, ",") - if !s.Geo { - zones = zones[:1] - } - } - var workloadMachineType string var err error switch cloud { case AWS: - workloadMachineType, _, err = SelectAWSMachineType(s.WorkloadNodeCPUs, s.Mem, preferLocalSSD && s.VolumeSize == 0, requestedArch) + workloadMachineType, _, err = SelectAWSMachineType(s.WorkloadNodeCPUs, s.Mem, preferLocalSSD && s.VolumeSize == 0, selectedArch) case GCE: - workloadMachineType, _ = SelectGCEMachineType(s.WorkloadNodeCPUs, s.Mem, requestedArch) + workloadMachineType, _ = SelectGCEMachineType(s.WorkloadNodeCPUs, s.Mem, selectedArch) case Azure: - workloadMachineType, _, err = SelectAzureMachineType(s.WorkloadNodeCPUs, s.Mem, requestedArch) + workloadMachineType, _, err = SelectAzureMachineType(s.WorkloadNodeCPUs, s.Mem, selectedArch) } if err != nil { return vm.CreateOpts{}, nil, nil, "", err @@ -492,27 +459,101 @@ 
func (s *ClusterSpec) RoachprodOpts( var workloadProviderOpts vm.ProviderOpts switch cloud { case AWS: - providerOpts = getAWSOpts(machineType, zones, s.VolumeSize, s.AWS.VolumeThroughput, + providerOpts = getAWSOpts(machineType, s.VolumeSize, s.AWS.VolumeThroughput, createVMOpts.SSDOpts.UseLocalSSD, s.UseSpotVMs) - workloadProviderOpts = getAWSOpts(workloadMachineType, zones, s.VolumeSize, s.AWS.VolumeThroughput, + workloadProviderOpts = getAWSOpts(workloadMachineType, s.VolumeSize, s.AWS.VolumeThroughput, createVMOpts.SSDOpts.UseLocalSSD, s.UseSpotVMs) case GCE: - providerOpts = getGCEOpts(machineType, zones, s.VolumeSize, ssdCount, + providerOpts = getGCEOpts(machineType, s.VolumeSize, ssdCount, createVMOpts.SSDOpts.UseLocalSSD, s.RAID0, s.TerminateOnMigration, s.GCE.MinCPUPlatform, vm.ParseArch(createVMOpts.Arch), s.GCE.VolumeType, s.UseSpotVMs, ) - workloadProviderOpts = getGCEOpts(workloadMachineType, zones, s.VolumeSize, ssdCount, + workloadProviderOpts = getGCEOpts(workloadMachineType, s.VolumeSize, ssdCount, createVMOpts.SSDOpts.UseLocalSSD, s.RAID0, s.TerminateOnMigration, s.GCE.MinCPUPlatform, vm.ParseArch(createVMOpts.Arch), s.GCE.VolumeType, s.UseSpotVMs, ) case Azure: - providerOpts = getAzureOpts(machineType, zones, s.VolumeSize) - workloadProviderOpts = getAzureOpts(workloadMachineType, zones, s.VolumeSize) + providerOpts = getAzureOpts(machineType, s.VolumeSize) + workloadProviderOpts = getAzureOpts(workloadMachineType, s.VolumeSize) } return createVMOpts, providerOpts, workloadProviderOpts, selectedArch, nil } +// SetRoachprodOptsZones updates the providerOpts with the VM zones as specified in the params/spec. +// We separate this logic from RoachprodOpts as we may need to call this multiple times in order to +// randomize the default GCE zone. +func (s *ClusterSpec) SetRoachprodOptsZones( + providerOpts, workloadProviderOpts vm.ProviderOpts, params RoachprodClusterConfig, arch string, +) (vm.ProviderOpts, vm.ProviderOpts) { + zonesStr := params.Defaults.Zones + cloud := params.Cloud + switch cloud { + case AWS: + if s.AWS.Zones != "" { + zonesStr = s.AWS.Zones + } + case GCE: + if s.GCE.Zones != "" { + zonesStr = s.GCE.Zones + } + case Azure: + if s.Azure.Zones != "" { + zonesStr = s.Azure.Zones + } + } + var zones []string + if zonesStr != "" { + zones = strings.Split(zonesStr, ",") + if !s.Geo { + zones = zones[:1] + } + } + + switch cloud { + case AWS: + if len(zones) == 0 { + if !s.Geo { + zones = aws.DefaultZones[:1] + } else { + zones = aws.DefaultZones + } + } + providerOpts.(*aws.ProviderOpts).CreateZones = zones + workloadProviderOpts.(*aws.ProviderOpts).CreateZones = zones + case GCE: + // We randomize the list of default zones for GCE for quota reasons, so decide the zone + // early to ensure that the workload node and CRDB cluster have the same default zone. + if len(zones) == 0 { + if !s.Geo { + zones = gce.DefaultZones(arch)[:1] + } else { + zones = gce.DefaultZones(arch) + } + } + providerOpts.(*gce.ProviderOpts).Zones = zones + workloadProviderOpts.(*gce.ProviderOpts).Zones = zones + case Azure: + // Azure splits up the availability zone from the region and roachprod + // assumes that only one zone is ever used. So we're not actually changing + // the zone here, just the region. + // TODO(darrylwong): we should support multiple zones. To keep things + // consistent amongst clouds, we could keep the zone=region+az convention + // and parse it at the provider level. 
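+		// (Under that hypothetical convention, a zone string like
+		// "eastus-1" would parse as region "eastus" and availability
+		// zone "1".)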
+		if len(zones) == 0 {
+			if !s.Geo {
+				zones = azure.DefaultLocations[:1]
+			} else {
+				zones = azure.DefaultLocations
+			}
+		}
+		// Azure accepts locations rather than zones.
+		providerOpts.(*azure.ProviderOpts).Locations = zones
+		workloadProviderOpts.(*azure.ProviderOpts).Locations = zones
+	}
+	return providerOpts, workloadProviderOpts
+}
+
 // Expiration is the lifetime of the cluster. It may be destroyed after
 // the expiration has passed.
 func (s *ClusterSpec) Expiration() time.Time {
diff --git a/pkg/cmd/roachtest/test/test_interface.go b/pkg/cmd/roachtest/test/test_interface.go
index 6b8bc92e03e2..b077e779c605 100644
--- a/pkg/cmd/roachtest/test/test_interface.go
+++ b/pkg/cmd/roachtest/test/test_interface.go
@@ -25,12 +25,6 @@ const DefaultCockroachPath = "./cockroach"
 // node if one is provisioned.
 const DefaultDeprecatedWorkloadPath = "./workload"
 
-// EnvAssertionsEnabledSeed is the name of the environment variable
-// that, when set, causes roachtest to use a binary with runtime
-// assertions enabled (if available), using the random seed contained
-// in that environment variable.
-var EnvAssertionsEnabledSeed = "ROACHTEST_ASSERTIONS_ENABLED_SEED"
-
 // Test is the interface through which roachtests interact with the
 // test harness.
 type Test interface {
diff --git a/pkg/cmd/roachtest/test_impl.go b/pkg/cmd/roachtest/test_impl.go
index ddbea779737b..4e0ba9ebc391 100644
--- a/pkg/cmd/roachtest/test_impl.go
+++ b/pkg/cmd/roachtest/test_impl.go
@@ -21,7 +21,7 @@ import (
 	"time"
 
 	"github.com/cockroachdb/cockroach/pkg/cmd/roachtest/registry"
-	"github.com/cockroachdb/cockroach/pkg/cmd/roachtest/test"
+	"github.com/cockroachdb/cockroach/pkg/cmd/roachtest/roachtestflags"
 	"github.com/cockroachdb/cockroach/pkg/roachprod/logger"
 	"github.com/cockroachdb/cockroach/pkg/testutils/skip"
 	"github.com/cockroachdb/cockroach/pkg/util/syncutil"
@@ -146,41 +146,44 @@ func (t *testImpl) BuildVersion() *version.Version {
 }
 
 // Cockroach will return either `RuntimeAssertionsCockroach()` or
-// `StandardCockroach()`, picked randomly. Once a random choice has
-// been made, the same binary will be returned on every call to
-// `Cockroach`, to avoid errors that may arise from binaries having a
+// StandardCockroach(), picked based on the test spec. Once a choice
+// has been made, the same binary will be returned on every call to
+// Cockroach, to avoid errors that may arise from binaries having a
 // different value for metamorphic constants.
 func (t *testImpl) Cockroach() string {
-	// If the test is a benchmark test, we don't want to enable assertions
-	// as it will slow down performance.
-	if t.spec.Benchmark {
-		t.l.Printf("Benchmark test, running with standard cockroach")
-		return t.StandardCockroach()
-	}
 	t.randomCockroachOnce.Do(func() {
-		//TODO(SR): assertions are temporarily disabled for _all_ tests except those using t.RuntimeAssertionsCockroach()
-		// directly, until after the stability period for 23.2. See https://github.com/cockroachdb/cockroach/issues/114615
-		assertionsEnabledProbability := 0.0
-		// If the user specified a custom seed to be used with runtime
-		// assertions, assume they want to run the test with assertions
-		// enabled, making it easier to reproduce issues.
-		if os.Getenv(test.EnvAssertionsEnabledSeed) != "" {
-			assertionsEnabledProbability = 1
-		}
-
-		if rand.Float64() < assertionsEnabledProbability {
-			// The build with runtime assertions should exist in every nightly
-			// CI build, but we can't assume it exists in every roachtest call.
- if path := t.RuntimeAssertionsCockroach(); path != "" { - t.l.Printf("Runtime assertions enabled") - t.randomizedCockroach = path + switch t.Spec().(*registry.TestSpec).CockroachBinary { + case registry.RandomizedCockroach: + // If the test is a benchmark test, we don't want to enable assertions + // as it will slow down performance. + if t.spec.Benchmark { + t.l.Printf("Benchmark test, running with standard cockroach") + t.randomizedCockroach = t.StandardCockroach() return - } else { - t.l.Printf("WARNING: running without runtime assertions since the corresponding binary was not specified") } + + if rand.Float64() < roachtestflags.CockroachEAProbability { + // The build with runtime assertions should exist in every nightly + // CI build, but we can't assume it exists in every roachtest call. + if path := t.RuntimeAssertionsCockroach(); path != "" { + t.l.Printf("Runtime assertions enabled") + t.randomizedCockroach = path + return + } else { + t.l.Printf("WARNING: running without runtime assertions since the corresponding binary was not specified") + } + } + t.l.Printf("Runtime assertions disabled") + t.randomizedCockroach = t.StandardCockroach() + case registry.StandardCockroach: + t.l.Printf("Runtime assertions disabled: registry.StandardCockroach set") + t.randomizedCockroach = t.StandardCockroach() + case registry.RuntimeAssertionsCockroach: + t.l.Printf("Runtime assertions enabled: registry.RuntimeAssertionsCockroach set") + t.randomizedCockroach = t.RuntimeAssertionsCockroach() + default: + t.Fatal("Specified cockroach binary does not exist.") } - t.l.Printf("Runtime assertions disabled") - t.randomizedCockroach = t.StandardCockroach() }) return t.randomizedCockroach diff --git a/pkg/cmd/roachtest/test_runner.go b/pkg/cmd/roachtest/test_runner.go index e3060aa6564b..d88120d48f11 100644 --- a/pkg/cmd/roachtest/test_runner.go +++ b/pkg/cmd/roachtest/test_runner.go @@ -39,6 +39,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/roachtestutil" "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/spec" "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/test" + "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/tests" "github.com/cockroachdb/cockroach/pkg/roachprod/config" "github.com/cockroachdb/cockroach/pkg/roachprod/logger" "github.com/cockroachdb/cockroach/pkg/roachprod/vm" @@ -343,11 +344,11 @@ func (r *testRunner) Run( } // TODO(bhaskar): remove this once we have more usage details // and more convinced about using spot VMs for all the runs. - if (roachtestflags.Cloud == spec.GCE || roachtestflags.Cloud == spec.AWS) && - tests[i].Benchmark && + if (roachtestflags.Cloud == spec.GCE || (roachtestflags.Cloud == spec.AWS && + tests[i].Benchmark)) && !tests[i].Suites.Contains(registry.Weekly) && !tests[i].IsLastFailurePreempt() && - rand.Float64() <= 0.8 { + rand.Float64() <= 0.75 { lopt.l.PrintfCtx(ctx, "using spot VMs to run test %s", tests[i].Name) tests[i].Cluster.UseSpotVMs = true } @@ -1309,7 +1310,21 @@ func (r *testRunner) runTest( case <-time.After(timeout): // NB: We're adding the timeout failure intentionally without cancelling the context // to capture as much state as possible during artifact collection. - t.addFailure(0, "test timed out (%s)", timeout) + // + // Temporarily route all runtime assertion timeouts to test-eng while + // we gauge the frequency they occur and adjust test timeouts accordingly. 
+ // TODO(darryl): once we are more confident in the stability of runtime + // assertions we can remove this. + if tests.UsingRuntimeAssertions(t) { + timeoutErr := registry.ErrorWithOwnership{ + Err: errors.Newf("test timed out (%s)", timeout), + Owner: registry.OwnerTestEng, + } + t.addFailure(0, "", timeoutErr) + } else { + t.addFailure(0, "test timed out (%s)", timeout) + } + // We suppress other failures from being surfaced to the top as the timeout is always going // to be the main error and subsequent errors (i.e. context cancelled) add noise. t.suppressFailures() diff --git a/pkg/cmd/roachtest/test_test.go b/pkg/cmd/roachtest/test_test.go index 72bd13ba2b63..196b81aec27e 100644 --- a/pkg/cmd/roachtest/test_test.go +++ b/pkg/cmd/roachtest/test_test.go @@ -32,6 +32,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/roachprod/cloud" "github.com/cockroachdb/cockroach/pkg/roachprod/logger" "github.com/cockroachdb/cockroach/pkg/roachprod/vm" + "github.com/cockroachdb/cockroach/pkg/roachprod/vm/gce" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/util/stop" "github.com/cockroachdb/cockroach/pkg/util/syncutil" @@ -324,6 +325,7 @@ func TestRunnerTestTimeout(t *testing.T) { Cluster: spec.MakeClusterSpec(0), CompatibleClouds: registry.AllExceptAWS, Suites: registry.Suites(registry.Nightly), + CockroachBinary: registry.StandardCockroach, Run: func(ctx context.Context, t test.Test, c cluster.Cluster) { <-ctx.Done() }, @@ -335,7 +337,7 @@ func TestRunnerTestTimeout(t *testing.T) { } out := buf.String() - timeoutRE := regexp.MustCompile(`(?m)^.*test timed out \(.*\)$`) + timeoutRE := regexp.MustCompile(`(?m)^.*test timed out \(.*\)`) if !timeoutRE.MatchString(out) { t.Fatalf("unable to find \"timed out\" message:\n%s", out) } @@ -481,3 +483,52 @@ func TestNewCluster(t *testing.T) { }) } } + +// Regression test for: https://github.com/cockroachdb/cockroach/issues/129997 +// Tests that workload nodes are assigned the same default zone as the main CRDB cluster. +func TestGCESameDefaultZone(t *testing.T) { + ctx := context.Background() + factory := &clusterFactory{sem: make(chan struct{}, 1)} + cfg := clusterConfig{spec: spec.MakeClusterSpec(2, spec.WorkloadNode())} + setStatus := func(string) {} + + defer func() { + create = roachprod.Create + }() + + create = func(ctx context.Context, l *logger.Logger, username string, opts ...*cloud.ClusterCreateOpts) (retErr error) { + // Since we specified no zone for this cluster, roachtest should assign a default one for us. + // Check that it assigns the same default zone to both the CRDB cluster and the workload node. + require.Equal(t, len(opts), 2) + crdbZones := opts[0].ProviderOptsContainer[gce.ProviderName].(*gce.ProviderOpts).Zones + workloadZones := opts[1].ProviderOptsContainer[gce.ProviderName].(*gce.ProviderOpts).Zones + require.Equal(t, crdbZones, workloadZones) + // A bit of a workaround, we don't have a mock for registerCluster at this time which will panic if hit. + // Instead, just return an error to return early since we already tested the code paths we care about. 
+ return &roachprod.ClusterAlreadyExistsError{} + } + + testCases := []struct { + name string + geo bool + createMock func(ctx context.Context, l *logger.Logger, username string, opts ...*cloud.ClusterCreateOpts) (retErr error) + }{ + { + name: "Separate GCE create calls for same cluster default to same zone", + geo: false, + }, + { + name: "Separate GCE create calls for same geo cluster default to same zones", + geo: true, + }, + } + + for _, c := range testCases { + cfg.spec.Geo = c.geo + t.Run(c.name, func(t *testing.T) { + for i := 0; i < 100; i++ { + _, _, _ = factory.newCluster(ctx, cfg, setStatus, true) + } + }) + } +} diff --git a/pkg/cmd/roachtest/testdata/regression.diffs b/pkg/cmd/roachtest/testdata/regression.diffs index 25b011375d35..603ba302a21d 100644 --- a/pkg/cmd/roachtest/testdata/regression.diffs +++ b/pkg/cmd/roachtest/testdata/regression.diffs @@ -2506,7 +2506,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/int8.out --label= - 1259 + oid +------------ -+ 4294967092 ++ 4294967090 (1 row) -- bit operations @@ -4140,7 +4140,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/float4.out --labe +create type xfloat4 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27793/v24.2 ++See: https://go.crdb.dev/issue-v/27793/dev create function xfloat4in(cstring) returns xfloat4 immutable strict language internal as 'int4in'; -NOTICE: return type xfloat4 is only a shell @@ -4155,7 +4155,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/float4.out --labe +create type xfloat4 (input = xfloat4in, output = xfloat4out, like = float4) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27793/v24.2 ++See: https://go.crdb.dev/issue-v/27793/dev create cast (xfloat4 as float4) without function; +ERROR: at or near "(": syntax error: unimplemented: this syntax +DETAIL: source SQL: @@ -4537,7 +4537,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/float4.out --labe -drop cascades to cast from integer to xfloat4 +ERROR: unimplemented: DROP TYPE CASCADE is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/51480/v24.2 ++See: https://go.crdb.dev/issue-v/51480/dev diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/pg_lsn.out --label=/mnt/data1/postgres/src/test/regress/results/pg_lsn.out /mnt/data1/postgres/src/test/regress/expected/pg_lsn.out /mnt/data1/postgres/src/test/regress/results/pg_lsn.out --- /mnt/data1/postgres/src/test/regress/expected/pg_lsn.out +++ /mnt/data1/postgres/src/test/regress/results/pg_lsn.out @@ -4581,15 +4581,6 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/pg_lsn.out --labe -- Min/Max aggregation SELECT MIN(f1), MAX(f1) FROM PG_LSN_TBL; min | max -@@ -133,7 +114,7 @@ - SELECT '0/16AE7F7'::pg_lsn + 'NaN'::numeric; - ERROR: cannot add NaN to pg_lsn - SELECT '0/16AE7F7'::pg_lsn - 'NaN'::numeric; --ERROR: cannot subtract NaN from pg_lsn -+ERROR: cannot add NaN to pg_lsn - -- Check btree and hash opclasses - EXPLAIN (COSTS OFF) - SELECT DISTINCT (i || '/' || j)::pg_lsn f @@ -142,22 +123,11 @@ generate_series(1, 5) k WHERE i <= 10 AND j > 0 AND j <= 10 @@ -5369,7 +5360,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/enum.out --label= +CREATE DOMAIN rgb AS rainbow CHECK (VALUE IN ('red', 'green', 'blue')) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev SELECT 'red'::rgb; - rgb ------ @@ -5389,7 +5380,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/enum.out --label= +DROP DOMAIN rgb + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev -- -- Arrays -- @@ -6119,7 +6110,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/float8.out --labe +create type xfloat8 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27793/v24.2 ++See: https://go.crdb.dev/issue-v/27793/dev create function xfloat8in(cstring) returns xfloat8 immutable strict language internal as 'int8in'; -NOTICE: return type xfloat8 is only a shell @@ -6134,7 +6125,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/float8.out --labe +create type xfloat8 (input = xfloat8in, output = xfloat8out, like = float8) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27793/v24.2 ++See: https://go.crdb.dev/issue-v/27793/dev create cast (xfloat8 as float8) without function; +ERROR: at or near "(": syntax error: unimplemented: this syntax +DETAIL: source SQL: @@ -6461,7 +6452,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/float8.out --labe -drop cascades to cast from bigint to xfloat8 +ERROR: unimplemented: DROP TYPE CASCADE is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/51480/v24.2 ++See: https://go.crdb.dev/issue-v/51480/dev diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/numeric.out --label=/mnt/data1/postgres/src/test/regress/results/numeric.out /mnt/data1/postgres/src/test/regress/expected/numeric.out /mnt/data1/postgres/src/test/regress/results/numeric.out --- /mnt/data1/postgres/src/test/regress/expected/numeric.out +++ /mnt/data1/postgres/src/test/regress/results/numeric.out @@ -9620,7 +9611,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/numeric.out --lab ALTER TABLE num_variance SET (parallel_workers = 4); +ERROR: unimplemented: storage parameter "parallel_workers" +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/43299/v24.2 ++See: https://go.crdb.dev/issue-v/43299/dev SET LOCAL parallel_setup_cost = 0; +ERROR: current transaction is aborted, commands ignored until end of transaction block SET LOCAL max_parallel_workers_per_gather = 4; @@ -9651,7 +9642,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/numeric.out --lab ALTER TABLE num_variance SET (parallel_workers = 4); +ERROR: unimplemented: storage parameter "parallel_workers" +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/43299/v24.2 ++See: https://go.crdb.dev/issue-v/43299/dev SET LOCAL parallel_setup_cost = 0; +ERROR: current transaction is aborted, commands ignored until end of transaction block SET LOCAL max_parallel_workers_per_gather = 4; @@ -11998,7 +11989,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rangetypes.out -- + exclude using gist (room with =, during with &&), + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46657/v24.2 ++See: https://go.crdb.dev/issue-v/46657/dev insert into test_range_excl values(int4range(123, 123, '[]'), int4range(1, 1, '[]'), '[2010-01-02 10:00, 2010-01-02 11:00)'); +ERROR: relation "test_range_excl" does not exist @@ -12054,7 +12045,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rangetypes.out -- +create type bogus_float8range as range (subtype=float8, subtype_diff=float4mi) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27791/v24.2 ++See: https://go.crdb.dev/issue-v/27791/dev select '[123.001, 5.e9)'::float8range @> 888.882::float8; - ?column? ----------- @@ -12084,14 +12075,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rangetypes.out -- +create domain mydomain as int4 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev create type mydomainrange as range(subtype=mydomain); +ERROR: at or near "(": syntax error: unimplemented: this syntax +DETAIL: source SQL: +create type mydomainrange as range(subtype=mydomain) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27791/v24.2 ++See: https://go.crdb.dev/issue-v/27791/dev select '[4,50)'::mydomainrange @> 7::mydomain; - ?column? ----------- @@ -12108,7 +12099,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rangetypes.out -- +drop domain mydomain + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev drop domain mydomain cascade; -NOTICE: drop cascades to type mydomainrange +ERROR: at or near "mydomain": syntax error: unimplemented: this syntax @@ -12116,7 +12107,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rangetypes.out -- +drop domain mydomain cascade + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev -- -- Test domains over range types -- @@ -12126,7 +12117,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rangetypes.out -- +create domain restrictedrange as int4range check (upper(value) < 10) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev select '[4,5)'::restrictedrange @> 7; - ?column? ----------- @@ -12143,7 +12134,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rangetypes.out -- +drop domain restrictedrange + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev -- -- Test multiple range types over the same subtype -- @@ -12153,14 +12144,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rangetypes.out -- +create type textrange1 as range(subtype=text, collation="C") + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27791/v24.2 ++See: https://go.crdb.dev/issue-v/27791/dev create type textrange2 as range(subtype=text, collation="C"); +ERROR: at or near "(": syntax error: unimplemented: this syntax +DETAIL: source SQL: +create type textrange2 as range(subtype=text, collation="C") + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27791/v24.2 ++See: https://go.crdb.dev/issue-v/27791/dev select textrange1('a','Z') @> 'b'::text; -ERROR: range lower bound must be less than or equal to range upper bound +ERROR: unknown function: textrange1() @@ -12307,7 +12298,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rangetypes.out -- +create type arrayrange as range (subtype=int4[]) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27791/v24.2 ++See: https://go.crdb.dev/issue-v/27791/dev select arrayrange(ARRAY[1,2], ARRAY[2,1]); - arrayrange -------------------- @@ -12342,7 +12333,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rangetypes.out -- +create type two_ints_range as range (subtype = two_ints) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27791/v24.2 ++See: https://go.crdb.dev/issue-v/27791/dev -- with debug_parallel_query on, this exercises tqueue.c's range remapping select *, row_to_json(upper(t)) as u from (values (two_ints_range(row(1,2), row(3,4))), @@ -12362,12 +12353,12 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rangetypes.out -- +alter type two_ints add attribute c two_ints_range + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/48701/v24.2 ++See: https://go.crdb.dev/issue-v/48701/dev drop type two_ints cascade; -NOTICE: drop cascades to type two_ints_range +ERROR: unimplemented: DROP TYPE CASCADE is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/51480/v24.2 ++See: https://go.crdb.dev/issue-v/51480/dev -- -- Check behavior when subtype lacks a hash function -- @@ -12377,7 +12368,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rangetypes.out -- +create type cashrange as range (subtype = money) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27791/v24.2 ++See: https://go.crdb.dev/issue-v/27791/dev set enable_sort = off; -- try to make it pick a hash setop implementation -select '(2,5)'::cashrange except select '(5,6)'::cashrange; - cashrange @@ -12468,7 +12459,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rangetypes.out -- + returns table(l anyelement, u anyelement) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/100226/v24.2 ++See: https://go.crdb.dev/issue-v/100226/dev select * from table_succeed(int4range(1,11)); - l | u ----+---- @@ -12498,7 +12489,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rangetypes.out -- +create function table_fail(i anyelement) returns table(i anyelement, r anyrange) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/100226/v24.2 ++See: https://go.crdb.dev/issue-v/100226/dev diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/numerology.out --label=/mnt/data1/postgres/src/test/regress/results/numerology.out /mnt/data1/postgres/src/test/regress/expected/numerology.out /mnt/data1/postgres/src/test/regress/results/numerology.out --- /mnt/data1/postgres/src/test/regress/expected/numerology.out +++ /mnt/data1/postgres/src/test/regress/results/numerology.out @@ -14567,7 +14558,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/strings.out --lab ALTER TABLE toasttest set (toast_tuple_target = 4080); +ERROR: unimplemented: storage parameter "toast_tuple_target" +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/43299/v24.2 ++See: https://go.crdb.dev/issue-v/43299/dev INSERT INTO toasttest values (repeat('1234567890',300)); INSERT INTO toasttest values (repeat('1234567890',300)); INSERT INTO toasttest values (repeat('1234567890',300)); @@ -15072,11 +15063,11 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/time.out --label= INSERT INTO TIME_TBL VALUES ('02:03 PST'); +ERROR: could not parse "02:03 PST" as type time: parsing as type time: unimplemented: timestamp abbreviations not supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/31710/v24.2 ++See: https://go.crdb.dev/issue-v/31710/dev INSERT INTO TIME_TBL VALUES ('11:59 EDT'); +ERROR: could not parse "11:59 EDT" as type time: parsing as type time: unimplemented: timestamp abbreviations not supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/31710/v24.2 ++See: https://go.crdb.dev/issue-v/31710/dev INSERT INTO TIME_TBL VALUES ('12:00'); INSERT INTO TIME_TBL VALUES ('12:01'); INSERT INTO TIME_TBL VALUES ('23:59'); @@ -16317,7 +16308,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inet.out --label= +CREATE TABLE INET_TBL (c cidr, i inet) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/18846/v24.2 ++See: https://go.crdb.dev/issue-v/18846/dev INSERT INTO INET_TBL (c, i) VALUES ('192.168.1', '192.168.1.226/24'); +ERROR: relation "inet_tbl" does not exist INSERT INTO INET_TBL (c, i) VALUES ('192.168.1.0/26', '192.168.1.226'); @@ -16639,7 +16630,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inet.out --label= +SELECT * FROM inet_tbl WHERE i<<'192.168.1.0/24'::cidr + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/18846/v24.2 ++See: https://go.crdb.dev/issue-v/18846/dev EXPLAIN (COSTS OFF) SELECT * FROM inet_tbl WHERE i<<='192.168.1.0/24'::cidr; - QUERY PLAN @@ -16670,7 +16661,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inet.out --label= +SELECT * FROM inet_tbl WHERE i<<='192.168.1.0/24'::cidr + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/18846/v24.2 ++See: https://go.crdb.dev/issue-v/18846/dev EXPLAIN (COSTS OFF) SELECT * FROM inet_tbl WHERE '192.168.1.0/24'::cidr >>= i; - QUERY PLAN @@ -16701,7 +16692,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inet.out --label= +SELECT * FROM inet_tbl WHERE '192.168.1.0/24'::cidr >>= i + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/18846/v24.2 ++See: https://go.crdb.dev/issue-v/18846/dev EXPLAIN (COSTS OFF) SELECT * FROM inet_tbl WHERE '192.168.1.0/24'::cidr >> i; - QUERY PLAN @@ -16729,7 +16720,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inet.out --label= +SELECT * FROM inet_tbl WHERE '192.168.1.0/24'::cidr >> i + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/18846/v24.2 ++See: https://go.crdb.dev/issue-v/18846/dev SET enable_seqscan TO on; +WARNING: setting session var "enable_seqscan" is a no-op DROP INDEX inet_idx1; @@ -16752,7 +16743,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inet.out --label= +SELECT * FROM inet_tbl WHERE i << '192.168.1.0/24'::cidr ORDER BY i + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/18846/v24.2 ++See: https://go.crdb.dev/issue-v/18846/dev SELECT * FROM inet_tbl WHERE i <<= '192.168.1.0/24'::cidr ORDER BY i; - c | i -----------------+------------------ @@ -16769,7 +16760,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inet.out --label= +SELECT * FROM inet_tbl WHERE i <<= '192.168.1.0/24'::cidr ORDER BY i + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/18846/v24.2 ++See: https://go.crdb.dev/issue-v/18846/dev SELECT * FROM inet_tbl WHERE i && '192.168.1.0/24'::cidr ORDER BY i; - c | i -----------------+------------------ @@ -16786,7 +16777,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inet.out --label= +SELECT * FROM inet_tbl WHERE i && '192.168.1.0/24'::cidr ORDER BY i + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/18846/v24.2 ++See: https://go.crdb.dev/issue-v/18846/dev SELECT * FROM inet_tbl WHERE i >>= '192.168.1.0/24'::cidr ORDER BY i; - c | i -----------------+------------------ @@ -16800,7 +16791,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inet.out --label= +SELECT * FROM inet_tbl WHERE i >>= '192.168.1.0/24'::cidr ORDER BY i + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/18846/v24.2 ++See: https://go.crdb.dev/issue-v/18846/dev SELECT * FROM inet_tbl WHERE i >> '192.168.1.0/24'::cidr ORDER BY i; - c | i ----+--- @@ -16811,7 +16802,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inet.out --label= +SELECT * FROM inet_tbl WHERE i >> '192.168.1.0/24'::cidr ORDER BY i + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/18846/v24.2 ++See: https://go.crdb.dev/issue-v/18846/dev SELECT * FROM inet_tbl WHERE i < '192.168.1.0/24'::cidr ORDER BY i; - c | i --------------+------------- @@ -16830,7 +16821,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inet.out --label= +SELECT * FROM inet_tbl WHERE i < '192.168.1.0/24'::cidr ORDER BY i + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/18846/v24.2 ++See: https://go.crdb.dev/issue-v/18846/dev SELECT * FROM inet_tbl WHERE i <= '192.168.1.0/24'::cidr ORDER BY i; - c | i -----------------+---------------- @@ -16850,7 +16841,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inet.out --label= +SELECT * FROM inet_tbl WHERE i <= '192.168.1.0/24'::cidr ORDER BY i + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/18846/v24.2 ++See: https://go.crdb.dev/issue-v/18846/dev SELECT * FROM inet_tbl WHERE i = '192.168.1.0/24'::cidr ORDER BY i; - c | i -----------------+---------------- @@ -16862,7 +16853,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inet.out --label= +SELECT * FROM inet_tbl WHERE i = '192.168.1.0/24'::cidr ORDER BY i + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/18846/v24.2 ++See: https://go.crdb.dev/issue-v/18846/dev SELECT * FROM inet_tbl WHERE i >= '192.168.1.0/24'::cidr ORDER BY i; - c | i ---------------------+------------------ @@ -16882,7 +16873,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inet.out --label= +SELECT * FROM inet_tbl WHERE i >= '192.168.1.0/24'::cidr ORDER BY i + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/18846/v24.2 ++See: https://go.crdb.dev/issue-v/18846/dev SELECT * FROM inet_tbl WHERE i > '192.168.1.0/24'::cidr ORDER BY i; - c | i ---------------------+------------------ @@ -16901,7 +16892,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inet.out --label= +SELECT * FROM inet_tbl WHERE i > '192.168.1.0/24'::cidr ORDER BY i + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/18846/v24.2 ++See: https://go.crdb.dev/issue-v/18846/dev SELECT * FROM inet_tbl WHERE i <> '192.168.1.0/24'::cidr ORDER BY i; - c | i ---------------------+------------------ @@ -16928,7 +16919,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inet.out --label= +SELECT * FROM inet_tbl WHERE i <> '192.168.1.0/24'::cidr ORDER BY i + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/18846/v24.2 ++See: https://go.crdb.dev/issue-v/18846/dev -- test index-only scans EXPLAIN (COSTS OFF) SELECT i FROM inet_tbl WHERE i << '192.168.1.0/24'::cidr ORDER BY i; @@ -16958,7 +16949,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inet.out --label= +SELECT i FROM inet_tbl WHERE i << '192.168.1.0/24'::cidr ORDER BY i + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/18846/v24.2 ++See: https://go.crdb.dev/issue-v/18846/dev SET enable_seqscan TO on; +WARNING: setting session var "enable_seqscan" is a no-op DROP INDEX inet_idx2; @@ -16995,7 +16986,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inet.out --label= +SELECT * FROM inet_tbl WHERE i << '192.168.1.0/24'::cidr ORDER BY i + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/18846/v24.2 ++See: https://go.crdb.dev/issue-v/18846/dev SELECT * FROM inet_tbl WHERE i <<= '192.168.1.0/24'::cidr ORDER BY i; - c | i -----------------+------------------ @@ -17012,7 +17003,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inet.out --label= +SELECT * FROM inet_tbl WHERE i <<= '192.168.1.0/24'::cidr ORDER BY i + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/18846/v24.2 ++See: https://go.crdb.dev/issue-v/18846/dev SELECT * FROM inet_tbl WHERE i && '192.168.1.0/24'::cidr ORDER BY i; - c | i -----------------+------------------ @@ -17029,7 +17020,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inet.out --label= +SELECT * FROM inet_tbl WHERE i && '192.168.1.0/24'::cidr ORDER BY i + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/18846/v24.2 ++See: https://go.crdb.dev/issue-v/18846/dev SELECT * FROM inet_tbl WHERE i >>= '192.168.1.0/24'::cidr ORDER BY i; - c | i -----------------+------------------ @@ -17043,7 +17034,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inet.out --label= +SELECT * FROM inet_tbl WHERE i >>= '192.168.1.0/24'::cidr ORDER BY i + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/18846/v24.2 ++See: https://go.crdb.dev/issue-v/18846/dev SELECT * FROM inet_tbl WHERE i >> '192.168.1.0/24'::cidr ORDER BY i; - c | i ----+--- @@ -17054,7 +17045,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inet.out --label= +SELECT * FROM inet_tbl WHERE i >> '192.168.1.0/24'::cidr ORDER BY i + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/18846/v24.2 ++See: https://go.crdb.dev/issue-v/18846/dev SELECT * FROM inet_tbl WHERE i < '192.168.1.0/24'::cidr ORDER BY i; - c | i --------------+------------- @@ -17073,7 +17064,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inet.out --label= +SELECT * FROM inet_tbl WHERE i < '192.168.1.0/24'::cidr ORDER BY i + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/18846/v24.2 ++See: https://go.crdb.dev/issue-v/18846/dev SELECT * FROM inet_tbl WHERE i <= '192.168.1.0/24'::cidr ORDER BY i; - c | i -----------------+---------------- @@ -17093,7 +17084,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inet.out --label= +SELECT * FROM inet_tbl WHERE i <= '192.168.1.0/24'::cidr ORDER BY i + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/18846/v24.2 ++See: https://go.crdb.dev/issue-v/18846/dev SELECT * FROM inet_tbl WHERE i = '192.168.1.0/24'::cidr ORDER BY i; - c | i -----------------+---------------- @@ -17105,7 +17096,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inet.out --label= +SELECT * FROM inet_tbl WHERE i = '192.168.1.0/24'::cidr ORDER BY i + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/18846/v24.2 ++See: https://go.crdb.dev/issue-v/18846/dev SELECT * FROM inet_tbl WHERE i >= '192.168.1.0/24'::cidr ORDER BY i; - c | i ---------------------+------------------ @@ -17125,7 +17116,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inet.out --label= +SELECT * FROM inet_tbl WHERE i >= '192.168.1.0/24'::cidr ORDER BY i + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/18846/v24.2 ++See: https://go.crdb.dev/issue-v/18846/dev SELECT * FROM inet_tbl WHERE i > '192.168.1.0/24'::cidr ORDER BY i; - c | i ---------------------+------------------ @@ -17144,7 +17135,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inet.out --label= +SELECT * FROM inet_tbl WHERE i > '192.168.1.0/24'::cidr ORDER BY i + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/18846/v24.2 ++See: https://go.crdb.dev/issue-v/18846/dev SELECT * FROM inet_tbl WHERE i <> '192.168.1.0/24'::cidr ORDER BY i; - c | i ---------------------+------------------ @@ -17171,7 +17162,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inet.out --label= +SELECT * FROM inet_tbl WHERE i <> '192.168.1.0/24'::cidr ORDER BY i + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/18846/v24.2 ++See: https://go.crdb.dev/issue-v/18846/dev -- test index-only scans EXPLAIN (COSTS OFF) SELECT i FROM inet_tbl WHERE i << '192.168.1.0/24'::cidr ORDER BY i; @@ -17201,7 +17192,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inet.out --label= +SELECT i FROM inet_tbl WHERE i << '192.168.1.0/24'::cidr ORDER BY i + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/18846/v24.2 ++See: https://go.crdb.dev/issue-v/18846/dev SET enable_seqscan TO on; +WARNING: setting session var "enable_seqscan" is a no-op DROP INDEX inet_idx3; @@ -17892,7 +17883,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/interval.out --la SET DATESTYLE = 'postgres'; +ERROR: unimplemented: only ISO style is supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/41773/v24.2 ++See: https://go.crdb.dev/issue-v/41773/dev SET IntervalStyle to postgres_verbose; +ERROR: invalid value for parameter "IntervalStyle": "postgres_verbose" +HINT: Available values: postgres,iso_8601,sql_standard @@ -23528,14 +23519,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/multirangetypes.o +create domain mydomain as int4 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev create type mydomainrange as range(subtype=mydomain); +ERROR: at or near "(": syntax error: unimplemented: this syntax +DETAIL: source SQL: +create type mydomainrange as range(subtype=mydomain) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27791/v24.2 ++See: https://go.crdb.dev/issue-v/27791/dev select '{[4,50)}'::mydomainmultirange @> 7::mydomain; - ?column? ----------- @@ -23550,7 +23541,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/multirangetypes.o +drop domain mydomain cascade + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev -- -- Test domains over multirange types -- @@ -23560,7 +23551,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/multirangetypes.o +create domain restrictedmultirange as int4multirange check (upper(value) < 10) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev select '{[4,5)}'::restrictedmultirange @> 7; - ?column? ----------- @@ -23577,7 +23568,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/multirangetypes.o +drop domain restrictedmultirange + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev --- -- Check automatic naming of multiranges --- @@ -23587,7 +23578,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/multirangetypes.o +create type intr as range(subtype=int) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/27791/v24.2 ++See: https://go.crdb.dev/issue-v/27791/dev select intr_multirange(intr(1,10)); - intr_multirange ------------------ @@ -23607,7 +23598,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/multirangetypes.o +create type intrange as range(subtype=int) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27791/v24.2 ++See: https://go.crdb.dev/issue-v/27791/dev drop type intmultirange; create type intr_multirange as (x int, y int); create type intr as range(subtype=int); -- should fail @@ -23619,7 +23610,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/multirangetypes.o +create type intr as range(subtype=int) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27791/v24.2 ++See: https://go.crdb.dev/issue-v/27791/dev drop type intr_multirange; -- -- Test multiple multirange types over the same subtype and manual naming of @@ -23633,7 +23624,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/multirangetypes.o +create type textrange1 as range(subtype=text, multirange_type_name=int, collation="C") + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27791/v24.2 ++See: https://go.crdb.dev/issue-v/27791/dev -- should pass create type textrange1 as range(subtype=text, multirange_type_name=multirange_of_text, collation="C"); +ERROR: at or near "(": syntax error: unimplemented: this syntax @@ -23641,7 +23632,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/multirangetypes.o +create type textrange1 as range(subtype=text, multirange_type_name=multirange_of_text, collation="C") + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27791/v24.2 ++See: https://go.crdb.dev/issue-v/27791/dev -- should pass, because existing _textrange1 is automatically renamed create type textrange2 as range(subtype=text, multirange_type_name=_textrange1, collation="C"); +ERROR: at or near "(": syntax error: unimplemented: this syntax @@ -23649,7 +23640,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/multirangetypes.o +create type textrange2 as range(subtype=text, multirange_type_name=_textrange1, collation="C") + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27791/v24.2 ++See: https://go.crdb.dev/issue-v/27791/dev select multirange_of_text(textrange2('a','Z')); -- should fail -ERROR: function multirange_of_text(textrange2) does not exist -LINE 1: select multirange_of_text(textrange2('a','Z')); @@ -23858,7 +23849,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/multirangetypes.o +create type two_ints_range as range (subtype = two_ints) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27791/v24.2 ++See: https://go.crdb.dev/issue-v/27791/dev -- with debug_parallel_query on, this exercises tqueue.c's range remapping select *, row_to_json(upper(t)) as u from (values (two_ints_multirange(two_ints_range(row(1,2), row(3,4)))), @@ -23874,7 +23865,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/multirangetypes.o -NOTICE: drop cascades to type two_ints_range +ERROR: unimplemented: DROP TYPE CASCADE is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/51480/v24.2 ++See: https://go.crdb.dev/issue-v/51480/dev -- -- Check behavior when subtype lacks a hash function -- @@ -23977,7 +23968,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/multirangetypes.o +create function mr_table_succeed(i anyelement, r anymultirange) returns table(i anyelement, r anymultirange) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/100226/v24.2 ++See: https://go.crdb.dev/issue-v/100226/dev select * from mr_table_succeed(123, int4multirange(int4range(1,11))); - i | r ------+---------- @@ -24018,7 +24009,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/multirangetypes.o +create function mr_table_fail(i anyelement) returns table(i anyelement, r anymultirange) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/100226/v24.2 ++See: https://go.crdb.dev/issue-v/100226/dev diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --label=/mnt/data1/postgres/src/test/regress/results/timestamp.out /mnt/data1/postgres/src/test/regress/expected/timestamp.out /mnt/data1/postgres/src/test/regress/results/timestamp.out --- /mnt/data1/postgres/src/test/regress/expected/timestamp.out +++ /mnt/data1/postgres/src/test/regress/results/timestamp.out @@ -24028,7 +24019,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l INSERT INTO TIMESTAMP_TBL VALUES ('tomorrow EST'); +ERROR: parsing as type timestamp: unimplemented: timestamp abbreviations not supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/31710/v24.2 ++See: https://go.crdb.dev/issue-v/31710/dev INSERT INTO TIMESTAMP_TBL VALUES ('tomorrow zulu'); +ERROR: current transaction is aborted, commands ignored until end of transaction block SELECT count(*) AS One FROM TIMESTAMP_TBL WHERE d1 = timestamp without time zone 'today'; @@ -24081,46 +24072,38 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l (1 row) SELECT count(*) AS two FROM TIMESTAMP_TBL WHERE d1 = timestamp(2) without time zone 'now'; -@@ -75,6 +67,7 @@ - -- Special values - INSERT INTO TIMESTAMP_TBL VALUES ('-infinity'); - INSERT INTO TIMESTAMP_TBL VALUES ('infinity'); -+ERROR: timestamp "294277-01-01T00:00:00Z" exceeds supported timestamp bounds - INSERT INTO TIMESTAMP_TBL VALUES ('epoch'); - SELECT timestamp 'infinity' = timestamp '+infinity' AS t; - t -@@ -84,12 +77,30 @@ +@@ -84,12 +76,30 @@ -- Postgres v6.0 standard output format INSERT INTO TIMESTAMP_TBL VALUES ('Mon Feb 10 17:32:01 1997 PST'); +ERROR: parsing as type timestamp: unimplemented: timestamp abbreviations not supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/31710/v24.2 ++See: https://go.crdb.dev/issue-v/31710/dev -- Variations on Postgres v6.1 standard output format INSERT INTO TIMESTAMP_TBL VALUES ('Mon Feb 10 17:32:01.000001 1997 PST'); +ERROR: parsing as type timestamp: unimplemented: timestamp abbreviations not supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/31710/v24.2 ++See: https://go.crdb.dev/issue-v/31710/dev INSERT INTO TIMESTAMP_TBL VALUES ('Mon Feb 10 17:32:01.999999 1997 PST'); +ERROR: parsing as type timestamp: unimplemented: timestamp abbreviations not supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/31710/v24.2 ++See: https://go.crdb.dev/issue-v/31710/dev INSERT INTO TIMESTAMP_TBL VALUES ('Mon Feb 10 17:32:01.4 1997 PST'); +ERROR: parsing as type timestamp: unimplemented: timestamp abbreviations not supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/31710/v24.2 ++See: https://go.crdb.dev/issue-v/31710/dev INSERT INTO TIMESTAMP_TBL VALUES ('Mon Feb 10 17:32:01.5 1997 PST'); +ERROR: parsing as type timestamp: unimplemented: timestamp abbreviations not supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/31710/v24.2 ++See: https://go.crdb.dev/issue-v/31710/dev INSERT INTO TIMESTAMP_TBL VALUES ('Mon Feb 10 17:32:01.6 1997 PST'); +ERROR: parsing as type timestamp: unimplemented: timestamp abbreviations not supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/31710/v24.2 ++See: https://go.crdb.dev/issue-v/31710/dev -- ISO 8601 format INSERT INTO TIMESTAMP_TBL VALUES ('1997-01-02'); INSERT INTO TIMESTAMP_TBL VALUES ('1997-01-02 03:04:05'); -@@ -101,494 +112,350 @@ +@@ -101,494 +111,354 @@ INSERT INTO TIMESTAMP_TBL VALUES ('2001-09-22T18:19:20'); -- POSIX format (note that the timezone abbrev is just decoration here) INSERT INTO TIMESTAMP_TBL VALUES ('2000-03-15 08:14:01 GMT+8'); @@ -24138,11 +24121,11 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l INSERT INTO TIMESTAMP_TBL VALUES ('2000-03-15 03:14:04 PST+8'); +ERROR: parsing as type timestamp: unimplemented: timestamp abbreviations not supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/31710/v24.2 ++See: https://go.crdb.dev/issue-v/31710/dev INSERT INTO TIMESTAMP_TBL VALUES ('2000-03-15 02:14:05 MST+7:00'); +ERROR: parsing as type timestamp: unimplemented: timestamp abbreviations not supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/31710/v24.2 ++See: https://go.crdb.dev/issue-v/31710/dev -- Variations for acceptable input formats INSERT INTO TIMESTAMP_TBL VALUES ('Feb 10 17:32:01 1997 -0800'); +ERROR: parsing as type timestamp: unexpected separator ':' for field Hour @@ -24158,19 +24141,19 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l INSERT INTO TIMESTAMP_TBL VALUES ('1997-02-10 17:32:01 PST'); +ERROR: parsing as type timestamp: unimplemented: timestamp abbreviations not supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/31710/v24.2 ++See: https://go.crdb.dev/issue-v/31710/dev INSERT INTO TIMESTAMP_TBL VALUES ('Feb-10-1997 17:32:01 PST'); +ERROR: parsing as type timestamp: unimplemented: timestamp abbreviations not supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/31710/v24.2 ++See: https://go.crdb.dev/issue-v/31710/dev INSERT INTO TIMESTAMP_TBL VALUES ('02-10-1997 17:32:01 PST'); +ERROR: parsing as type timestamp: unimplemented: timestamp abbreviations not supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/31710/v24.2 ++See: https://go.crdb.dev/issue-v/31710/dev INSERT INTO TIMESTAMP_TBL VALUES ('19970210 173201 PST'); +ERROR: parsing as type timestamp: unimplemented: timestamp abbreviations not supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/31710/v24.2 ++See: https://go.crdb.dev/issue-v/31710/dev set datestyle to ymd; INSERT INTO TIMESTAMP_TBL VALUES ('97FEB10 5:32:01PM UTC'); +ERROR: parsing as type timestamp: could not parse "97FEB10 5:32:01PM UTC" @@ -24224,7 +24207,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l INSERT INTO TIMESTAMP_TBL VALUES ('1997-06-10 18:32:01 PDT'); +ERROR: parsing as type timestamp: unimplemented: timestamp abbreviations not supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/31710/v24.2 ++See: https://go.crdb.dev/issue-v/31710/dev INSERT INTO TIMESTAMP_TBL VALUES ('Feb 10 17:32:01 1997'); +ERROR: parsing as type timestamp: unexpected separator ':' for field Hour +DETAIL: Wanted: [ Era Hour Minute Second Nanos Meridian TZHour TZMinute TZSecond ] @@ -24370,8 +24353,10 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l SELECT d1 FROM TIMESTAMP_TBL; - d1 ------------------------------ -- -infinity -- infinity ++ d1 ++--------------------- + -infinity + infinity - Thu Jan 01 00:00:00 1970 - Mon Feb 10 17:32:01 1997 - Mon Feb 10 17:32:01 1997 @@ -24436,9 +24421,6 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l - Sun Dec 31 17:32:01 2000 - Mon Jan 01 17:32:01 2001 -(65 rows) -+ d1 -+------------------------ -+ 4714-11-24 00:00:00 BC + 1970-01-01 00:00:00 + 1997-01-02 00:00:00 + 1997-01-02 03:04:05 @@ -24452,7 +24434,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l + 1997-02-10 17:32:01 + 1997-02-10 17:32:01 + 1997-02-10 17:32:01 -+(14 rows) ++(15 rows) -- Check behavior at the boundaries of the timestamp range SELECT '4714-11-24 00:00:00 BC'::timestamp; @@ -24488,7 +24470,9 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l WHERE d1 > timestamp without time zone '1997-01-02'; - d1 ----------------------------- -- infinity ++ d1 ++--------------------- + infinity - Mon Feb 10 17:32:01 1997 - Mon Feb 10 17:32:01 1997 - Mon Feb 10 17:32:02 1997 @@ -24538,8 +24522,6 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l - Sun Dec 31 17:32:01 2000 - Mon Jan 01 17:32:01 2001 -(49 rows) -+ d1 -+--------------------- + 1997-01-02 03:04:05 + 1997-02-10 17:32:01 + 1997-02-10 17:32:01 @@ -24551,13 +24533,15 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l + 1997-02-10 17:32:01 + 1997-02-10 17:32:01 + 1997-02-10 17:32:01 -+(11 rows) ++(12 rows) SELECT d1 FROM TIMESTAMP_TBL WHERE d1 < timestamp without time zone '1997-01-02'; - d1 ------------------------------ -- -infinity ++ d1 ++--------------------- + -infinity - Thu Jan 01 00:00:00 1970 - Tue Feb 16 17:32:01 0097 BC - Sat Feb 16 17:32:01 0097 @@ -24573,9 +24557,6 @@ diff -U3 
--label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l - Tue Dec 31 17:32:01 1996 - Wed Jan 01 17:32:01 1997 -(15 rows) -+ d1 -+------------------------ -+ 4714-11-24 00:00:00 BC + 1970-01-01 00:00:00 +(2 rows) @@ -24593,8 +24574,10 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l WHERE d1 != timestamp without time zone '1997-01-02'; - d1 ------------------------------ -- -infinity -- infinity ++ d1 ++--------------------- + -infinity + infinity - Thu Jan 01 00:00:00 1970 - Mon Feb 10 17:32:01 1997 - Mon Feb 10 17:32:01 1997 @@ -24658,9 +24641,6 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l - Sun Dec 31 17:32:01 2000 - Mon Jan 01 17:32:01 2001 -(64 rows) -+ d1 -+------------------------ -+ 4714-11-24 00:00:00 BC + 1970-01-01 00:00:00 + 1997-01-02 03:04:05 + 1997-02-10 17:32:01 @@ -24673,13 +24653,15 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l + 1997-02-10 17:32:01 + 1997-02-10 17:32:01 + 1997-02-10 17:32:01 -+(13 rows) ++(14 rows) SELECT d1 FROM TIMESTAMP_TBL WHERE d1 <= timestamp without time zone '1997-01-02'; - d1 ------------------------------ -- -infinity ++ d1 ++--------------------- + -infinity - Thu Jan 01 00:00:00 1970 - Thu Jan 02 00:00:00 1997 - Tue Feb 16 17:32:01 0097 BC @@ -24696,9 +24678,6 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l - Tue Dec 31 17:32:01 1996 - Wed Jan 01 17:32:01 1997 -(16 rows) -+ d1 -+------------------------ -+ 4714-11-24 00:00:00 BC + 1970-01-01 00:00:00 + 1997-01-02 00:00:00 +(3 rows) @@ -24707,7 +24686,9 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l WHERE d1 >= timestamp without time zone '1997-01-02'; - d1 ----------------------------- -- infinity ++ d1 ++--------------------- + infinity - Mon Feb 10 17:32:01 1997 - Mon Feb 10 17:32:01 1997 - Mon Feb 10 17:32:02 1997 @@ -24758,8 +24739,6 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l - Sun Dec 31 17:32:01 2000 - Mon Jan 01 17:32:01 2001 -(50 rows) -+ d1 -+--------------------- + 1997-01-02 00:00:00 + 1997-01-02 03:04:05 + 1997-02-10 17:32:01 @@ -24772,7 +24751,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l + 1997-02-10 17:32:01 + 1997-02-10 17:32:01 + 1997-02-10 17:32:01 -+(12 rows) ++(13 rows) SELECT d1 - timestamp without time zone '1997-01-02' AS diff FROM TIMESTAMP_TBL WHERE d1 BETWEEN '1902-01-01' AND '2038-01-01'; @@ -24861,7 +24840,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l (1 row) -- verify date_bin behaves the same as date_trunc for relevant intervals -@@ -608,17 +475,7 @@ +@@ -608,17 +478,7 @@ ('microsecond', '1 us') ) intervals (str, interval), (VALUES (timestamp '2020-02-29 15:44:17.71393')) ts (ts); @@ -24880,7 +24859,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l -- case 2: BC dates, origin < input SELECT str, -@@ -635,17 +492,7 @@ +@@ -635,17 +495,7 @@ ('microsecond', '1 us') ) intervals (str, interval), (VALUES (timestamp '0055-6-10 15:44:17.71393 BC')) ts (ts); @@ -24899,7 +24878,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l -- case 3: AD dates, origin > input SELECT str, -@@ -662,17 +509,7 @@ +@@ -662,17 +512,7 @@ ('microsecond', '1 us') ) intervals (str, interval), (VALUES (timestamp '2020-02-29 15:44:17.71393')) ts (ts); @@ -24918,7 +24897,7 @@ diff -U3 
--label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l -- case 4: BC dates, origin > input SELECT str, -@@ -689,17 +526,7 @@ +@@ -689,17 +529,7 @@ ('microsecond', '1 us') ) intervals (str, interval), (VALUES (timestamp '0055-6-10 15:44:17.71393 BC')) ts (ts); @@ -24937,7 +24916,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l -- bin timestamps into arbitrary intervals SELECT interval, -@@ -718,98 +545,42 @@ +@@ -718,98 +548,42 @@ ) intervals (interval), (VALUES (timestamp '2020-02-11 15:44:17.71393')) ts (ts), (VALUES (timestamp '2001-01-01')) origin (origin); @@ -25058,7 +25037,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l -- DATE_PART (timestamp_part) SELECT d1 as "timestamp", -@@ -817,221 +588,51 @@ +@@ -817,221 +591,53 @@ date_part( 'day', d1) AS day, date_part( 'hour', d1) AS hour, date_part( 'minute', d1) AS minute, date_part( 'second', d1) AS second FROM TIMESTAMP_TBL; @@ -25130,23 +25109,24 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l - Sun Dec 31 17:32:01 2000 | 2000 | 12 | 31 | 17 | 32 | 1 - Mon Jan 01 17:32:01 2001 | 2001 | 1 | 1 | 17 | 32 | 1 -(65 rows) -+ timestamp | year | month | day | hour | minute | second -+------------------------+-------+-------+-----+------+--------+-------- -+ 4714-11-24 00:00:00 BC | -4713 | 11 | 24 | 0 | 0 | 0 -+ 1970-01-01 00:00:00 | 1970 | 1 | 1 | 0 | 0 | 0 -+ 1997-01-02 00:00:00 | 1997 | 1 | 2 | 0 | 0 | 0 -+ 1997-01-02 03:04:05 | 1997 | 1 | 2 | 3 | 4 | 5 -+ 1997-02-10 17:32:01 | 1997 | 2 | 10 | 17 | 32 | 1 -+ 1997-02-10 17:32:01 | 1997 | 2 | 10 | 17 | 32 | 1 -+ 1997-02-10 17:32:01 | 1997 | 2 | 10 | 17 | 32 | 1 -+ 1997-02-10 17:32:01 | 1997 | 2 | 10 | 17 | 32 | 1 -+ 1997-06-10 17:32:01 | 1997 | 6 | 10 | 17 | 32 | 1 -+ 2001-09-22 18:19:20 | 2001 | 9 | 22 | 18 | 19 | 20 -+ 1997-02-10 17:32:01 | 1997 | 2 | 10 | 17 | 32 | 1 -+ 1997-02-10 17:32:01 | 1997 | 2 | 10 | 17 | 32 | 1 -+ 1997-02-10 17:32:01 | 1997 | 2 | 10 | 17 | 32 | 1 -+ 1997-02-10 17:32:01 | 1997 | 2 | 10 | 17 | 32 | 1 -+(14 rows) ++ timestamp | year | month | day | hour | minute | second ++---------------------+--------+-------+-----+------+--------+----------- ++ -infinity | -4714 | 11 | 23 | 0 | 0 | 0 ++ infinity | 294277 | 1 | 1 | 23 | 59 | 59.999999 ++ 1970-01-01 00:00:00 | 1970 | 1 | 1 | 0 | 0 | 0 ++ 1997-01-02 00:00:00 | 1997 | 1 | 2 | 0 | 0 | 0 ++ 1997-01-02 03:04:05 | 1997 | 1 | 2 | 3 | 4 | 5 ++ 1997-02-10 17:32:01 | 1997 | 2 | 10 | 17 | 32 | 1 ++ 1997-02-10 17:32:01 | 1997 | 2 | 10 | 17 | 32 | 1 ++ 1997-02-10 17:32:01 | 1997 | 2 | 10 | 17 | 32 | 1 ++ 1997-02-10 17:32:01 | 1997 | 2 | 10 | 17 | 32 | 1 ++ 1997-06-10 17:32:01 | 1997 | 6 | 10 | 17 | 32 | 1 ++ 2001-09-22 18:19:20 | 2001 | 9 | 22 | 18 | 19 | 20 ++ 1997-02-10 17:32:01 | 1997 | 2 | 10 | 17 | 32 | 1 ++ 1997-02-10 17:32:01 | 1997 | 2 | 10 | 17 | 32 | 1 ++ 1997-02-10 17:32:01 | 1997 | 2 | 10 | 17 | 32 | 1 ++ 1997-02-10 17:32:01 | 1997 | 2 | 10 | 17 | 32 | 1 ++(15 rows) SELECT d1 as "timestamp", date_part( 'quarter', d1) AS quarter, date_part( 'msec', d1) AS msec, @@ -25295,27 +25275,28 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l - Sun Dec 31 17:32:01 2000 | 2000 | 52 | 7 | 0 | 366 - Mon Jan 01 17:32:01 2001 | 2001 | 1 | 1 | 1 | 1 -(65 rows) -+ timestamp | isoyear | week | isodow | dow | doy -+------------------------+---------+------+--------+-----+----- -+ 4714-11-24 00:00:00 BC | -4713 | 48 | 1 | 1 | 328 -+ 1970-01-01 00:00:00 | 1970 | 1 | 4 | 4 | 1 -+ 
1997-01-02 00:00:00 | 1997 | 1 | 4 | 4 | 2 -+ 1997-01-02 03:04:05 | 1997 | 1 | 4 | 4 | 2 -+ 1997-02-10 17:32:01 | 1997 | 7 | 1 | 1 | 41 -+ 1997-02-10 17:32:01 | 1997 | 7 | 1 | 1 | 41 -+ 1997-02-10 17:32:01 | 1997 | 7 | 1 | 1 | 41 -+ 1997-02-10 17:32:01 | 1997 | 7 | 1 | 1 | 41 -+ 1997-06-10 17:32:01 | 1997 | 24 | 2 | 2 | 161 -+ 2001-09-22 18:19:20 | 2001 | 38 | 6 | 6 | 265 -+ 1997-02-10 17:32:01 | 1997 | 7 | 1 | 1 | 41 -+ 1997-02-10 17:32:01 | 1997 | 7 | 1 | 1 | 41 -+ 1997-02-10 17:32:01 | 1997 | 7 | 1 | 1 | 41 -+ 1997-02-10 17:32:01 | 1997 | 7 | 1 | 1 | 41 -+(14 rows) ++ timestamp | isoyear | week | isodow | dow | doy ++---------------------+---------+------+--------+-----+----- ++ -infinity | -4714 | 47 | 6 | 6 | 327 ++ infinity | 294277 | 1 | 1 | 1 | 1 ++ 1970-01-01 00:00:00 | 1970 | 1 | 4 | 4 | 1 ++ 1997-01-02 00:00:00 | 1997 | 1 | 4 | 4 | 2 ++ 1997-01-02 03:04:05 | 1997 | 1 | 4 | 4 | 2 ++ 1997-02-10 17:32:01 | 1997 | 7 | 1 | 1 | 41 ++ 1997-02-10 17:32:01 | 1997 | 7 | 1 | 1 | 41 ++ 1997-02-10 17:32:01 | 1997 | 7 | 1 | 1 | 41 ++ 1997-02-10 17:32:01 | 1997 | 7 | 1 | 1 | 41 ++ 1997-06-10 17:32:01 | 1997 | 24 | 2 | 2 | 161 ++ 2001-09-22 18:19:20 | 2001 | 38 | 6 | 6 | 265 ++ 1997-02-10 17:32:01 | 1997 | 7 | 1 | 1 | 41 ++ 1997-02-10 17:32:01 | 1997 | 7 | 1 | 1 | 41 ++ 1997-02-10 17:32:01 | 1997 | 7 | 1 | 1 | 41 ++ 1997-02-10 17:32:01 | 1997 | 7 | 1 | 1 | 41 ++(15 rows) SELECT d1 as "timestamp", date_part( 'decade', d1) AS decade, -@@ -1040,74 +641,23 @@ +@@ -1040,74 +646,24 @@ round(date_part( 'julian', d1)) AS julian, date_part( 'epoch', d1) AS epoch FROM TIMESTAMP_TBL; @@ -25387,27 +25368,28 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l - Sun Dec 31 17:32:01 2000 | 200 | 20 | 2 | 2451911 | 978283921 - Mon Jan 01 17:32:01 2001 | 200 | 21 | 3 | 2451912 | 978370321 -(65 rows) -+ timestamp | decade | century | millennium | julian | epoch -+------------------------+--------+---------+------------+--------------+------------------------ -+ 4714-11-24 00:00:00 BC | -472 | -48 | -5 | 0 | -7.952618389194932e+09 -+ 1970-01-01 00:00:00 | 197 | 20 | 2 | 2.440588e+06 | 0 -+ 1997-01-02 00:00:00 | 199 | 20 | 2 | 2.450451e+06 | 8.521632e+08 -+ 1997-01-02 03:04:05 | 199 | 20 | 2 | 2.450451e+06 | 8.52174245e+08 -+ 1997-02-10 17:32:01 | 199 | 20 | 2 | 2.450491e+06 | 8.55595921e+08 -+ 1997-02-10 17:32:01 | 199 | 20 | 2 | 2.450491e+06 | 8.55595921e+08 -+ 1997-02-10 17:32:01 | 199 | 20 | 2 | 2.450491e+06 | 8.55595921e+08 -+ 1997-02-10 17:32:01 | 199 | 20 | 2 | 2.450491e+06 | 8.55595921e+08 -+ 1997-06-10 17:32:01 | 199 | 20 | 2 | 2.450611e+06 | 8.65963921e+08 -+ 2001-09-22 18:19:20 | 200 | 21 | 3 | 2.452176e+06 | 1.00118276e+09 -+ 1997-02-10 17:32:01 | 199 | 20 | 2 | 2.450491e+06 | 8.55595921e+08 -+ 1997-02-10 17:32:01 | 199 | 20 | 2 | 2.450491e+06 | 8.55595921e+08 -+ 1997-02-10 17:32:01 | 199 | 20 | 2 | 2.450491e+06 | 8.55595921e+08 -+ 1997-02-10 17:32:01 | 199 | 20 | 2 | 2.450491e+06 | 8.55595921e+08 -+(14 rows) ++ timestamp | decade | century | millennium | julian | epoch ++---------------------+--------+---------+------------+----------------+------------------------ ++ -infinity | -472 | -48 | -5 | -366 | -7.984240789194932e+09 ++ infinity | 29427 | 2943 | 295 | 1.09203529e+08 | 9.46065545224191e+08 ++ 1970-01-01 00:00:00 | 197 | 20 | 2 | 2.440588e+06 | 0 ++ 1997-01-02 00:00:00 | 199 | 20 | 2 | 2.450451e+06 | 8.521632e+08 ++ 1997-01-02 03:04:05 | 199 | 20 | 2 | 2.450451e+06 | 8.52174245e+08 ++ 1997-02-10 17:32:01 | 199 | 20 | 2 | 2.450491e+06 | 8.55595921e+08 ++ 
1997-02-10 17:32:01 | 199 | 20 | 2 | 2.450491e+06 | 8.55595921e+08 ++ 1997-02-10 17:32:01 | 199 | 20 | 2 | 2.450491e+06 | 8.55595921e+08 ++ 1997-02-10 17:32:01 | 199 | 20 | 2 | 2.450491e+06 | 8.55595921e+08 ++ 1997-06-10 17:32:01 | 199 | 20 | 2 | 2.450611e+06 | 8.65963921e+08 ++ 2001-09-22 18:19:20 | 200 | 21 | 3 | 2.452176e+06 | 1.00118276e+09 ++ 1997-02-10 17:32:01 | 199 | 20 | 2 | 2.450491e+06 | 8.55595921e+08 ++ 1997-02-10 17:32:01 | 199 | 20 | 2 | 2.450491e+06 | 8.55595921e+08 ++ 1997-02-10 17:32:01 | 199 | 20 | 2 | 2.450491e+06 | 8.55595921e+08 ++ 1997-02-10 17:32:01 | 199 | 20 | 2 | 2.450491e+06 | 8.55595921e+08 ++(15 rows) -- extract implementation is mostly the same as date_part, so only -- test a few cases for additional coverage. -@@ -1118,260 +668,104 @@ +@@ -1118,260 +674,108 @@ round(extract(julian from d1)) AS julian, extract(epoch from d1) AS epoch FROM TIMESTAMP_TBL; @@ -25479,23 +25461,24 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l - Sun Dec 31 17:32:01 2000 | 1000000 | 1000.000 | 1.000000 | 2451911 | 978283921.000000 - Mon Jan 01 17:32:01 2001 | 1000000 | 1000.000 | 1.000000 | 2451912 | 978370321.000000 -(65 rows) -+ timestamp | microseconds | milliseconds | seconds | julian | epoch -+------------------------+--------------+--------------+---------+--------------+------------------------ -+ 4714-11-24 00:00:00 BC | 0 | 0 | 0 | 0 | -7.952618389194932e+09 -+ 1970-01-01 00:00:00 | 0 | 0 | 0 | 2.440588e+06 | 0 -+ 1997-01-02 00:00:00 | 0 | 0 | 0 | 2.450451e+06 | 8.521632e+08 -+ 1997-01-02 03:04:05 | 5e+06 | 5000 | 5 | 2.450451e+06 | 8.52174245e+08 -+ 1997-02-10 17:32:01 | 1e+06 | 1000 | 1 | 2.450491e+06 | 8.55595921e+08 -+ 1997-02-10 17:32:01 | 1e+06 | 1000 | 1 | 2.450491e+06 | 8.55595921e+08 -+ 1997-02-10 17:32:01 | 1e+06 | 1000 | 1 | 2.450491e+06 | 8.55595921e+08 -+ 1997-02-10 17:32:01 | 1e+06 | 1000 | 1 | 2.450491e+06 | 8.55595921e+08 -+ 1997-06-10 17:32:01 | 1e+06 | 1000 | 1 | 2.450611e+06 | 8.65963921e+08 -+ 2001-09-22 18:19:20 | 2e+07 | 20000 | 20 | 2.452176e+06 | 1.00118276e+09 -+ 1997-02-10 17:32:01 | 1e+06 | 1000 | 1 | 2.450491e+06 | 8.55595921e+08 -+ 1997-02-10 17:32:01 | 1e+06 | 1000 | 1 | 2.450491e+06 | 8.55595921e+08 -+ 1997-02-10 17:32:01 | 1e+06 | 1000 | 1 | 2.450491e+06 | 8.55595921e+08 -+ 1997-02-10 17:32:01 | 1e+06 | 1000 | 1 | 2.450491e+06 | 8.55595921e+08 -+(14 rows) ++ timestamp | microseconds | milliseconds | seconds | julian | epoch ++---------------------+---------------+--------------+-----------+----------------+------------------------ ++ -infinity | 0 | 0 | 0 | -366 | -7.984240789194932e+09 ++ infinity | 5.9999999e+07 | 59999.999 | 59.999999 | 1.09203529e+08 | 9.46065545224191e+08 ++ 1970-01-01 00:00:00 | 0 | 0 | 0 | 2.440588e+06 | 0 ++ 1997-01-02 00:00:00 | 0 | 0 | 0 | 2.450451e+06 | 8.521632e+08 ++ 1997-01-02 03:04:05 | 5e+06 | 5000 | 5 | 2.450451e+06 | 8.52174245e+08 ++ 1997-02-10 17:32:01 | 1e+06 | 1000 | 1 | 2.450491e+06 | 8.55595921e+08 ++ 1997-02-10 17:32:01 | 1e+06 | 1000 | 1 | 2.450491e+06 | 8.55595921e+08 ++ 1997-02-10 17:32:01 | 1e+06 | 1000 | 1 | 2.450491e+06 | 8.55595921e+08 ++ 1997-02-10 17:32:01 | 1e+06 | 1000 | 1 | 2.450491e+06 | 8.55595921e+08 ++ 1997-06-10 17:32:01 | 1e+06 | 1000 | 1 | 2.450611e+06 | 8.65963921e+08 ++ 2001-09-22 18:19:20 | 2e+07 | 20000 | 20 | 2.452176e+06 | 1.00118276e+09 ++ 1997-02-10 17:32:01 | 1e+06 | 1000 | 1 | 2.450491e+06 | 8.55595921e+08 ++ 1997-02-10 17:32:01 | 1e+06 | 1000 | 1 | 2.450491e+06 | 8.55595921e+08 ++ 1997-02-10 17:32:01 | 1e+06 | 1000 | 1 | 2.450491e+06 
| 8.55595921e+08 ++ 1997-02-10 17:32:01 | 1e+06 | 1000 | 1 | 2.450491e+06 | 8.55595921e+08 ++(15 rows) -- value near upper bound uses special case in code SELECT date_part('epoch', '294270-01-01 00:00:00'::timestamp); @@ -25612,7 +25595,8 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l - SUNDAY Sunday sunday SUN Sun sun DECEMBER December december XII DEC Dec dec - MONDAY Monday monday MON Mon mon JANUARY January january I JAN Jan jan -(65 rows) -+ monday Monday monday mon Mon mon NOVEMBER November november XI NOV Nov nov ++ saturday Saturday saturday sat Sat sat NOVEMBER November november XI NOV Nov nov ++ monday Monday monday mon Mon mon JANUARY January january I JAN Jan jan + thursday Thursday thursday thu Thu thu JANUARY January january I JAN Jan jan + thursday Thursday thursday thu Thu thu JANUARY January january I JAN Jan jan + thursday Thursday thursday thu Thu thu JANUARY January january I JAN Jan jan @@ -25626,7 +25610,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l + monday Monday monday mon Mon mon FEBRUARY February february II FEB Feb feb + monday Monday monday mon Mon mon FEBRUARY February february II FEB Feb feb + monday Monday monday mon Mon mon FEBRUARY February february II FEB Feb feb -+(14 rows) ++(15 rows) SELECT to_char(d1, 'FMDAY FMDay FMday FMMONTH FMMonth FMmonth FMRM') FROM TIMESTAMP_TBL; @@ -25700,7 +25684,8 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l -(65 rows) + to_char +------------------------------------------------------------- -+ monday Monday monday NOVEMBER November november XI ++ saturday Saturday saturday NOVEMBER November november XI ++ monday Monday monday JANUARY January january I + thursday Thursday thursday JANUARY January january I + thursday Thursday thursday JANUARY January january I + thursday Thursday thursday JANUARY January january I @@ -25714,7 +25699,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l + monday Monday monday FEBRUARY February february II + monday Monday monday FEBRUARY February february II + monday Monday monday FEBRUARY February february II -+(14 rows) ++(15 rows) SELECT to_char(d1, 'Y,YYY YYYY YYY YY Y CC Q MM WW DDD DD D J') FROM TIMESTAMP_TBL; @@ -25722,9 +25707,10 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l --------------------------------------------------- - - -+ to_char -+------------------------------------------------- -+ 4,714 4714 714 14 4 -48 4 11 47 328 24 2 0 ++ to_char ++--------------------------------------------------------- ++ 4,715 4715 715 15 5 -48 4 11 47 327 23 7 -366 ++ 294,277 294277 277 77 7 2943 1 01 01 001 01 2 109203528 1,970 1970 970 70 0 20 1 01 01 001 01 5 2440588 - 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490 - 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490 @@ -25735,7 +25721,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l 1,997 1997 997 97 7 20 1 01 01 002 02 5 2450451 1,997 1997 997 97 7 20 1 01 01 002 02 5 2450451 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490 -@@ -1380,69 +774,18 @@ +@@ -1380,69 +784,19 @@ 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490 1,997 1997 997 97 7 20 2 06 23 161 10 3 2450610 2,001 2001 001 01 1 21 3 09 38 265 22 7 2452175 @@ -25788,7 +25774,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l - 2,000 2000 000 00 0 20 4 12 53 366 31 1 2451910 - 2,001 2001 001 01 1 21 1 01 01 001 01 2 2451911 -(65 rows) 
-+(14 rows) ++(15 rows) SELECT to_char(d1, 'FMY,YYY FMYYYY FMYYY FMYY FMY FMCC FMQ FMMM FMWW FMDDD FMDD FMD FMJ') FROM TIMESTAMP_TBL; @@ -25796,9 +25782,10 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l -------------------------------------------------- - - -+ to_char -+------------------------------------------------ -+ 4,714 4714 714 14 4 -48 4 11 47 328 24 2 0 ++ to_char ++---------------------------------------------------- ++ 4,715 4715 715 15 5 -48 4 11 47 327 23 7 -366 ++ 294,277 294277 277 77 7 2943 1 1 1 1 1 2 109203528 1,970 1970 970 70 0 20 1 1 1 1 1 5 2440588 - 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490 - 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490 @@ -25809,7 +25796,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l 1,997 1997 997 97 7 20 1 1 1 2 2 5 2450451 1,997 1997 997 97 7 20 1 1 1 2 2 5 2450451 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490 -@@ -1451,69 +794,18 @@ +@@ -1451,69 +805,19 @@ 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490 1,997 1997 997 97 7 20 2 6 23 161 10 3 2450610 2,001 2001 1 1 1 21 3 9 38 265 22 7 2452175 @@ -25862,7 +25849,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l - 2,000 2000 0 0 0 20 4 12 53 366 31 1 2451910 - 2,001 2001 1 1 1 21 1 1 1 1 1 2 2451911 -(65 rows) -+(14 rows) ++(15 rows) SELECT to_char(d1, 'HH HH12 HH24 MI SS SSSS') FROM TIMESTAMP_TBL; @@ -25877,11 +25864,12 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l - 05 05 17 32 01 63121 - 05 05 17 32 01 63121 - 05 05 17 32 01 63121 ++ 11 11 23 59 59 86399 + 12 12 00 00 00 0 12 12 00 00 00 0 03 03 03 04 05 11045 05 05 17 32 01 63121 -@@ -1522,69 +814,18 @@ +@@ -1522,69 +826,19 @@ 05 05 17 32 01 63121 05 05 17 32 01 63121 06 06 18 19 20 65960 @@ -25910,6 +25898,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l - 05 05 17 32 01 63121 - 05 05 17 32 01 63121 - 05 05 17 32 01 63121 +- 05 05 17 32 01 63121 - 05 05 17 32 01 63121 05 05 17 32 01 63121 05 05 17 32 01 63121 @@ -25932,9 +25921,8 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l - 05 05 17 32 01 63121 - 05 05 17 32 01 63121 - 05 05 17 32 01 63121 -- 05 05 17 32 01 63121 -(65 rows) -+(14 rows) ++(15 rows) SELECT to_char(d1, E'"HH:MI:SS is" HH:MI:SS "\\"text between quote marks\\""') FROM TIMESTAMP_TBL; @@ -25949,11 +25937,12 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l - HH:MI:SS is 05:32:01 "text between quote marks" - HH:MI:SS is 05:32:01 "text between quote marks" - HH:MI:SS is 05:32:01 "text between quote marks" ++ HH:MI:SS is 11:59:59 "text between quote marks" + HH:MI:SS is 12:00:00 "text between quote marks" HH:MI:SS is 12:00:00 "text between quote marks" HH:MI:SS is 03:04:05 "text between quote marks" HH:MI:SS is 05:32:01 "text between quote marks" -@@ -1593,69 +834,18 @@ +@@ -1593,69 +847,19 @@ HH:MI:SS is 05:32:01 "text between quote marks" HH:MI:SS is 05:32:01 "text between quote marks" HH:MI:SS is 06:19:20 "text between quote marks" @@ -25975,6 +25964,8 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l - HH:MI:SS is 05:32:01 "text between quote marks" - HH:MI:SS is 05:32:01 "text between quote marks" - HH:MI:SS is 06:32:01 "text between quote marks" +- HH:MI:SS is 05:32:01 "text between quote marks" +- HH:MI:SS is 05:32:01 "text between quote marks" - HH:MI:SS is 05:32:01 "text between quote marks" HH:MI:SS is 05:32:01 "text between quote 
marks" HH:MI:SS is 05:32:01 "text between quote marks" @@ -26003,10 +25994,8 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l - HH:MI:SS is 05:32:01 "text between quote marks" - HH:MI:SS is 05:32:01 "text between quote marks" - HH:MI:SS is 05:32:01 "text between quote marks" -- HH:MI:SS is 05:32:01 "text between quote marks" -- HH:MI:SS is 05:32:01 "text between quote marks" -(65 rows) -+(14 rows) ++(15 rows) SELECT to_char(d1, 'HH24--text--MI--text--SS') FROM TIMESTAMP_TBL; @@ -26021,11 +26010,12 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l - 17--text--32--text--01 - 17--text--32--text--01 - 17--text--32--text--01 ++ 23--text--59--text--59 + 00--text--00--text--00 00--text--00--text--00 03--text--04--text--05 17--text--32--text--01 -@@ -1664,69 +854,18 @@ +@@ -1664,69 +868,19 @@ 17--text--32--text--01 17--text--32--text--01 18--text--19--text--20 @@ -26040,16 +26030,16 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l - 17--text--32--text--01 - 17--text--32--text--01 - 17--text--32--text--01 +- 17--text--32--text--01 +- 17--text--32--text--01 +- 17--text--32--text--01 - 17--text--32--text--01 17--text--32--text--01 17--text--32--text--01 +- 18--text--32--text--01 17--text--32--text--01 17--text--32--text--01 - 17--text--32--text--01 -- 18--text--32--text--01 -- 17--text--32--text--01 -- 17--text--32--text--01 -- 17--text--32--text--01 - 17--text--32--text--01 - 17--text--32--text--01 - 17--text--32--text--01 @@ -26078,15 +26068,18 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l - 17--text--32--text--01 - 17--text--32--text--01 -(65 rows) -+(14 rows) ++(15 rows) SELECT to_char(d1, 'YYYYTH YYYYth Jth') FROM TIMESTAMP_TBL; - to_char - ------------------------- +- to_char +-------------------------- - - -+ 4714TH 4714th 0th ++ to_char ++------------------------------- ++ 4715TH 4715th -366th ++ 294277TH 294277th 109203528th 1970TH 1970th 2440588th - 1997TH 1997th 2450490th - 1997TH 1997th 2450490th @@ -26097,7 +26090,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l 1997TH 1997th 2450451st 1997TH 1997th 2450451st 1997TH 1997th 2450490th -@@ -1735,69 +874,18 @@ +@@ -1735,69 +889,19 @@ 1997TH 1997th 2450490th 1997TH 1997th 2450610th 2001ST 2001st 2452175th @@ -26109,15 +26102,15 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l - 1997TH 1997th 2450490th - 1997TH 1997th 2450490th - 1997TH 1997th 2450490th + 1997TH 1997th 2450490th + 1997TH 1997th 2450490th + 1997TH 1997th 2450490th + 1997TH 1997th 2450490th - 1997TH 1997th 2450490th - 1997TH 1997th 2450490th - 1997TH 1997th 2450490th - 1997TH 1997th 2450490th - 1997TH 1997th 2450490th - 1997TH 1997th 2450490th - 1997TH 1997th 2450490th - 1997TH 1997th 2450490th - 1997TH 1997th 2450490th - 1997TH 1997th 2450610th - 1997TH 1997th 2450490th - 1997TH 1997th 2450491st @@ -26150,15 +26143,18 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l - 2000TH 2000th 2451910th - 2001ST 2001st 2451911th -(65 rows) -+(14 rows) ++(15 rows) SELECT to_char(d1, 'YYYY A.D. YYYY a.d. YYYY bc HH:MI:SS P.M. HH:MI:SS p.m. HH:MI:SS pm') FROM TIMESTAMP_TBL; - to_char - --------------------------------------------------------------------- +- to_char +---------------------------------------------------------------------- - - -+ 4714 B.C. 4714 b.c. 4714 bc 12:00:00 A.M. 12:00:00 a.m. 
12:00:00 am ++ to_char ++--------------------------------------------------------------------------- ++ 4715 B.C. 4715 b.c. 4715 bc 12:00:00 A.M. 12:00:00 a.m. 12:00:00 am ++ 294277 A.D. 294277 a.d. 294277 ad 11:59:59 P.M. 11:59:59 p.m. 11:59:59 pm 1970 A.D. 1970 a.d. 1970 ad 12:00:00 A.M. 12:00:00 a.m. 12:00:00 am - 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm - 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm @@ -26169,7 +26165,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l 1997 A.D. 1997 a.d. 1997 ad 12:00:00 A.M. 12:00:00 a.m. 12:00:00 am 1997 A.D. 1997 a.d. 1997 ad 03:04:05 A.M. 03:04:05 a.m. 03:04:05 am 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm -@@ -1806,69 +894,18 @@ +@@ -1806,69 +910,19 @@ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm 2001 A.D. 2001 a.d. 2001 ad 06:19:20 P.M. 06:19:20 p.m. 06:19:20 pm @@ -26186,10 +26182,10 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l - 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm - 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm - 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm -- 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm -- 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm -- 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm -- 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm - 1997 A.D. 1997 a.d. 1997 ad 06:32:01 P.M. 06:32:01 p.m. 06:32:01 pm - 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm - 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm @@ -26213,24 +26209,27 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l - 1996 A.D. 1996 a.d. 1996 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm - 1996 A.D. 1996 a.d. 1996 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm - 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm - 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm - 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm - 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm - 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm +- 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm +- 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm +- 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm +- 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm - 1999 A.D. 1999 a.d. 1999 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm - 2000 A.D. 2000 a.d. 2000 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm - 2000 A.D. 2000 a.d. 2000 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm - 2001 A.D. 2001 a.d. 2001 ad 05:32:01 P.M. 05:32:01 p.m. 
05:32:01 pm -(65 rows) -+(14 rows) ++(15 rows) SELECT to_char(d1, 'IYYY IYY IY I IW IDDD ID') FROM TIMESTAMP_TBL; - to_char - ------------------------ +- to_char +------------------------- - - -+ 4714 714 14 4 48 330 1 ++ to_char ++-------------------------- ++ 4715 715 15 5 47 328 6 ++ 294277 277 77 7 01 001 1 1970 970 70 0 01 004 4 - 1997 997 97 7 07 043 1 - 1997 997 97 7 07 043 1 @@ -26241,7 +26240,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l 1997 997 97 7 01 004 4 1997 997 97 7 01 004 4 1997 997 97 7 07 043 1 -@@ -1877,69 +914,18 @@ +@@ -1877,69 +931,19 @@ 1997 997 97 7 07 043 1 1997 997 97 7 24 163 2 2001 001 01 1 38 265 6 @@ -26252,6 +26251,9 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l - 2000 000 00 0 11 073 3 - 1997 997 97 7 07 043 1 - 1997 997 97 7 07 043 1 +- 1997 997 97 7 07 043 1 +- 1997 997 97 7 07 043 1 +- 1997 997 97 7 07 043 1 - 1997 997 97 7 07 043 1 1997 997 97 7 07 043 1 1997 997 97 7 07 043 1 @@ -26259,9 +26261,6 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l 1997 997 97 7 07 043 1 - 1997 997 97 7 07 043 1 - 1997 997 97 7 07 043 1 -- 1997 997 97 7 07 043 1 -- 1997 997 97 7 07 043 1 -- 1997 997 97 7 07 043 1 - 1997 997 97 7 24 163 2 - 1997 997 97 7 07 043 1 - 1997 997 97 7 07 044 2 @@ -26294,7 +26293,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l - 2000 000 00 0 52 364 7 - 2001 001 01 1 01 001 1 -(65 rows) -+(14 rows) ++(15 rows) SELECT to_char(d1, 'FMIYYY FMIYY FMIY FMI FMIW FMIDDD FMID') FROM TIMESTAMP_TBL; @@ -26302,7 +26301,8 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l ------------------------ - - -+ 4714 714 14 4 48 330 1 ++ 4715 715 15 5 47 328 6 ++ 294277 277 77 7 1 1 1 1970 970 70 0 1 4 4 - 1997 997 97 7 7 43 1 - 1997 997 97 7 7 43 1 @@ -26313,7 +26313,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l 1997 997 97 7 1 4 4 1997 997 97 7 1 4 4 1997 997 97 7 7 43 1 -@@ -1948,55 +934,11 @@ +@@ -1948,55 +952,11 @@ 1997 997 97 7 7 43 1 1997 997 97 7 24 163 2 2001 1 1 1 38 265 6 @@ -26326,14 +26326,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l - 1997 997 97 7 7 43 1 - 1997 997 97 7 7 43 1 - 1997 997 97 7 7 43 1 +- 1997 997 97 7 7 43 1 +- 1997 997 97 7 7 43 1 +- 1997 997 97 7 7 43 1 - 1997 997 97 7 7 43 1 1997 997 97 7 7 43 1 1997 997 97 7 7 43 1 1997 997 97 7 7 43 1 1997 997 97 7 7 43 1 -- 1997 997 97 7 7 43 1 -- 1997 997 97 7 7 43 1 -- 1997 997 97 7 7 43 1 - 1997 997 97 7 24 163 2 - 1997 997 97 7 7 43 1 - 1997 997 97 7 7 44 2 @@ -26366,11 +26366,11 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l - 2000 0 0 0 52 364 7 - 2001 1 1 1 1 1 1 -(65 rows) -+(14 rows) ++(15 rows) SELECT to_char(d, 'FF1 FF2 FF3 FF4 FF5 FF6 ff1 ff2 ff3 ff4 ff5 ff6 MS US') FROM (VALUES -@@ -2005,14 +947,7 @@ +@@ -2005,14 +965,7 @@ ('2018-11-02 12:34:56.78901'), ('2018-11-02 12:34:56.78901234') ) d(d); @@ -26386,7 +26386,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l -- Roman months, with upper and lower case. 
SELECT i, to_char(i * interval '1mon', 'rm'), -@@ -2020,19 +955,19 @@ +@@ -2020,19 +973,19 @@ FROM generate_series(-13, 13) i; i | to_char | to_char -----+---------+--------- @@ -26418,7 +26418,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l 0 | | 1 | i | I 2 | ii | II -@@ -2051,54 +986,54 @@ +@@ -2051,54 +1004,54 @@ -- timestamp numeric fields constructor SELECT make_timestamp(2014, 12, 28, 6, 30, 45.887); @@ -26510,7 +26510,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamp.out --l (28 rows) -- the LIMIT should allow this to terminate in a reasonable amount of time -@@ -2106,22 +1041,22 @@ +@@ -2106,22 +1059,22 @@ select generate_series('2022-01-01 00:00'::timestamp, 'infinity'::timestamp, '1 month'::interval) limit 10; @@ -26555,7 +26555,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - INSERT INTO TIMESTAMPTZ_TBL VALUES ('tomorrow EST'); +ERROR: parsing as type timestamp: unimplemented: timestamp abbreviations not supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/31710/v24.2 ++See: https://go.crdb.dev/issue-v/31710/dev INSERT INTO TIMESTAMPTZ_TBL VALUES ('tomorrow zulu'); +ERROR: current transaction is aborted, commands ignored until end of transaction block SELECT count(*) AS One FROM TIMESTAMPTZ_TBL WHERE d1 = timestamp with time zone 'today'; @@ -26622,46 +26622,38 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - (1 row) SELECT count(*) AS two FROM TIMESTAMPTZ_TBL WHERE d1 = timestamp(2) with time zone 'now'; -@@ -86,6 +70,7 @@ - -- Special values - INSERT INTO TIMESTAMPTZ_TBL VALUES ('-infinity'); - INSERT INTO TIMESTAMPTZ_TBL VALUES ('infinity'); -+ERROR: timestamp "294277-01-01T00:00:00Z" exceeds supported timestamp bounds - INSERT INTO TIMESTAMPTZ_TBL VALUES ('epoch'); - SELECT timestamptz 'infinity' = timestamptz '+infinity' AS t; - t -@@ -95,12 +80,30 @@ +@@ -95,12 +79,30 @@ -- Postgres v6.0 standard output format INSERT INTO TIMESTAMPTZ_TBL VALUES ('Mon Feb 10 17:32:01 1997 PST'); +ERROR: parsing as type timestamp: unimplemented: timestamp abbreviations not supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/31710/v24.2 ++See: https://go.crdb.dev/issue-v/31710/dev -- Variations on Postgres v6.1 standard output format INSERT INTO TIMESTAMPTZ_TBL VALUES ('Mon Feb 10 17:32:01.000001 1997 PST'); +ERROR: parsing as type timestamp: unimplemented: timestamp abbreviations not supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/31710/v24.2 ++See: https://go.crdb.dev/issue-v/31710/dev INSERT INTO TIMESTAMPTZ_TBL VALUES ('Mon Feb 10 17:32:01.999999 1997 PST'); +ERROR: parsing as type timestamp: unimplemented: timestamp abbreviations not supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/31710/v24.2 ++See: https://go.crdb.dev/issue-v/31710/dev INSERT INTO TIMESTAMPTZ_TBL VALUES ('Mon Feb 10 17:32:01.4 1997 PST'); +ERROR: parsing as type timestamp: unimplemented: timestamp abbreviations not supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/31710/v24.2 ++See: https://go.crdb.dev/issue-v/31710/dev INSERT INTO TIMESTAMPTZ_TBL VALUES ('Mon Feb 10 17:32:01.5 1997 PST'); +ERROR: parsing as type timestamp: unimplemented: timestamp abbreviations not supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/31710/v24.2 ++See: https://go.crdb.dev/issue-v/31710/dev INSERT INTO TIMESTAMPTZ_TBL VALUES ('Mon Feb 10 17:32:01.6 1997 PST'); +ERROR: parsing as type timestamp: unimplemented: timestamp abbreviations not supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/31710/v24.2 ++See: https://go.crdb.dev/issue-v/31710/dev -- ISO 8601 format INSERT INTO TIMESTAMPTZ_TBL VALUES ('1997-01-02'); INSERT INTO TIMESTAMPTZ_TBL VALUES ('1997-01-02 03:04:05'); -@@ -112,613 +115,445 @@ +@@ -112,613 +114,449 @@ INSERT INTO TIMESTAMPTZ_TBL VALUES ('2001-09-22T18:19:20'); -- POSIX format (note that the timezone abbrev is just decoration here) INSERT INTO TIMESTAMPTZ_TBL VALUES ('2000-03-15 08:14:01 GMT+8'); @@ -26679,11 +26671,11 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - INSERT INTO TIMESTAMPTZ_TBL VALUES ('2000-03-15 03:14:04 PST+8'); +ERROR: parsing as type timestamp: unimplemented: timestamp abbreviations not supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/31710/v24.2 ++See: https://go.crdb.dev/issue-v/31710/dev INSERT INTO TIMESTAMPTZ_TBL VALUES ('2000-03-15 02:14:05 MST+7:00'); +ERROR: parsing as type timestamp: unimplemented: timestamp abbreviations not supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/31710/v24.2 ++See: https://go.crdb.dev/issue-v/31710/dev -- Variations for acceptable input formats INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 10 17:32:01 1997 -0800'); +ERROR: parsing as type timestamp: unexpected separator ':' for field Hour @@ -26699,19 +26691,19 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - INSERT INTO TIMESTAMPTZ_TBL VALUES ('1997-02-10 17:32:01 PST'); +ERROR: parsing as type timestamp: unimplemented: timestamp abbreviations not supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/31710/v24.2 ++See: https://go.crdb.dev/issue-v/31710/dev INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb-10-1997 17:32:01 PST'); +ERROR: parsing as type timestamp: unimplemented: timestamp abbreviations not supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/31710/v24.2 ++See: https://go.crdb.dev/issue-v/31710/dev INSERT INTO TIMESTAMPTZ_TBL VALUES ('02-10-1997 17:32:01 PST'); +ERROR: parsing as type timestamp: unimplemented: timestamp abbreviations not supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/31710/v24.2 ++See: https://go.crdb.dev/issue-v/31710/dev INSERT INTO TIMESTAMPTZ_TBL VALUES ('19970210 173201 PST'); +ERROR: parsing as type timestamp: unimplemented: timestamp abbreviations not supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/31710/v24.2 ++See: https://go.crdb.dev/issue-v/31710/dev set datestyle to ymd; INSERT INTO TIMESTAMPTZ_TBL VALUES ('97FEB10 5:32:01PM UTC'); +ERROR: parsing as type timestamp: could not parse "97FEB10 5:32:01PM UTC" @@ -26824,7 +26816,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - INSERT INTO TIMESTAMPTZ_TBL VALUES ('1997-06-10 18:32:01 PDT'); +ERROR: parsing as type timestamp: unimplemented: timestamp abbreviations not supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/31710/v24.2 ++See: https://go.crdb.dev/issue-v/31710/dev INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 10 17:32:01 1997'); +ERROR: parsing as type timestamp: unexpected separator ':' for field Hour +DETAIL: Wanted: [ Era Hour Minute Second Nanos Meridian TZHour TZMinute TZSecond ] @@ -26998,7 +26990,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - - +ERROR: parsing as type timestamp: unimplemented: timestamp abbreviations not supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/31710/v24.2 ++See: https://go.crdb.dev/issue-v/31710/dev SELECT 'Wed Jul 11 10:51:14 PST+03:00 2001'::timestamptz; - timestamptz ------------------------------- @@ -27007,12 +26999,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - - +ERROR: parsing as type timestamp: unimplemented: timestamp abbreviations not supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/31710/v24.2 ++See: https://go.crdb.dev/issue-v/31710/dev SELECT d1 FROM TIMESTAMPTZ_TBL; - d1 ---------------------------------- -- -infinity -- infinity ++ d1 ++------------------------ + -infinity + infinity - Wed Dec 31 16:00:00 1969 PST - Mon Feb 10 17:32:01 1997 PST - Mon Feb 10 17:32:01 1997 PST @@ -27078,9 +27072,6 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - - Sun Dec 31 17:32:01 2000 PST - Mon Jan 01 17:32:01 2001 PST -(66 rows) -+ d1 -+--------------------------- -+ 4714-11-23 16:00:00-08 BC + 1969-12-31 16:00:00-08 + 1997-01-02 00:00:00-08 + 1997-01-02 03:04:05-08 @@ -27095,7 +27086,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - + 1997-02-10 09:32:01-08 + 1997-02-10 14:32:01-08 + 1997-07-10 14:32:01-07 -+(15 rows) ++(16 rows) -- Check behavior at the boundaries of the timestamp range SELECT '4714-11-24 00:00:00+00 BC'::timestamptz; @@ -27124,7 +27115,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - - +ERROR: parsing as type timestamp: unimplemented: timestamp abbreviations not supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/31710/v24.2 ++See: https://go.crdb.dev/issue-v/31710/dev SELECT '4714-11-23 23:59:59+00 BC'::timestamptz; -- out of range -ERROR: timestamp out of range: "4714-11-23 23:59:59+00 BC" -LINE 1: SELECT '4714-11-23 23:59:59+00 BC'::timestamptz; @@ -27163,116 +27154,117 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - WHERE d1 > timestamp with time zone '1997-01-02'; - d1 --------------------------------- -- infinity -- Mon Feb 10 17:32:01 1997 PST -- Mon Feb 10 17:32:01 1997 PST -- Mon Feb 10 17:32:02 1997 PST -- Mon Feb 10 17:32:01.4 1997 PST -- Mon Feb 10 17:32:01.5 1997 PST -- Mon Feb 10 17:32:01.6 1997 PST -- Thu Jan 02 03:04:05 1997 PST -- Mon Feb 10 17:32:01 1997 PST -- Mon Feb 10 17:32:01 1997 PST -- Mon Feb 10 17:32:01 1997 PST -- Mon Feb 10 17:32:01 1997 PST -- Tue Jun 10 17:32:01 1997 PDT -- Sat Sep 22 18:19:20 2001 PDT -- Wed Mar 15 08:14:01 2000 PST -- Wed Mar 15 04:14:02 2000 PST -- Wed Mar 15 02:14:03 2000 PST -- Wed Mar 15 03:14:04 2000 PST -- Wed Mar 15 01:14:05 2000 PST -- Mon Feb 10 17:32:01 1997 PST -- Mon Feb 10 17:32:01 1997 PST -- Mon Feb 10 17:32:00 1997 PST -- Mon Feb 10 17:32:01 1997 PST -- Mon Feb 10 17:32:01 1997 PST -- Mon Feb 10 17:32:01 1997 PST -- Mon Feb 10 17:32:01 1997 PST -- Mon Feb 10 17:32:01 1997 PST -- Mon Feb 10 09:32:01 1997 PST -- Mon Feb 10 09:32:01 1997 PST -- Mon Feb 10 09:32:01 1997 PST -- Mon Feb 10 14:32:01 1997 PST -- Thu Jul 10 14:32:01 1997 PDT -- Tue Jun 10 18:32:01 1997 PDT -- Mon Feb 10 17:32:01 1997 PST -- Tue Feb 11 17:32:01 1997 PST -- Wed Feb 12 17:32:01 1997 PST -- Thu Feb 13 17:32:01 1997 PST -- Fri Feb 14 17:32:01 1997 PST -- Sat Feb 15 17:32:01 1997 PST -- Sun Feb 16 17:32:01 1997 PST -- Sun Feb 16 17:32:01 1997 PST -- Sat Feb 16 17:32:01 2097 PST -- Fri Feb 28 17:32:01 1997 PST -- Sat Mar 01 17:32:01 1997 PST -- Tue Dec 30 17:32:01 1997 PST -- Wed Dec 31 17:32:01 1997 PST -- Fri Dec 31 17:32:01 1999 PST -- Sat Jan 01 17:32:01 2000 PST -- Sun Dec 31 17:32:01 2000 PST -- Mon Jan 01 17:32:01 2001 PST --(50 rows) -+ d1 -+------------------------ -+ 1997-01-02 03:04:05-08 -+ 1997-02-10 17:32:01-08 -+ 1997-02-10 17:32:01-08 -+ 1997-02-10 17:32:01-08 -+ 1997-02-10 17:32:01-08 -+ 1997-06-10 17:32:01-07 -+ 2001-09-22 18:19:20-07 -+ 1997-02-10 17:32:01-08 -+ 1997-02-10 09:32:01-08 -+ 1997-02-10 09:32:01-08 -+ 1997-02-10 14:32:01-08 -+ 1997-07-10 14:32:01-07 -+(12 rows) - - SELECT d1 FROM TIMESTAMPTZ_TBL - WHERE d1 < timestamp with time zone '1997-01-02'; -- d1 ----------------------------------- -- -infinity -- Wed Dec 31 16:00:00 1969 PST -- Tue Feb 16 17:32:01 0097 PST BC -- Sat Feb 16 17:32:01 0097 PST -- Thu Feb 16 17:32:01 0597 PST -- Tue Feb 16 17:32:01 1097 PST -- Sat Feb 16 17:32:01 1697 PST -- Thu Feb 16 17:32:01 1797 PST -- Tue Feb 16 17:32:01 1897 PST -- Wed Feb 28 17:32:01 1996 PST -- Thu Feb 29 17:32:01 1996 PST -- Fri Mar 01 17:32:01 1996 PST -- Mon Dec 30 17:32:01 1996 PST -- Tue Dec 31 17:32:01 1996 PST -- Wed Jan 01 17:32:01 1997 PST --(15 rows) -+ d1 -+--------------------------- -+ 4714-11-23 16:00:00-08 BC -+ 1969-12-31 16:00:00-08 -+(2 rows) - - SELECT d1 FROM TIMESTAMPTZ_TBL - WHERE d1 = timestamp with time zone '1997-01-02'; -- d1 -------------------------------- -- Thu Jan 02 00:00:00 1997 PST + d1 +------------------------ -+ 1997-01-02 00:00:00-08 - (1 row) - - SELECT d1 FROM TIMESTAMPTZ_TBL - WHERE d1 != timestamp with time zone '1997-01-02'; -- d1 ----------------------------------- -- -infinity -- infinity -- Wed Dec 31 16:00:00 1969 PST + 
infinity +- Mon Feb 10 17:32:01 1997 PST +- Mon Feb 10 17:32:01 1997 PST +- Mon Feb 10 17:32:02 1997 PST +- Mon Feb 10 17:32:01.4 1997 PST +- Mon Feb 10 17:32:01.5 1997 PST +- Mon Feb 10 17:32:01.6 1997 PST +- Thu Jan 02 03:04:05 1997 PST +- Mon Feb 10 17:32:01 1997 PST +- Mon Feb 10 17:32:01 1997 PST +- Mon Feb 10 17:32:01 1997 PST +- Mon Feb 10 17:32:01 1997 PST +- Tue Jun 10 17:32:01 1997 PDT +- Sat Sep 22 18:19:20 2001 PDT +- Wed Mar 15 08:14:01 2000 PST +- Wed Mar 15 04:14:02 2000 PST +- Wed Mar 15 02:14:03 2000 PST +- Wed Mar 15 03:14:04 2000 PST +- Wed Mar 15 01:14:05 2000 PST +- Mon Feb 10 17:32:01 1997 PST +- Mon Feb 10 17:32:01 1997 PST +- Mon Feb 10 17:32:00 1997 PST +- Mon Feb 10 17:32:01 1997 PST +- Mon Feb 10 17:32:01 1997 PST +- Mon Feb 10 17:32:01 1997 PST +- Mon Feb 10 17:32:01 1997 PST +- Mon Feb 10 17:32:01 1997 PST +- Mon Feb 10 09:32:01 1997 PST +- Mon Feb 10 09:32:01 1997 PST +- Mon Feb 10 09:32:01 1997 PST +- Mon Feb 10 14:32:01 1997 PST +- Thu Jul 10 14:32:01 1997 PDT +- Tue Jun 10 18:32:01 1997 PDT +- Mon Feb 10 17:32:01 1997 PST +- Tue Feb 11 17:32:01 1997 PST +- Wed Feb 12 17:32:01 1997 PST +- Thu Feb 13 17:32:01 1997 PST +- Fri Feb 14 17:32:01 1997 PST +- Sat Feb 15 17:32:01 1997 PST +- Sun Feb 16 17:32:01 1997 PST +- Sun Feb 16 17:32:01 1997 PST +- Sat Feb 16 17:32:01 2097 PST +- Fri Feb 28 17:32:01 1997 PST +- Sat Mar 01 17:32:01 1997 PST +- Tue Dec 30 17:32:01 1997 PST +- Wed Dec 31 17:32:01 1997 PST +- Fri Dec 31 17:32:01 1999 PST +- Sat Jan 01 17:32:01 2000 PST +- Sun Dec 31 17:32:01 2000 PST +- Mon Jan 01 17:32:01 2001 PST +-(50 rows) ++ 1997-01-02 03:04:05-08 ++ 1997-02-10 17:32:01-08 ++ 1997-02-10 17:32:01-08 ++ 1997-02-10 17:32:01-08 ++ 1997-02-10 17:32:01-08 ++ 1997-06-10 17:32:01-07 ++ 2001-09-22 18:19:20-07 ++ 1997-02-10 17:32:01-08 ++ 1997-02-10 09:32:01-08 ++ 1997-02-10 09:32:01-08 ++ 1997-02-10 14:32:01-08 ++ 1997-07-10 14:32:01-07 ++(13 rows) + + SELECT d1 FROM TIMESTAMPTZ_TBL + WHERE d1 < timestamp with time zone '1997-01-02'; +- d1 +---------------------------------- ++ d1 ++------------------------ + -infinity +- Wed Dec 31 16:00:00 1969 PST +- Tue Feb 16 17:32:01 0097 PST BC +- Sat Feb 16 17:32:01 0097 PST +- Thu Feb 16 17:32:01 0597 PST +- Tue Feb 16 17:32:01 1097 PST +- Sat Feb 16 17:32:01 1697 PST +- Thu Feb 16 17:32:01 1797 PST +- Tue Feb 16 17:32:01 1897 PST +- Wed Feb 28 17:32:01 1996 PST +- Thu Feb 29 17:32:01 1996 PST +- Fri Mar 01 17:32:01 1996 PST +- Mon Dec 30 17:32:01 1996 PST +- Tue Dec 31 17:32:01 1996 PST +- Wed Jan 01 17:32:01 1997 PST +-(15 rows) ++ 1969-12-31 16:00:00-08 ++(2 rows) + + SELECT d1 FROM TIMESTAMPTZ_TBL + WHERE d1 = timestamp with time zone '1997-01-02'; +- d1 +------------------------------- +- Thu Jan 02 00:00:00 1997 PST ++ d1 ++------------------------ ++ 1997-01-02 00:00:00-08 + (1 row) + + SELECT d1 FROM TIMESTAMPTZ_TBL + WHERE d1 != timestamp with time zone '1997-01-02'; +- d1 +---------------------------------- ++ d1 ++------------------------ + -infinity + infinity +- Wed Dec 31 16:00:00 1969 PST - Mon Feb 10 17:32:01 1997 PST - Mon Feb 10 17:32:01 1997 PST - Mon Feb 10 17:32:02 1997 PST @@ -27336,9 +27328,6 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - - Sun Dec 31 17:32:01 2000 PST - Mon Jan 01 17:32:01 2001 PST -(65 rows) -+ d1 -+--------------------------- -+ 4714-11-23 16:00:00-08 BC + 1969-12-31 16:00:00-08 + 1997-01-02 03:04:05-08 + 1997-02-10 17:32:01-08 @@ -27352,13 +27341,15 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out 
- + 1997-02-10 09:32:01-08 + 1997-02-10 14:32:01-08 + 1997-07-10 14:32:01-07 -+(14 rows) ++(15 rows) SELECT d1 FROM TIMESTAMPTZ_TBL WHERE d1 <= timestamp with time zone '1997-01-02'; - d1 ---------------------------------- -- -infinity ++ d1 ++------------------------ + -infinity - Wed Dec 31 16:00:00 1969 PST - Thu Jan 02 00:00:00 1997 PST - Tue Feb 16 17:32:01 0097 PST BC @@ -27375,9 +27366,6 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - - Tue Dec 31 17:32:01 1996 PST - Wed Jan 01 17:32:01 1997 PST -(16 rows) -+ d1 -+--------------------------- -+ 4714-11-23 16:00:00-08 BC + 1969-12-31 16:00:00-08 + 1997-01-02 00:00:00-08 +(3 rows) @@ -27386,7 +27374,9 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - WHERE d1 >= timestamp with time zone '1997-01-02'; - d1 --------------------------------- -- infinity ++ d1 ++------------------------ + infinity - Mon Feb 10 17:32:01 1997 PST - Mon Feb 10 17:32:01 1997 PST - Mon Feb 10 17:32:02 1997 PST @@ -27438,8 +27428,6 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - - Sun Dec 31 17:32:01 2000 PST - Mon Jan 01 17:32:01 2001 PST -(51 rows) -+ d1 -+------------------------ + 1997-01-02 00:00:00-08 + 1997-01-02 03:04:05-08 + 1997-02-10 17:32:01-08 @@ -27453,7 +27441,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - + 1997-02-10 09:32:01-08 + 1997-02-10 14:32:01-08 + 1997-07-10 14:32:01-07 -+(13 rows) ++(14 rows) SELECT d1 - timestamp with time zone '1997-01-02' AS diff FROM TIMESTAMPTZ_TBL WHERE d1 BETWEEN '1902-01-01' AND '2038-01-01'; @@ -27571,7 +27559,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - -- verify date_bin behaves the same as date_trunc for relevant intervals SELECT str, -@@ -734,16 +569,7 @@ +@@ -734,16 +572,7 @@ ('microsecond', '1 us') ) intervals (str, interval), (VALUES (timestamptz '2020-02-29 15:44:17.71393+00')) ts (ts); @@ -27589,7 +27577,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - -- bin timestamps into arbitrary intervals SELECT interval, -@@ -762,98 +588,42 @@ +@@ -762,98 +591,42 @@ ) intervals (interval), (VALUES (timestamptz '2020-02-11 15:44:17.71393')) ts (ts), (VALUES (timestamptz '2001-01-01')) origin (origin); @@ -27711,7 +27699,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - -- DATE_PART (timestamptz_part) SELECT d1 as timestamptz, -@@ -861,224 +631,53 @@ +@@ -861,224 +634,55 @@ date_part( 'day', d1) AS day, date_part( 'hour', d1) AS hour, date_part( 'minute', d1) AS minute, date_part( 'second', d1) AS second FROM TIMESTAMPTZ_TBL; @@ -27784,24 +27772,25 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - - Sun Dec 31 17:32:01 2000 PST | 2000 | 12 | 31 | 17 | 32 | 1 - Mon Jan 01 17:32:01 2001 PST | 2001 | 1 | 1 | 17 | 32 | 1 -(66 rows) -+ timestamptz | year | month | day | hour | minute | second -+---------------------------+-------+-------+-----+------+--------+-------- -+ 4714-11-23 16:00:00-08 BC | -4713 | 11 | 23 | 16 | 0 | 0 -+ 1969-12-31 16:00:00-08 | 1969 | 12 | 31 | 16 | 0 | 0 -+ 1997-01-02 00:00:00-08 | 1997 | 1 | 2 | 0 | 0 | 0 -+ 1997-01-02 03:04:05-08 | 1997 | 1 | 2 | 3 | 4 | 5 -+ 1997-02-10 17:32:01-08 | 1997 | 2 | 10 | 17 | 32 | 1 -+ 1997-02-10 17:32:01-08 | 1997 | 2 | 10 | 17 | 32 | 1 -+ 1997-02-10 17:32:01-08 | 1997 | 2 | 10 | 17 | 32 | 1 -+ 1997-02-10 17:32:01-08 | 1997 | 2 | 10 | 17 | 32 | 1 -+ 1997-06-10 17:32:01-07 
| 1997 | 6 | 10 | 17 | 32 | 1 -+ 2001-09-22 18:19:20-07 | 2001 | 9 | 22 | 18 | 19 | 20 -+ 1997-02-10 17:32:01-08 | 1997 | 2 | 10 | 17 | 32 | 1 -+ 1997-02-10 09:32:01-08 | 1997 | 2 | 10 | 9 | 32 | 1 -+ 1997-02-10 09:32:01-08 | 1997 | 2 | 10 | 9 | 32 | 1 -+ 1997-02-10 14:32:01-08 | 1997 | 2 | 10 | 14 | 32 | 1 -+ 1997-07-10 14:32:01-07 | 1997 | 7 | 10 | 14 | 32 | 1 -+(15 rows) ++ timestamptz | year | month | day | hour | minute | second ++------------------------+--------+-------+-----+------+--------+----------- ++ -infinity | -4714 | 11 | 22 | 16 | 0 | 0 ++ infinity | 294277 | 1 | 1 | 15 | 59 | 59.999999 ++ 1969-12-31 16:00:00-08 | 1969 | 12 | 31 | 16 | 0 | 0 ++ 1997-01-02 00:00:00-08 | 1997 | 1 | 2 | 0 | 0 | 0 ++ 1997-01-02 03:04:05-08 | 1997 | 1 | 2 | 3 | 4 | 5 ++ 1997-02-10 17:32:01-08 | 1997 | 2 | 10 | 17 | 32 | 1 ++ 1997-02-10 17:32:01-08 | 1997 | 2 | 10 | 17 | 32 | 1 ++ 1997-02-10 17:32:01-08 | 1997 | 2 | 10 | 17 | 32 | 1 ++ 1997-02-10 17:32:01-08 | 1997 | 2 | 10 | 17 | 32 | 1 ++ 1997-06-10 17:32:01-07 | 1997 | 6 | 10 | 17 | 32 | 1 ++ 2001-09-22 18:19:20-07 | 2001 | 9 | 22 | 18 | 19 | 20 ++ 1997-02-10 17:32:01-08 | 1997 | 2 | 10 | 17 | 32 | 1 ++ 1997-02-10 09:32:01-08 | 1997 | 2 | 10 | 9 | 32 | 1 ++ 1997-02-10 09:32:01-08 | 1997 | 2 | 10 | 9 | 32 | 1 ++ 1997-02-10 14:32:01-08 | 1997 | 2 | 10 | 14 | 32 | 1 ++ 1997-07-10 14:32:01-07 | 1997 | 7 | 10 | 14 | 32 | 1 ++(16 rows) SELECT d1 as timestamptz, date_part( 'quarter', d1) AS quarter, date_part( 'msec', d1) AS msec, @@ -27952,28 +27941,29 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - - Sun Dec 31 17:32:01 2000 PST | 2000 | 52 | 7 | 0 | 366 - Mon Jan 01 17:32:01 2001 PST | 2001 | 1 | 1 | 1 | 1 -(66 rows) -+ timestamptz | isoyear | week | isodow | dow | doy -+---------------------------+---------+------+--------+-----+----- -+ 4714-11-23 16:00:00-08 BC | -4713 | 47 | 7 | 0 | 327 -+ 1969-12-31 16:00:00-08 | 1970 | 1 | 3 | 3 | 365 -+ 1997-01-02 00:00:00-08 | 1997 | 1 | 4 | 4 | 2 -+ 1997-01-02 03:04:05-08 | 1997 | 1 | 4 | 4 | 2 -+ 1997-02-10 17:32:01-08 | 1997 | 7 | 1 | 1 | 41 -+ 1997-02-10 17:32:01-08 | 1997 | 7 | 1 | 1 | 41 -+ 1997-02-10 17:32:01-08 | 1997 | 7 | 1 | 1 | 41 -+ 1997-02-10 17:32:01-08 | 1997 | 7 | 1 | 1 | 41 -+ 1997-06-10 17:32:01-07 | 1997 | 24 | 2 | 2 | 161 -+ 2001-09-22 18:19:20-07 | 2001 | 38 | 6 | 6 | 265 -+ 1997-02-10 17:32:01-08 | 1997 | 7 | 1 | 1 | 41 -+ 1997-02-10 09:32:01-08 | 1997 | 7 | 1 | 1 | 41 -+ 1997-02-10 09:32:01-08 | 1997 | 7 | 1 | 1 | 41 -+ 1997-02-10 14:32:01-08 | 1997 | 7 | 1 | 1 | 41 -+ 1997-07-10 14:32:01-07 | 1997 | 28 | 4 | 4 | 191 -+(15 rows) ++ timestamptz | isoyear | week | isodow | dow | doy ++------------------------+---------+------+--------+-----+----- ++ -infinity | -4714 | 47 | 5 | 5 | 326 ++ infinity | 294277 | 1 | 1 | 1 | 1 ++ 1969-12-31 16:00:00-08 | 1970 | 1 | 3 | 3 | 365 ++ 1997-01-02 00:00:00-08 | 1997 | 1 | 4 | 4 | 2 ++ 1997-01-02 03:04:05-08 | 1997 | 1 | 4 | 4 | 2 ++ 1997-02-10 17:32:01-08 | 1997 | 7 | 1 | 1 | 41 ++ 1997-02-10 17:32:01-08 | 1997 | 7 | 1 | 1 | 41 ++ 1997-02-10 17:32:01-08 | 1997 | 7 | 1 | 1 | 41 ++ 1997-02-10 17:32:01-08 | 1997 | 7 | 1 | 1 | 41 ++ 1997-06-10 17:32:01-07 | 1997 | 24 | 2 | 2 | 161 ++ 2001-09-22 18:19:20-07 | 2001 | 38 | 6 | 6 | 265 ++ 1997-02-10 17:32:01-08 | 1997 | 7 | 1 | 1 | 41 ++ 1997-02-10 09:32:01-08 | 1997 | 7 | 1 | 1 | 41 ++ 1997-02-10 09:32:01-08 | 1997 | 7 | 1 | 1 | 41 ++ 1997-02-10 14:32:01-08 | 1997 | 7 | 1 | 1 | 41 ++ 1997-07-10 14:32:01-07 | 1997 | 28 | 4 | 4 | 191 ++(16 rows) SELECT d1 as 
timestamptz, date_part( 'decade', d1) AS decade, -@@ -1087,150 +686,48 @@ +@@ -1087,150 +691,50 @@ round(date_part( 'julian', d1)) AS julian, date_part( 'epoch', d1) AS epoch FROM TIMESTAMPTZ_TBL; @@ -28046,24 +28036,25 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - - Sun Dec 31 17:32:01 2000 PST | 200 | 20 | 2 | 2451911 | 978312721 - Mon Jan 01 17:32:01 2001 PST | 200 | 21 | 3 | 2451912 | 978399121 -(66 rows) -+ timestamptz | decade | century | millennium | julian | epoch -+---------------------------+--------+---------+------------+--------------+------------------------ -+ 4714-11-23 16:00:00-08 BC | -472 | -48 | -5 | -0 | -7.952618389194932e+09 -+ 1969-12-31 16:00:00-08 | 196 | 20 | 2 | 2.440588e+06 | 0 -+ 1997-01-02 00:00:00-08 | 199 | 20 | 2 | 2.450451e+06 | 8.52192e+08 -+ 1997-01-02 03:04:05-08 | 199 | 20 | 2 | 2.450451e+06 | 8.52203045e+08 -+ 1997-02-10 17:32:01-08 | 199 | 20 | 2 | 2.450491e+06 | 8.55624721e+08 -+ 1997-02-10 17:32:01-08 | 199 | 20 | 2 | 2.450491e+06 | 8.55624721e+08 -+ 1997-02-10 17:32:01-08 | 199 | 20 | 2 | 2.450491e+06 | 8.55624721e+08 -+ 1997-02-10 17:32:01-08 | 199 | 20 | 2 | 2.450491e+06 | 8.55624721e+08 -+ 1997-06-10 17:32:01-07 | 199 | 20 | 2 | 2.450611e+06 | 8.65989121e+08 -+ 2001-09-22 18:19:20-07 | 200 | 21 | 3 | 2.452176e+06 | 1.00120796e+09 -+ 1997-02-10 17:32:01-08 | 199 | 20 | 2 | 2.450491e+06 | 8.55624721e+08 -+ 1997-02-10 09:32:01-08 | 199 | 20 | 2 | 2.45049e+06 | 8.55595921e+08 -+ 1997-02-10 09:32:01-08 | 199 | 20 | 2 | 2.45049e+06 | 8.55595921e+08 -+ 1997-02-10 14:32:01-08 | 199 | 20 | 2 | 2.450491e+06 | 8.55613921e+08 -+ 1997-07-10 14:32:01-07 | 199 | 20 | 2 | 2.450641e+06 | 8.68570321e+08 -+(15 rows) ++ timestamptz | decade | century | millennium | julian | epoch ++------------------------+--------+---------+------------+----------------+------------------------ ++ -infinity | -472 | -48 | -5 | -366 | -7.984240789194932e+09 ++ infinity | 29427 | 2943 | 295 | 1.09203529e+08 | 9.46065545224191e+08 ++ 1969-12-31 16:00:00-08 | 196 | 20 | 2 | 2.440588e+06 | 0 ++ 1997-01-02 00:00:00-08 | 199 | 20 | 2 | 2.450451e+06 | 8.52192e+08 ++ 1997-01-02 03:04:05-08 | 199 | 20 | 2 | 2.450451e+06 | 8.52203045e+08 ++ 1997-02-10 17:32:01-08 | 199 | 20 | 2 | 2.450491e+06 | 8.55624721e+08 ++ 1997-02-10 17:32:01-08 | 199 | 20 | 2 | 2.450491e+06 | 8.55624721e+08 ++ 1997-02-10 17:32:01-08 | 199 | 20 | 2 | 2.450491e+06 | 8.55624721e+08 ++ 1997-02-10 17:32:01-08 | 199 | 20 | 2 | 2.450491e+06 | 8.55624721e+08 ++ 1997-06-10 17:32:01-07 | 199 | 20 | 2 | 2.450611e+06 | 8.65989121e+08 ++ 2001-09-22 18:19:20-07 | 200 | 21 | 3 | 2.452176e+06 | 1.00120796e+09 ++ 1997-02-10 17:32:01-08 | 199 | 20 | 2 | 2.450491e+06 | 8.55624721e+08 ++ 1997-02-10 09:32:01-08 | 199 | 20 | 2 | 2.45049e+06 | 8.55595921e+08 ++ 1997-02-10 09:32:01-08 | 199 | 20 | 2 | 2.45049e+06 | 8.55595921e+08 ++ 1997-02-10 14:32:01-08 | 199 | 20 | 2 | 2.450491e+06 | 8.55613921e+08 ++ 1997-07-10 14:32:01-07 | 199 | 20 | 2 | 2.450641e+06 | 8.68570321e+08 ++(16 rows) SELECT d1 as timestamptz, date_part( 'timezone', d1) AS timezone, @@ -28139,28 +28130,29 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - - Sun Dec 31 17:32:01 2000 PST | -28800 | -8 | 0 - Mon Jan 01 17:32:01 2001 PST | -28800 | -8 | 0 -(66 rows) -+ timestamptz | timezone | timezone_hour | timezone_minute -+---------------------------+----------+---------------+----------------- -+ 4714-11-23 16:00:00-08 BC | -28800 | -8 | 0 -+ 1969-12-31 16:00:00-08 | -28800 | -8 | 0 -+ 1997-01-02 
00:00:00-08 | -28800 | -8 | 0 -+ 1997-01-02 03:04:05-08 | -28800 | -8 | 0 -+ 1997-02-10 17:32:01-08 | -28800 | -8 | 0 -+ 1997-02-10 17:32:01-08 | -28800 | -8 | 0 -+ 1997-02-10 17:32:01-08 | -28800 | -8 | 0 -+ 1997-02-10 17:32:01-08 | -28800 | -8 | 0 -+ 1997-06-10 17:32:01-07 | -25200 | -7 | 0 -+ 2001-09-22 18:19:20-07 | -25200 | -7 | 0 -+ 1997-02-10 17:32:01-08 | -28800 | -8 | 0 -+ 1997-02-10 09:32:01-08 | -28800 | -8 | 0 -+ 1997-02-10 09:32:01-08 | -28800 | -8 | 0 -+ 1997-02-10 14:32:01-08 | -28800 | -8 | 0 -+ 1997-07-10 14:32:01-07 | -25200 | -7 | 0 -+(15 rows) ++ timestamptz | timezone | timezone_hour | timezone_minute ++------------------------+----------+---------------+----------------- ++ -infinity | -28800 | -8 | 0 ++ infinity | -28800 | -8 | 0 ++ 1969-12-31 16:00:00-08 | -28800 | -8 | 0 ++ 1997-01-02 00:00:00-08 | -28800 | -8 | 0 ++ 1997-01-02 03:04:05-08 | -28800 | -8 | 0 ++ 1997-02-10 17:32:01-08 | -28800 | -8 | 0 ++ 1997-02-10 17:32:01-08 | -28800 | -8 | 0 ++ 1997-02-10 17:32:01-08 | -28800 | -8 | 0 ++ 1997-02-10 17:32:01-08 | -28800 | -8 | 0 ++ 1997-06-10 17:32:01-07 | -25200 | -7 | 0 ++ 2001-09-22 18:19:20-07 | -25200 | -7 | 0 ++ 1997-02-10 17:32:01-08 | -28800 | -8 | 0 ++ 1997-02-10 09:32:01-08 | -28800 | -8 | 0 ++ 1997-02-10 09:32:01-08 | -28800 | -8 | 0 ++ 1997-02-10 14:32:01-08 | -28800 | -8 | 0 ++ 1997-07-10 14:32:01-07 | -25200 | -7 | 0 ++(16 rows) -- extract implementation is mostly the same as date_part, so only -- test a few cases for additional coverage. -@@ -1241,897 +738,289 @@ +@@ -1241,897 +745,301 @@ round(extract(julian from d1)) AS julian, extract(epoch from d1) AS epoch FROM TIMESTAMPTZ_TBL; @@ -28233,24 +28225,25 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - - Sun Dec 31 17:32:01 2000 PST | 1000000 | 1000.000 | 1.000000 | 2451911 | 978312721.000000 - Mon Jan 01 17:32:01 2001 PST | 1000000 | 1000.000 | 1.000000 | 2451912 | 978399121.000000 -(66 rows) -+ timestamp | microseconds | milliseconds | seconds | julian | epoch -+---------------------------+--------------+--------------+---------+--------------+------------------------ -+ 4714-11-23 16:00:00-08 BC | 0 | 0 | 0 | -0 | -7.952618389194932e+09 -+ 1969-12-31 16:00:00-08 | 0 | 0 | 0 | 2.440588e+06 | 0 -+ 1997-01-02 00:00:00-08 | 0 | 0 | 0 | 2.450451e+06 | 8.52192e+08 -+ 1997-01-02 03:04:05-08 | 5e+06 | 5000 | 5 | 2.450451e+06 | 8.52203045e+08 -+ 1997-02-10 17:32:01-08 | 1e+06 | 1000 | 1 | 2.450491e+06 | 8.55624721e+08 -+ 1997-02-10 17:32:01-08 | 1e+06 | 1000 | 1 | 2.450491e+06 | 8.55624721e+08 -+ 1997-02-10 17:32:01-08 | 1e+06 | 1000 | 1 | 2.450491e+06 | 8.55624721e+08 -+ 1997-02-10 17:32:01-08 | 1e+06 | 1000 | 1 | 2.450491e+06 | 8.55624721e+08 -+ 1997-06-10 17:32:01-07 | 1e+06 | 1000 | 1 | 2.450611e+06 | 8.65989121e+08 -+ 2001-09-22 18:19:20-07 | 2e+07 | 20000 | 20 | 2.452176e+06 | 1.00120796e+09 -+ 1997-02-10 17:32:01-08 | 1e+06 | 1000 | 1 | 2.450491e+06 | 8.55624721e+08 -+ 1997-02-10 09:32:01-08 | 1e+06 | 1000 | 1 | 2.45049e+06 | 8.55595921e+08 -+ 1997-02-10 09:32:01-08 | 1e+06 | 1000 | 1 | 2.45049e+06 | 8.55595921e+08 -+ 1997-02-10 14:32:01-08 | 1e+06 | 1000 | 1 | 2.450491e+06 | 8.55613921e+08 -+ 1997-07-10 14:32:01-07 | 1e+06 | 1000 | 1 | 2.450641e+06 | 8.68570321e+08 -+(15 rows) ++ timestamp | microseconds | milliseconds | seconds | julian | epoch ++------------------------+---------------+--------------+-----------+----------------+------------------------ ++ -infinity | 0 | 0 | 0 | -366 | -7.984240789194932e+09 ++ infinity | 5.9999999e+07 | 59999.999 | 59.999999 
| 1.09203529e+08 | 9.46065545224191e+08 ++ 1969-12-31 16:00:00-08 | 0 | 0 | 0 | 2.440588e+06 | 0 ++ 1997-01-02 00:00:00-08 | 0 | 0 | 0 | 2.450451e+06 | 8.52192e+08 ++ 1997-01-02 03:04:05-08 | 5e+06 | 5000 | 5 | 2.450451e+06 | 8.52203045e+08 ++ 1997-02-10 17:32:01-08 | 1e+06 | 1000 | 1 | 2.450491e+06 | 8.55624721e+08 ++ 1997-02-10 17:32:01-08 | 1e+06 | 1000 | 1 | 2.450491e+06 | 8.55624721e+08 ++ 1997-02-10 17:32:01-08 | 1e+06 | 1000 | 1 | 2.450491e+06 | 8.55624721e+08 ++ 1997-02-10 17:32:01-08 | 1e+06 | 1000 | 1 | 2.450491e+06 | 8.55624721e+08 ++ 1997-06-10 17:32:01-07 | 1e+06 | 1000 | 1 | 2.450611e+06 | 8.65989121e+08 ++ 2001-09-22 18:19:20-07 | 2e+07 | 20000 | 20 | 2.452176e+06 | 1.00120796e+09 ++ 1997-02-10 17:32:01-08 | 1e+06 | 1000 | 1 | 2.450491e+06 | 8.55624721e+08 ++ 1997-02-10 09:32:01-08 | 1e+06 | 1000 | 1 | 2.45049e+06 | 8.55595921e+08 ++ 1997-02-10 09:32:01-08 | 1e+06 | 1000 | 1 | 2.45049e+06 | 8.55595921e+08 ++ 1997-02-10 14:32:01-08 | 1e+06 | 1000 | 1 | 2.450491e+06 | 8.55613921e+08 ++ 1997-07-10 14:32:01-07 | 1e+06 | 1000 | 1 | 2.450641e+06 | 8.68570321e+08 ++(16 rows) -- value near upper bound uses special case in code SELECT date_part('epoch', '294270-01-01 00:00:00+00'::timestamptz); @@ -28368,7 +28361,8 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - - SUNDAY Sunday sunday SUN Sun sun DECEMBER December december XII DEC Dec dec - MONDAY Monday monday MON Mon mon JANUARY January january I JAN Jan jan -(66 rows) -+ monday Monday monday mon Mon mon NOVEMBER November november XI NOV Nov nov ++ saturday Saturday saturday sat Sat sat NOVEMBER November november XI NOV Nov nov ++ monday Monday monday mon Mon mon JANUARY January january I JAN Jan jan + thursday Thursday thursday thu Thu thu JANUARY January january I JAN Jan jan + thursday Thursday thursday thu Thu thu JANUARY January january I JAN Jan jan + thursday Thursday thursday thu Thu thu JANUARY January january I JAN Jan jan @@ -28383,7 +28377,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - + monday Monday monday mon Mon mon FEBRUARY February february II FEB Feb feb + monday Monday monday mon Mon mon FEBRUARY February february II FEB Feb feb + thursday Thursday thursday thu Thu thu JULY July july VII JUL Jul jul -+(15 rows) ++(16 rows) SELECT to_char(d1, 'FMDAY FMDay FMday FMMONTH FMMonth FMmonth FMRM') FROM TIMESTAMPTZ_TBL; @@ -28456,9 +28450,10 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - - SUNDAY Sunday sunday DECEMBER December december XII - MONDAY Monday monday JANUARY January january I -(66 rows) -+ to_char -+------------------------------------------------------- -+ monday Monday monday NOVEMBER November november XI ++ to_char ++---------------------------------------------------------- ++ saturday Saturday saturday NOVEMBER November november XI ++ monday Monday monday JANUARY January january I + thursday Thursday thursday JANUARY January january I + thursday Thursday thursday JANUARY January january I + thursday Thursday thursday JANUARY January january I @@ -28473,7 +28468,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - + monday Monday monday FEBRUARY February february II + monday Monday monday FEBRUARY February february II + thursday Thursday thursday JULY July july VII -+(15 rows) ++(16 rows) SELECT to_char(d1, 'Y,YYY YYYY YYY YY Y CC Q MM WW DDD DD D J') FROM TIMESTAMPTZ_TBL; @@ -28488,9 +28483,10 @@ diff -U3 
--label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - - 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490 - 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490 - 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490 -+ to_char -+------------------------------------------------- -+ 4,714 4714 714 14 4 -48 4 11 47 328 24 2 0 ++ to_char ++--------------------------------------------------------- ++ 4,715 4715 715 15 5 -48 4 11 47 327 23 7 -366 ++ 294,277 294277 277 77 7 2943 1 01 01 001 01 2 109203528 + 1,970 1970 970 70 0 20 1 01 01 001 01 5 2440588 1,997 1997 997 97 7 20 1 01 01 002 02 5 2450451 1,997 1997 997 97 7 20 1 01 01 002 02 5 2450451 @@ -28557,7 +28553,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - - 2,000 2000 000 00 0 20 4 12 53 366 31 1 2451910 - 2,001 2001 001 01 1 21 1 01 01 001 01 2 2451911 -(66 rows) -+(15 rows) ++(16 rows) SELECT to_char(d1, 'FMY,YYY FMYYYY FMYYY FMYY FMY FMCC FMQ FMMM FMWW FMDDD FMDD FMD FMJ') FROM TIMESTAMPTZ_TBL; @@ -28572,9 +28568,10 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - - 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490 - 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490 - 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490 -+ to_char -+------------------------------------------------ -+ 4,714 4714 714 14 4 -48 4 11 47 328 24 2 0 ++ to_char ++---------------------------------------------------- ++ 4,715 4715 715 15 5 -48 4 11 47 327 23 7 -366 ++ 294,277 294277 277 77 7 2943 1 1 1 1 1 2 109203528 + 1,970 1970 970 70 0 20 1 1 1 1 1 5 2440588 1,997 1997 997 97 7 20 1 1 1 2 2 5 2450451 1,997 1997 997 97 7 20 1 1 1 2 2 5 2450451 @@ -28641,7 +28638,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - - 2,000 2000 0 0 0 20 4 12 53 366 31 1 2451910 - 2,001 2001 1 1 1 21 1 1 1 1 1 2 2451911 -(66 rows) -+(15 rows) ++(16 rows) SELECT to_char(d1, 'HH HH12 HH24 MI SS SSSS') FROM TIMESTAMPTZ_TBL; @@ -28711,6 +28708,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - - 05 05 17 32 01 63121 - 05 05 17 32 01 63121 - 05 05 17 32 01 63121 ++ 11 11 23 59 59 86399 + 12 12 00 00 00 0 + 08 08 08 00 00 28800 + 11 11 11 04 05 39845 @@ -28726,7 +28724,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - -(66 rows) + 10 10 22 32 01 81121 + 09 09 21 32 01 77521 -+(15 rows) ++(16 rows) SELECT to_char(d1, E'"HH:MI:SS is" HH:MI:SS "\\"text between quote marks\\""') FROM TIMESTAMPTZ_TBL; @@ -28760,6 +28758,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - - HH:MI:SS is 05:32:01 "text between quote marks" - HH:MI:SS is 05:32:01 "text between quote marks" - HH:MI:SS is 05:32:01 "text between quote marks" ++ HH:MI:SS is 11:59:59 "text between quote marks" + HH:MI:SS is 12:00:00 "text between quote marks" + HH:MI:SS is 08:00:00 "text between quote marks" + HH:MI:SS is 11:04:05 "text between quote marks" @@ -28810,7 +28809,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - - HH:MI:SS is 05:32:01 "text between quote marks" - HH:MI:SS is 05:32:01 "text between quote marks" -(66 rows) -+(15 rows) ++(16 rows) SELECT to_char(d1, 'HH24--text--MI--text--SS') FROM TIMESTAMPTZ_TBL; @@ -28880,6 +28879,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - - 17--text--32--text--01 - 17--text--32--text--01 - 17--text--32--text--01 ++ 23--text--59--text--59 + 00--text--00--text--00 + 08--text--00--text--00 + 
11--text--04--text--05 @@ -28895,12 +28895,12 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - -(66 rows) + 22--text--32--text--01 + 21--text--32--text--01 -+(15 rows) ++(16 rows) SELECT to_char(d1, 'YYYYTH YYYYth Jth') FROM TIMESTAMPTZ_TBL; - to_char - ------------------------- +- to_char +-------------------------- - - - 1969TH 1969th 2440587th @@ -28910,7 +28910,10 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - - 1997TH 1997th 2450490th - 1997TH 1997th 2450490th - 1997TH 1997th 2450490th -+ 4714TH 4714th 0th ++ to_char ++------------------------------- ++ 4715TH 4715th -366th ++ 294277TH 294277th 109203528th + 1970TH 1970th 2440588th 1997TH 1997th 2450451st 1997TH 1997th 2450451st @@ -28977,12 +28980,12 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - - 2000TH 2000th 2451910th - 2001ST 2001st 2451911th -(66 rows) -+(15 rows) ++(16 rows) SELECT to_char(d1, 'YYYY A.D. YYYY a.d. YYYY bc HH:MI:SS P.M. HH:MI:SS p.m. HH:MI:SS pm') FROM TIMESTAMPTZ_TBL; - to_char - --------------------------------------------------------------------- +- to_char +---------------------------------------------------------------------- - - - 1969 A.D. 1969 a.d. 1969 ad 04:00:00 P.M. 04:00:00 p.m. 04:00:00 pm @@ -29019,9 +29022,10 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - - 1997 A.D. 1997 a.d. 1997 ad 02:32:01 P.M. 02:32:01 p.m. 02:32:01 pm - 1997 A.D. 1997 a.d. 1997 ad 02:32:01 P.M. 02:32:01 p.m. 02:32:01 pm - 1997 A.D. 1997 a.d. 1997 ad 06:32:01 P.M. 06:32:01 p.m. 06:32:01 pm -- 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm -- 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm -+ 4714 B.C. 4714 b.c. 4714 bc 12:00:00 A.M. 12:00:00 a.m. 12:00:00 am ++ to_char ++--------------------------------------------------------------------------- ++ 4715 B.C. 4715 b.c. 4715 bc 12:00:00 A.M. 12:00:00 a.m. 12:00:00 am ++ 294277 A.D. 294277 a.d. 294277 ad 11:59:59 P.M. 11:59:59 p.m. 11:59:59 pm + 1970 A.D. 1970 a.d. 1970 ad 12:00:00 A.M. 12:00:00 a.m. 12:00:00 am + 1997 A.D. 1997 a.d. 1997 ad 08:00:00 A.M. 08:00:00 a.m. 08:00:00 am + 1997 A.D. 1997 a.d. 1997 ad 11:04:05 A.M. 11:04:05 a.m. 11:04:05 am @@ -29037,6 +29041,8 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - - 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm - 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm - 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm +- 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm +- 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm - 0097 B.C. 0097 b.c. 0097 bc 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm - 0097 A.D. 0097 a.d. 0097 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm - 0597 A.D. 0597 a.d. 0597 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm @@ -29063,12 +29069,12 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - -(66 rows) + 1997 A.D. 1997 a.d. 1997 ad 10:32:01 P.M. 10:32:01 p.m. 10:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 09:32:01 P.M. 09:32:01 p.m. 
09:32:01 pm -+(15 rows) ++(16 rows) SELECT to_char(d1, 'IYYY IYY IY I IW IDDD ID') FROM TIMESTAMPTZ_TBL; - to_char - ------------------------ +- to_char +------------------------- - - - 1970 970 70 0 01 003 3 @@ -29078,7 +29084,10 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - - 1997 997 97 7 07 043 1 - 1997 997 97 7 07 043 1 - 1997 997 97 7 07 043 1 -+ 4714 714 14 4 48 330 1 ++ to_char ++-------------------------- ++ 4715 715 15 5 47 328 6 ++ 294277 277 77 7 01 001 1 + 1970 970 70 0 01 004 4 1997 997 97 7 01 004 4 1997 997 97 7 01 004 4 @@ -29145,7 +29154,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - - 2000 000 00 0 52 364 7 - 2001 001 01 1 01 001 1 -(66 rows) -+(15 rows) ++(16 rows) SELECT to_char(d1, 'FMIYYY FMIYY FMIY FMI FMIW FMIDDD FMID') FROM TIMESTAMPTZ_TBL; @@ -29160,7 +29169,8 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - - 1997 997 97 7 7 43 1 - 1997 997 97 7 7 43 1 - 1997 997 97 7 7 43 1 -+ 4714 714 14 4 48 330 1 ++ 4715 715 15 5 47 328 6 ++ 294277 277 77 7 1 1 1 + 1970 970 70 0 1 4 4 1997 997 97 7 1 4 4 1997 997 97 7 1 4 4 @@ -29227,11 +29237,11 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - - 2000 0 0 0 52 364 7 - 2001 1 1 1 1 1 1 -(66 rows) -+(15 rows) ++(16 rows) SELECT to_char(d, 'FF1 FF2 FF3 FF4 FF5 FF6 ff1 ff2 ff3 ff4 ff5 ff6 MS US') FROM (VALUES -@@ -2140,14 +1029,7 @@ +@@ -2140,14 +1048,7 @@ ('2018-11-02 12:34:56.78901'), ('2018-11-02 12:34:56.78901234') ) d(d); @@ -29247,33 +29257,33 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - -- Check OF, TZH, TZM with various zone offsets, particularly fractional hours SET timezone = '00:00'; SELECT to_char(now(), 'OF') as "OF", to_char(now(), 'TZH:TZM') as "TZH:TZM"; -@@ -2281,22 +1163,34 @@ +@@ -2281,22 +1182,34 @@ CREATE TABLE TIMESTAMPTZ_TST (a int , b timestamptz); -- Test year field value with len > 4 INSERT INTO TIMESTAMPTZ_TST VALUES(1, 'Sat Mar 12 23:58:48 1000 IST'); +ERROR: parsing as type timestamp: unimplemented: timestamp abbreviations not supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/31710/v24.2 ++See: https://go.crdb.dev/issue-v/31710/dev INSERT INTO TIMESTAMPTZ_TST VALUES(2, 'Sat Mar 12 23:58:48 10000 IST'); +ERROR: parsing as type timestamp: unimplemented: timestamp abbreviations not supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/31710/v24.2 ++See: https://go.crdb.dev/issue-v/31710/dev INSERT INTO TIMESTAMPTZ_TST VALUES(3, 'Sat Mar 12 23:58:48 100000 IST'); +ERROR: parsing as type timestamp: unimplemented: timestamp abbreviations not supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/31710/v24.2 ++See: https://go.crdb.dev/issue-v/31710/dev INSERT INTO TIMESTAMPTZ_TST VALUES(3, '10000 Mar 12 23:58:48 IST'); +ERROR: parsing as type timestamp: unimplemented: timestamp abbreviations not supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/31710/v24.2 ++See: https://go.crdb.dev/issue-v/31710/dev INSERT INTO TIMESTAMPTZ_TST VALUES(4, '100000312 23:58:48 IST'); +ERROR: parsing as type timestamp: unimplemented: timestamp abbreviations not supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/31710/v24.2 ++See: https://go.crdb.dev/issue-v/31710/dev INSERT INTO TIMESTAMPTZ_TST VALUES(4, '1000000312 23:58:48 IST'); +ERROR: parsing as type timestamp: unimplemented: timestamp abbreviations not supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/31710/v24.2 ++See: https://go.crdb.dev/issue-v/31710/dev --Verify data SELECT * FROM TIMESTAMPTZ_TST ORDER BY a; - a | b @@ -29291,7 +29301,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - --Cleanup DROP TABLE TIMESTAMPTZ_TST; -@@ -2304,21 +1198,21 @@ +@@ -2304,21 +1217,21 @@ set TimeZone to 'America/New_York'; -- numeric timezone SELECT make_timestamptz(1973, 07, 15, 08, 15, 55.33); @@ -29322,7 +29332,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - (1 row) WITH tzs (tz) AS (VALUES -@@ -2327,84 +1221,73 @@ +@@ -2327,84 +1240,73 @@ ('+10:00:1'), ('+10:00:01'), ('+10:00:10')) SELECT make_timestamptz(2010, 2, 27, 3, 45, 00, tz), tz FROM tzs; @@ -29442,7 +29452,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - (1 row) RESET TimeZone; -@@ -2412,36 +1295,36 @@ +@@ -2412,36 +1314,36 @@ select * from generate_series('2020-01-01 00:00'::timestamptz, '2020-01-02 03:00'::timestamptz, '1 hour'::interval); @@ -29509,7 +29519,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - (28 rows) -- the LIMIT should allow this to terminate in a reasonable amount of time -@@ -2449,78 +1332,47 @@ +@@ -2449,78 +1351,47 @@ select generate_series('2022-01-01 00:00'::timestamptz, 'infinity'::timestamptz, '1 month'::interval) limit 10; @@ -29607,7 +29617,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - RESET TimeZone; -- -- Test behavior with a dynamic (time-varying) timezone abbreviation. -@@ -2529,612 +1381,472 @@ +@@ -2529,376 +1400,284 @@ -- SET TimeZone to 'UTC'; SELECT '2011-03-27 00:00:00 Europe/Moscow'::timestamptz; @@ -29699,7 +29709,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - - +ERROR: parsing as type timestamp: unimplemented: timestamp abbreviations not supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/31710/v24.2 ++See: https://go.crdb.dev/issue-v/31710/dev SELECT '2011-03-27 01:00:00 MSK'::timestamptz; - timestamptz ------------------------------- @@ -29708,7 +29718,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - - +ERROR: parsing as type timestamp: unimplemented: timestamp abbreviations not supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/31710/v24.2 ++See: https://go.crdb.dev/issue-v/31710/dev SELECT '2011-03-27 01:59:59 MSK'::timestamptz; - timestamptz ------------------------------- @@ -29717,7 +29727,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - - +ERROR: parsing as type timestamp: unimplemented: timestamp abbreviations not supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/31710/v24.2 ++See: https://go.crdb.dev/issue-v/31710/dev SELECT '2011-03-27 02:00:00 MSK'::timestamptz; - timestamptz ------------------------------- @@ -29726,7 +29736,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - - +ERROR: parsing as type timestamp: unimplemented: timestamp abbreviations not supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/31710/v24.2 ++See: https://go.crdb.dev/issue-v/31710/dev SELECT '2011-03-27 02:00:01 MSK'::timestamptz; - timestamptz ------------------------------- @@ -29735,7 +29745,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - - +ERROR: parsing as type timestamp: unimplemented: timestamp abbreviations not supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/31710/v24.2 ++See: https://go.crdb.dev/issue-v/31710/dev SELECT '2011-03-27 02:59:59 MSK'::timestamptz; - timestamptz ------------------------------- @@ -29744,7 +29754,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - - +ERROR: parsing as type timestamp: unimplemented: timestamp abbreviations not supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/31710/v24.2 ++See: https://go.crdb.dev/issue-v/31710/dev SELECT '2011-03-27 03:00:00 MSK'::timestamptz; - timestamptz ------------------------------- @@ -29753,7 +29763,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - - +ERROR: parsing as type timestamp: unimplemented: timestamp abbreviations not supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/31710/v24.2 ++See: https://go.crdb.dev/issue-v/31710/dev SELECT '2011-03-27 03:00:01 MSK'::timestamptz; - timestamptz ------------------------------- @@ -29762,7 +29772,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - - +ERROR: parsing as type timestamp: unimplemented: timestamp abbreviations not supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/31710/v24.2 ++See: https://go.crdb.dev/issue-v/31710/dev SELECT '2011-03-27 04:00:00 MSK'::timestamptz; - timestamptz ------------------------------- @@ -29771,7 +29781,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - - +ERROR: parsing as type timestamp: unimplemented: timestamp abbreviations not supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/31710/v24.2 ++See: https://go.crdb.dev/issue-v/31710/dev SELECT '2014-10-26 00:00:00 Europe/Moscow'::timestamptz; - timestamptz ------------------------------- @@ -29825,7 +29835,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - - +ERROR: parsing as type timestamp: unimplemented: timestamp abbreviations not supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/31710/v24.2 ++See: https://go.crdb.dev/issue-v/31710/dev SELECT '2014-10-26 00:59:59 MSK'::timestamptz; - timestamptz ------------------------------- @@ -29834,7 +29844,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - - +ERROR: parsing as type timestamp: unimplemented: timestamp abbreviations not supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/31710/v24.2 ++See: https://go.crdb.dev/issue-v/31710/dev SELECT '2014-10-26 01:00:00 MSK'::timestamptz; - timestamptz ------------------------------- @@ -29843,7 +29853,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - - +ERROR: parsing as type timestamp: unimplemented: timestamp abbreviations not supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/31710/v24.2 ++See: https://go.crdb.dev/issue-v/31710/dev SELECT '2014-10-26 01:00:01 MSK'::timestamptz; - timestamptz ------------------------------- @@ -29852,7 +29862,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - - +ERROR: parsing as type timestamp: unimplemented: timestamp abbreviations not supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/31710/v24.2 ++See: https://go.crdb.dev/issue-v/31710/dev SELECT '2014-10-26 02:00:00 MSK'::timestamptz; - timestamptz ------------------------------- @@ -29861,7 +29871,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - - +ERROR: parsing as type timestamp: unimplemented: timestamp abbreviations not supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/31710/v24.2 ++See: https://go.crdb.dev/issue-v/31710/dev SELECT '2011-03-27 00:00:00'::timestamp AT TIME ZONE 'Europe/Moscow'; - timezone ------------------------------- @@ -30138,23 +30148,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - (1 row) -- upper limit varies between integer and float timestamps, so hard to test - -- nonfinite values - SELECT to_timestamp(' Infinity'::float); -- to_timestamp ---------------- -- infinity -+ to_timestamp -+--------------------------------- -+ 294276-12-31 23:59:59.999999+00 - (1 row) - - SELECT to_timestamp('-Infinity'::float); -- to_timestamp ---------------- -- -infinity -+ to_timestamp -+--------------------------- -+ 4714-11-24 00:00:00+00 BC +@@ -2916,225 +1695,177 @@ (1 row) SELECT to_timestamp('NaN'::float); @@ -30465,7 +30459,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/timestamptz.out - -- -- Test that AT TIME ZONE isn't misoptimized when using an index (bug #14504) -- -@@ -3142,15 +1854,14 @@ +@@ -3142,15 +1873,14 @@ insert into tmptz values ('2017-01-18 00:00+00'); explain (costs off) select * from tmptz where f1 at time zone 'utc' = '2017-01-18 00:00'; @@ -30810,7 +30804,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab (1 row) SELECT 'a:* & nbb:*ac | doo:a* | goo'::tsquery; -@@ -393,133 +375,72 @@ +@@ -393,133 +375,60 @@ -- Also try it with non-error-throwing API SELECT pg_input_is_valid('foo', 'tsquery'); @@ -30886,7 +30880,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab - +ERROR: numnode(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT numnode( 'new & york'::tsquery ); - numnode ---------- @@ -30895,7 +30889,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab - +ERROR: numnode(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT numnode( 'new & york | qwery'::tsquery ); - numnode ---------- @@ -30904,7 +30898,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab - +ERROR: numnode(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT 'foo & bar'::tsquery && 'asd'; - ?column? ------------------------ @@ -30942,40 +30936,28 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab - 'a' <-> ( 'b' & 'd' ) -(1 row) - -+ERROR: at or near "-": syntax error -+DETAIL: source SQL: -+SELECT 'a' <-> 'b & d'::tsquery -+ ^ ++ERROR: unsupported binary operator: <-> SELECT 'a & g' <-> 'b & d'::tsquery; - ?column? ---------------------------------- - ( 'a' & 'g' ) <-> ( 'b' & 'd' ) -(1 row) - -+ERROR: at or near "-": syntax error -+DETAIL: source SQL: -+SELECT 'a & g' <-> 'b & d'::tsquery -+ ^ ++ERROR: unsupported binary operator: <-> SELECT 'a & g' <-> 'b | d'::tsquery; - ?column? 
---------------------------------- - ( 'a' & 'g' ) <-> ( 'b' | 'd' ) -(1 row) - -+ERROR: at or near "-": syntax error -+DETAIL: source SQL: -+SELECT 'a & g' <-> 'b | d'::tsquery -+ ^ ++ERROR: unsupported binary operator: <-> SELECT 'a & g' <-> 'b <-> d'::tsquery; - ?column? ------------------------------------ - ( 'a' & 'g' ) <-> ( 'b' <-> 'd' ) -(1 row) - -+ERROR: at or near "-": syntax error -+DETAIL: source SQL: -+SELECT 'a & g' <-> 'b <-> d'::tsquery -+ ^ ++ERROR: unsupported binary operator: <-> SELECT tsquery_phrase('a <3> g', 'b & d', 10); - tsquery_phrase --------------------------------- @@ -30984,11 +30966,11 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab - +ERROR: tsquery_phrase(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev -- tsvector-tsquery operations SELECT 'a b:89 ca:23A,64b d:34c'::tsvector @@ 'd:AC & ca' as "true"; true -@@ -625,29 +546,21 @@ +@@ -625,29 +534,21 @@ -- historically, a stripped tsvector matches queries ignoring weights: SELECT strip('wa:1A'::tsvector) @@ 'w:*A'::tsquery as "true"; @@ -30999,7 +30981,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab - +ERROR: strip(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT strip('wa:1A'::tsvector) @@ 'w:*D'::tsquery as "true"; - true ------- @@ -31008,7 +30990,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab - +ERROR: strip(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT strip('wa:1A'::tsvector) @@ '!w:*A'::tsquery as "false"; - false -------- @@ -31017,7 +30999,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab - +ERROR: strip(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT strip('wa:1A'::tsvector) @@ '!w:*D'::tsquery as "false"; - false -------- @@ -31026,11 +31008,11 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab - +ERROR: strip(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT 'supernova'::tsvector @@ 'super'::tsquery AS "false"; false ------- -@@ -765,11 +678,9 @@ +@@ -765,11 +666,9 @@ -- without position data, phrase search does not match SELECT strip(to_tsvector('simple', '1 2 3 4')) @@ '1 <-> 2 <-> 3' AS "false"; @@ -31041,11 +31023,11 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab - +ERROR: strip(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select to_tsvector('simple', 'q x q y') @@ 'q <-> (x & y)' AS "false"; false ------- -@@ -939,35 +850,25 @@ +@@ -939,35 +838,25 @@ (1 row) select strip(to_tsvector('simple', 'x y q y')) @@ '!x <-> y' AS "false"; @@ -31056,7 +31038,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab - +ERROR: strip(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select strip(to_tsvector('simple', 'x y q y')) @@ '!x <-> !y' AS "false"; - false -------- @@ -31065,7 +31047,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab - +ERROR: strip(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select strip(to_tsvector('simple', 'x y q y')) @@ '!x <-> !!y' AS "false"; - false -------- @@ -31074,7 +31056,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab - +ERROR: strip(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select strip(to_tsvector('simple', 'x y q y')) @@ '!(x <-> y)' AS "true"; - true ------- @@ -31083,7 +31065,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab - +ERROR: strip(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select strip(to_tsvector('simple', 'x y q y')) @@ '!(x <2> y)' AS "true"; - true ------- @@ -31092,11 +31074,11 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab - +ERROR: strip(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select to_tsvector('simple', 'x y q y') @@ '!foo' AS "true"; true ------ -@@ -1036,125 +937,85 @@ +@@ -1036,125 +925,85 @@ (1 row) SELECT ts_rank_cd(' a:1 s:2C d g'::tsvector, 'a | s'); @@ -31107,7 +31089,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab - +ERROR: ts_rank_cd(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_rank_cd(' a:1 sa:2C d g'::tsvector, 'a | s'); - ts_rank_cd ------------- @@ -31116,7 +31098,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab - +ERROR: ts_rank_cd(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_rank_cd(' a:1 sa:2C d g'::tsvector, 'a | s:*'); - ts_rank_cd ------------- @@ -31125,7 +31107,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab - +ERROR: ts_rank_cd(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_rank_cd(' a:1 sa:2C d g'::tsvector, 'a | sa:*'); - ts_rank_cd ------------- @@ -31134,7 +31116,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab - +ERROR: ts_rank_cd(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_rank_cd(' a:1 sa:3C sab:2c d g'::tsvector, 'a | sa:*'); - ts_rank_cd ------------- @@ -31143,7 +31125,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab - +ERROR: ts_rank_cd(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_rank_cd(' a:1 s:2B d g'::tsvector, 'a | s'); - ts_rank_cd ------------- @@ -31152,7 +31134,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab - +ERROR: ts_rank_cd(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_rank_cd(' a:1 s:2 d g'::tsvector, 'a | s'); - ts_rank_cd ------------- @@ -31161,7 +31143,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab - +ERROR: ts_rank_cd(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_rank_cd(' a:1 s:2C d g'::tsvector, 'a & s'); - ts_rank_cd ------------- @@ -31170,7 +31152,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab - +ERROR: ts_rank_cd(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_rank_cd(' a:1 s:2B d g'::tsvector, 'a & s'); - ts_rank_cd ------------- @@ -31179,7 +31161,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab - +ERROR: ts_rank_cd(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_rank_cd(' a:1 s:2 d g'::tsvector, 'a & s'); - ts_rank_cd ------------- @@ -31188,7 +31170,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab - +ERROR: ts_rank_cd(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_rank_cd(' a:1 s:2A d g'::tsvector, 'a <-> s'); - ts_rank_cd ------------- @@ -31197,7 +31179,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab - +ERROR: ts_rank_cd(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_rank_cd(' a:1 s:2C d g'::tsvector, 'a <-> s'); - ts_rank_cd ------------- @@ -31206,7 +31188,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab - +ERROR: ts_rank_cd(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_rank_cd(' a:1 s:2 d g'::tsvector, 'a <-> s'); - ts_rank_cd ------------- @@ -31215,7 +31197,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab - +ERROR: ts_rank_cd(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_rank_cd(' a:1 s:2 d:2A g'::tsvector, 'a <-> s'); - ts_rank_cd ------------- @@ -31224,7 +31206,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab - +ERROR: ts_rank_cd(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_rank_cd(' a:1 s:2,3A d:2A g'::tsvector, 'a <2> s:A'); - ts_rank_cd ------------- @@ -31233,7 +31215,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab - +ERROR: ts_rank_cd(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_rank_cd(' a:1 b:2 s:3A d:2A g'::tsvector, 'a <2> s:A'); - ts_rank_cd ------------- @@ -31242,7 +31224,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab - +ERROR: ts_rank_cd(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_rank_cd(' a:1 sa:2D sb:2A g'::tsvector, 'a <-> s:*'); - ts_rank_cd ------------- @@ -31251,7 +31233,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab - +ERROR: ts_rank_cd(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_rank_cd(' a:1 sa:2A sb:2D g'::tsvector, 'a <-> s:*'); - ts_rank_cd ------------- @@ -31260,7 +31242,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab - +ERROR: ts_rank_cd(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_rank_cd(' a:1 sa:2A sb:2D g'::tsvector, 'a <-> s:* <-> sa:A'); - ts_rank_cd ------------- @@ -31269,7 +31251,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab - +ERROR: ts_rank_cd(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_rank_cd(' a:1 sa:2A sb:2D g'::tsvector, 'a <-> s:* <-> sa:B'); - ts_rank_cd ------------- @@ -31278,11 +31260,11 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab - +ERROR: ts_rank_cd(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT 'a:1 b:2'::tsvector @@ 'a <-> b'::tsquery AS "true"; true ------ -@@ -1217,228 +1078,143 @@ +@@ -1217,228 +1066,143 @@ -- tsvector editing operations SELECT strip('w:12B w:13* w:12,5,6 a:1,3* a:3 w asd:1dc asd'::tsvector); @@ -31293,7 +31275,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab - +ERROR: strip(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT strip('base:7 hidden:6 rebel:1 spaceship:2,33A,34B,35C,36D strike:3'::tsvector); - strip ----------------------------------------------- @@ -31302,7 +31284,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab - +ERROR: strip(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT strip('base hidden rebel spaceship strike'::tsvector); - strip ----------------------------------------------- @@ -31311,7 +31293,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab - +ERROR: strip(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_delete(to_tsvector('english', 'Rebel spaceships, striking from a hidden base'), 'spaceship'); - ts_delete ------------------------------------------- @@ -31320,7 +31302,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab - +ERROR: ts_delete(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_delete('base:7 hidden:6 rebel:1 spaceship:2,33A,34B,35C,36D strike:3'::tsvector, 'base'); - ts_delete --------------------------------------------------------------- @@ -31329,7 +31311,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab - +ERROR: ts_delete(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_delete('base:7 hidden:6 rebel:1 spaceship:2,33A,34B,35C,36D strike:3'::tsvector, 'bas'); - ts_delete ------------------------------------------------------------------------ @@ -31338,7 +31320,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab - +ERROR: ts_delete(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_delete('base:7 hidden:6 rebel:1 spaceship:2,33A,34B,35C,36D strike:3'::tsvector, 'bases'); - ts_delete ------------------------------------------------------------------------ @@ -31347,7 +31329,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab - +ERROR: ts_delete(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_delete('base:7 hidden:6 rebel:1 spaceship:2,33A,34B,35C,36D strike:3'::tsvector, 'spaceship'); - ts_delete ------------------------------------------- @@ -31356,7 +31338,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab - +ERROR: ts_delete(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_delete('base hidden rebel spaceship strike'::tsvector, 'spaceship'); - ts_delete ----------------------------------- @@ -31365,7 +31347,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab - +ERROR: ts_delete(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_delete('base:7 hidden:6 rebel:1 spaceship:2,33A,34B,35C,36D strike:3'::tsvector, ARRAY['spaceship','rebel']); - ts_delete --------------------------------- @@ -31374,7 +31356,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab - +ERROR: ts_delete(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_delete('base:7 hidden:6 rebel:1 spaceship:2,33A,34B,35C,36D strike:3'::tsvector, ARRAY['spaceships','rebel']); - ts_delete -------------------------------------------------------------- @@ -31383,7 +31365,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab - +ERROR: ts_delete(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_delete('base:7 hidden:6 rebel:1 spaceship:2,33A,34B,35C,36D strike:3'::tsvector, ARRAY['spaceshi','rebel']); - ts_delete -------------------------------------------------------------- @@ -31392,7 +31374,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab - +ERROR: ts_delete(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_delete('base:7 hidden:6 rebel:1 spaceship:2,33A,34B,35C,36D strike:3'::tsvector, ARRAY['spaceship','leya','rebel']); - ts_delete --------------------------------- @@ -31401,7 +31383,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab - +ERROR: ts_delete(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_delete('base hidden rebel spaceship strike'::tsvector, ARRAY['spaceship','leya','rebel']); - ts_delete --------------------------- @@ -31410,7 +31392,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab - +ERROR: ts_delete(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_delete('base hidden rebel spaceship strike'::tsvector, ARRAY['spaceship','leya','rebel','rebel']); - ts_delete --------------------------- @@ -31419,7 +31401,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab - +ERROR: ts_delete(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_delete('base hidden rebel spaceship strike'::tsvector, ARRAY['spaceship','leya','rebel', '', NULL]); - ts_delete --------------------------- @@ -31428,7 +31410,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab - +ERROR: ts_delete(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT unnest('base:7 hidden:6 rebel:1 spaceship:2,33A,34B,35C,36D strike:3'::tsvector); - unnest ---------------------------------------------- @@ -31497,7 +31479,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab - +ERROR: tsvector_to_array(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT tsvector_to_array('base hidden rebel spaceship strike'::tsvector); - tsvector_to_array --------------------------------------- @@ -31506,7 +31488,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab - +ERROR: tsvector_to_array(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT array_to_tsvector(ARRAY['base','hidden','rebel','spaceship','strike']); - array_to_tsvector ----------------------------------------------- @@ -31515,18 +31497,18 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab - +ERROR: array_to_tsvector(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev -- null and empty string are disallowed, since we mustn't make an empty lexeme SELECT array_to_tsvector(ARRAY['base','hidden','rebel','spaceship', NULL]); -ERROR: lexeme array may not contain nulls +ERROR: array_to_tsvector(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT array_to_tsvector(ARRAY['base','hidden','rebel','spaceship', '']); -ERROR: lexeme array may not contain empty strings +ERROR: array_to_tsvector(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev -- array_to_tsvector must sort and de-dup SELECT array_to_tsvector(ARRAY['foo','bar','baz','bar']); - array_to_tsvector @@ -31536,7 +31518,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab - +ERROR: array_to_tsvector(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT setweight('w:12B w:13* w:12,5,6 a:1,3* a:3 w asd:1dc asd zxc:81,567,222A'::tsvector, 'c'); - setweight ----------------------------------------------------------- @@ -31545,7 +31527,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab - +ERROR: setweight(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT setweight('a:1,3A asd:1C w:5,6,12B,13A zxc:81,222A,567'::tsvector, 'c'); - setweight ----------------------------------------------------------- @@ -31554,7 +31536,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab - +ERROR: setweight(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT setweight('a:1,3A asd:1C w:5,6,12B,13A zxc:81,222A,567'::tsvector, 'c', '{a}'); - setweight ------------------------------------------------------- @@ -31563,7 +31545,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab - +ERROR: setweight(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT setweight('a:1,3A asd:1C w:5,6,12B,13A zxc:81,222A,567'::tsvector, 'c', '{a}'); - setweight ------------------------------------------------------- @@ -31572,7 +31554,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab - +ERROR: setweight(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT setweight('a:1,3A asd:1C w:5,6,12B,13A zxc:81,222A,567'::tsvector, 'c', '{a,zxc}'); - setweight --------------------------------------------------------- @@ -31581,7 +31563,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab - +ERROR: setweight(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT setweight('a asd w:5,6,12B,13A zxc'::tsvector, 'c', ARRAY['a', 'zxc', '', NULL]); - setweight ---------------------------------- @@ -31590,7 +31572,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab - +ERROR: setweight(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_filter('base:7A empir:17 evil:15 first:11 galact:16 hidden:6A rebel:1A spaceship:2A strike:3A victori:12 won:9'::tsvector, '{a}'); - ts_filter -------------------------------------------------------------- @@ -31599,7 +31581,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab - +ERROR: ts_filter(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_filter('base hidden rebel spaceship strike'::tsvector, '{a}'); - ts_filter ------------ @@ -31608,16 +31590,16 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tstypes.out --lab - +ERROR: ts_filter(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_filter('base hidden rebel spaceship strike'::tsvector, '{a,b,NULL}'); -ERROR: weight array may not contain nulls +ERROR: ts_filter(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/geometry.out --label=/mnt/data1/postgres/src/test/regress/results/geometry.out /mnt/data1/postgres/src/test/regress/expected/geometry.out /mnt/data1/postgres/src/test/regress/results/geometry.out --- /mnt/data1/postgres/src/test/regress/expected/geometry.out +++ /mnt/data1/postgres/src/test/regress/results/geometry.out -@@ -9,5314 +9,797 @@ +@@ -9,5314 +9,755 @@ -- SELECT center(f1) AS center FROM BOX_TBL; @@ -32258,10 +32240,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/geometry.out --la - (10,10) | {-1,0,3} | 7 | 7 -(100 rows) - -+ERROR: at or near "-": syntax error -+DETAIL: source SQL: -+SELECT p.f1, l.s, p.f1 <-> l.s AS dist_pl, l.s <-> p.f1 AS dist_lp FROM POINT_TBL p, LINE_TBL l -+ ^ ++ERROR: relation "point_tbl" does not exist -- Distance to line segment SELECT p.f1, l.s, p.f1 <-> l.s AS dist_ps, l.s <-> p.f1 AS dist_sp FROM POINT_TBL p, LSEG_TBL l; - f1 | s | dist_ps | dist_sp @@ -32348,10 +32327,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/geometry.out --la - (10,10) | [(NaN,1),(NaN,90)] | NaN | NaN -(80 rows) - -+ERROR: at or near "-": syntax error -+DETAIL: source SQL: -+SELECT p.f1, l.s, p.f1 <-> l.s AS dist_ps, l.s <-> p.f1 AS dist_sp FROM POINT_TBL p, LSEG_TBL l -+ ^ ++ERROR: relation "point_tbl" does not exist -- Distance to box SELECT p.f1, b.f1, p.f1 <-> b.f1 AS dist_pb, b.f1 <-> p.f1 AS dist_bp FROM POINT_TBL p, BOX_TBL b; - f1 | f1 | dist_pb | dist_bp @@ -32408,10 +32384,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/geometry.out --la - (10,10) | (3,3),(3,3) | 9.89949493661 | 9.89949493661 -(50 rows) - -+ERROR: at or near "-": syntax error -+DETAIL: source SQL: -+SELECT p.f1, b.f1, p.f1 <-> b.f1 AS dist_pb, b.f1 <-> p.f1 AS dist_bp FROM POINT_TBL p, BOX_TBL b -+ ^ ++ERROR: relation "point_tbl" does not exist -- Distance to path SELECT p.f1, p1.f1, p.f1 <-> p1.f1 AS dist_ppath, p1.f1 <-> p.f1 AS dist_pathp FROM POINT_TBL p, PATH_TBL p1; - f1 | f1 | dist_ppath | dist_pathp @@ -32508,10 +32481,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/geometry.out --la - (10,10) | ((11,12),(13,14)) | 2.2360679775 | 2.2360679775 -(90 rows) - -+ERROR: at or near "-": syntax error -+DETAIL: source SQL: -+SELECT p.f1, p1.f1, p.f1 <-> p1.f1 AS dist_ppath, p1.f1 <-> p.f1 AS dist_pathp FROM POINT_TBL p, PATH_TBL p1 -+ ^ ++ERROR: relation "point_tbl" does not exist -- Distance to polygon SELECT p.f1, p1.f1, p.f1 <-> p1.f1 AS dist_ppoly, p1.f1 <-> p.f1 AS dist_polyp FROM POINT_TBL p, POLYGON_TBL p1; - f1 | f1 | dist_ppoly | dist_polyp @@ -32588,10 +32558,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/geometry.out --la - (10,10) | ((0,1),(0,1)) | 13.4536240471 | 13.4536240471 -(70 rows) - -+ERROR: at or near "-": syntax error -+DETAIL: source SQL: -+SELECT p.f1, p1.f1, p.f1 <-> p1.f1 AS dist_ppoly, p1.f1 <-> p.f1 AS dist_polyp FROM POINT_TBL p, POLYGON_TBL p1 -+ ^ ++ERROR: relation "point_tbl" does not exist -- Construct line through two points SELECT p1.f1, p2.f1, line(p1.f1, p2.f1) FROM POINT_TBL p1, POINT_TBL p2 WHERE p1.f1 <> p2.f1; @@ -33184,10 +33151,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/geometry.out --la - {-1,0,3} | {-1,0,3} | 0 -(100 rows) - -+ERROR: at or near "-": syntax error -+DETAIL: source SQL: -+SELECT l1.s, l2.s, l1.s <-> l2.s FROM LINE_TBL l1, LINE_TBL l2 -+ ^ ++ERROR: relation "line_tbl" 
does not exist -- Intersect with line SELECT l1.s, l2.s FROM LINE_TBL l1, LINE_TBL l2 WHERE l1.s ?# l2.s; - s | s @@ -33590,7 +33554,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/geometry.out --la +SELECT s, s::point FROM LSEG_TBL + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev -- Has points less than line segment SELECT l1.s, l2.s FROM LSEG_TBL l1, LSEG_TBL l2 WHERE l1.s < l2.s; - s | s @@ -33916,10 +33880,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/geometry.out --la - [(NaN,1),(NaN,90)] | {-1,0,3} | NaN | NaN -(80 rows) - -+ERROR: at or near "-": syntax error -+DETAIL: source SQL: -+SELECT l.s, l1.s, l.s <-> l1.s AS dist_sl, l1.s <-> l.s AS dist_ls FROM LSEG_TBL l, LINE_TBL l1 -+ ^ ++ERROR: relation "lseg_tbl" does not exist -- Distance to line segment SELECT l1.s, l2.s, l1.s <-> l2.s FROM LSEG_TBL l1, LSEG_TBL l2; - s | s | ?column? @@ -33990,10 +33951,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/geometry.out --la - [(NaN,1),(NaN,90)] | [(NaN,1),(NaN,90)] | NaN -(64 rows) - -+ERROR: at or near "-": syntax error -+DETAIL: source SQL: -+SELECT l1.s, l2.s, l1.s <-> l2.s FROM LSEG_TBL l1, LSEG_TBL l2 -+ ^ ++ERROR: relation "lseg_tbl" does not exist -- Distance to box SELECT l.s, b.f1, l.s <-> b.f1 AS dist_sb, b.f1 <-> l.s AS dist_bs FROM LSEG_TBL l, BOX_TBL b; - s | f1 | dist_sb | dist_bs @@ -34040,10 +33998,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/geometry.out --la - [(NaN,1),(NaN,90)] | (3,3),(3,3) | NaN | NaN -(40 rows) - -+ERROR: at or near "-": syntax error -+DETAIL: source SQL: -+SELECT l.s, b.f1, l.s <-> b.f1 AS dist_sb, b.f1 <-> l.s AS dist_bs FROM LSEG_TBL l, BOX_TBL b -+ ^ ++ERROR: relation "lseg_tbl" does not exist -- Intersect with line segment SELECT l.s, l1.s FROM LSEG_TBL l, LINE_TBL l1 WHERE l.s ?# l1.s; - s | s @@ -34511,7 +34466,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/geometry.out --la + FROM POINT_TBL + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev SELECT bound_box(a.f1, b.f1) FROM BOX_TBL a, BOX_TBL b; - bound_box @@ -34689,10 +34644,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/geometry.out --la - (3,3),(3,3) | (3,3),(3,3) | 0 -(25 rows) - -+ERROR: at or near "-": syntax error -+DETAIL: source SQL: -+SELECT b1.f1, b2.f1, b1.f1 <-> b2.f1 FROM BOX_TBL b1, BOX_TBL b2 -+ ^ ++ERROR: relation "box_tbl" does not exist -- -- Paths -- @@ -34763,7 +34715,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/geometry.out --la +SELECT f1, f1::polygon FROM PATH_TBL WHERE isclosed(f1) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev -- Open path cannot be converted to polygon error SELECT f1, f1::polygon FROM PATH_TBL WHERE isopen(f1); -ERROR: open path cannot be converted to polygon @@ -34772,7 +34724,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/geometry.out --la +SELECT f1, f1::polygon FROM PATH_TBL WHERE isopen(f1) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev -- Has points less than path SELECT p1.f1, p2.f1 FROM PATH_TBL p1, PATH_TBL p2 WHERE p1.f1 < p2.f1; - f1 | f1 @@ -35519,10 +35471,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/geometry.out --la - ((11,12),(13,14)) | ((11,12),(13,14)) | 0 -(81 rows) - -+ERROR: at or near "-": syntax error -+DETAIL: source SQL: -+SELECT p1.f1, p2.f1, p1.f1 <-> p2.f1 FROM PATH_TBL p1, PATH_TBL p2 -+ ^ ++ERROR: relation "path_tbl" does not exist -- -- Polygons -- @@ -35758,7 +35707,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/geometry.out --la +SELECT f1, f1::box FROM POLYGON_TBL + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev -- To path SELECT f1, f1::path FROM POLYGON_TBL; - f1 | f1 @@ -35777,7 +35726,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/geometry.out --la +SELECT f1, f1::path FROM POLYGON_TBL + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev -- Same as polygon SELECT p1.f1, p2.f1 FROM POLYGON_TBL p1, POLYGON_TBL p2 WHERE p1.f1 ~= p2.f1; - f1 | f1 @@ -36154,10 +36103,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/geometry.out --la - ((0,1),(0,1)) | ((0,1),(0,1)) | 0 -(49 rows) - -+ERROR: at or near "-": syntax error -+DETAIL: source SQL: -+SELECT p1.f1, p2.f1, p1.f1 <-> p2.f1 FROM POLYGON_TBL p1, POLYGON_TBL p2 -+ ^ ++ERROR: relation "polygon_tbl" does not exist -- -- Circles -- @@ -36277,10 +36223,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/geometry.out --la - <(3,5),NaN> | (NaN,NaN) | NaN -(61 rows) - -+ERROR: at or near "-": syntax error -+DETAIL: source SQL: -+SELECT c1.f1 AS circle, p1.f1 AS point, (p1.f1 <-> c1.f1) AS distance -+ ^ ++ERROR: relation "circle_tbl" does not exist -- To polygon SELECT f1, f1::polygon FROM CIRCLE_TBL WHERE f1 >= '<(0,0),1>'; - f1 | f1 @@ -36298,7 +36241,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/geometry.out --la +SELECT f1, f1::polygon FROM CIRCLE_TBL WHERE f1 >= '<(0,0),1>' + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev -- To polygon with less points SELECT f1, polygon(8, f1) FROM CIRCLE_TBL WHERE f1 >= '<(0,0),1>'; - f1 | polygon @@ -37245,10 +37188,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/geometry.out --la - <(3,5),NaN> | ((0,1),(0,1)) | NaN -(56 rows) - -+ERROR: at or near "-": syntax error -+DETAIL: source SQL: -+SELECT c.f1, p.f1, c.f1 <-> p.f1 FROM CIRCLE_TBL c, POLYGON_TBL p -+ ^ ++ERROR: relation "circle_tbl" does not exist -- Check index behavior for circles CREATE INDEX gcircleind ON circle_tbl USING gist (f1); +ERROR: relation "circle_tbl" does not exist @@ -37312,7 +37252,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/geometry.out --la + ORDER BY (poly_center(f1))[0] + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev EXPLAIN (COSTS OFF) SELECT * FROM polygon_tbl WHERE f1 @> '((1,1),(2,2),(2,1))'::polygon ORDER BY (poly_center(f1))[0]; @@ -37342,7 +37282,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/geometry.out --la + ORDER BY (poly_center(f1))[0] + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev -- test non-error-throwing API for some core types SELECT pg_input_is_valid('(1', 'circle'); - pg_input_is_valid @@ -38003,7 +37943,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/horology.out --la SET DateStyle = 'Postgres, MDY'; +ERROR: unimplemented: only ISO style is supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/41773/v24.2 ++See: https://go.crdb.dev/issue-v/41773/dev SHOW TimeZone; -- Many of these tests depend on the prevailing setting - TimeZone + timezone @@ -38121,7 +38061,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/horology.out --la - +ERROR: parsing as type timestamp: unimplemented: timestamp abbreviations not supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/31710/v24.2 ++See: https://go.crdb.dev/issue-v/31710/dev SELECT timestamp with time zone '2001-12-27 allballs'; - timestamptz ------------------------------- @@ -38255,7 +38195,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/horology.out --la SET DateStyle = 'German'; +ERROR: unimplemented: only ISO style is supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/41773/v24.2 ++See: https://go.crdb.dev/issue-v/41773/dev SELECT timestamp with time zone '27.12.2001 04:05:06.789+08'; - timestamptz ------------------------------ @@ -38359,7 +38299,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/horology.out --la SET DateStyle = 'Postgres, MDY'; +ERROR: unimplemented: only ISO style is supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/41773/v24.2 ++See: https://go.crdb.dev/issue-v/41773/dev -- Check Julian dates BC SELECT date 'J1520447' AS "Confucius' Birthday"; Confucius' Birthday @@ -38453,18 +38393,18 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/horology.out --la -ERROR: invalid input syntax for type timestamp: "1995-08-06 infinity" -LINE 1: SELECT timestamp '1995-08-06 infinity'; - ^ -+ timestamp -+------------------------------ -+ 294276-12-31 23:59:59.999999 ++ timestamp ++----------- ++ infinity +(1 row) + SELECT timestamp '1995-08-06 -infinity'; -ERROR: invalid input syntax for type timestamp: "1995-08-06 -infinity" -LINE 1: SELECT timestamp '1995-08-06 -infinity'; - ^ -+ timestamp -+------------------------ -+ 4714-11-24 00:00:00 BC ++ timestamp ++----------- ++ -infinity +(1 row) + SELECT timestamp 'epoch 01:01:01'; @@ -38480,18 +38420,18 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/horology.out --la -ERROR: invalid input syntax for type timestamp: "infinity 01:01:01" -LINE 1: SELECT timestamp 'infinity 01:01:01'; - ^ -+ timestamp -+------------------------------ -+ 294276-12-31 23:59:59.999999 ++ timestamp ++----------- ++ infinity +(1 row) + SELECT timestamp '-infinity 01:01:01'; -ERROR: invalid input syntax for type timestamp: "-infinity 01:01:01" -LINE 1: SELECT timestamp '-infinity 01:01:01'; - ^ -+ timestamp -+------------------------ -+ 4714-11-24 00:00:00 BC ++ timestamp ++----------- ++ -infinity +(1 row) + SELECT timestamp 'now epoch'; @@ -38517,18 +38457,18 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/horology.out --la -ERROR: invalid input syntax for type timestamp with time zone: "1995-08-06 infinity" -LINE 1: SELECT timestamptz '1995-08-06 infinity'; - ^ -+ timestamptz -+--------------------------------- -+ 294276-12-31 15:59:59.999999-08 ++ timestamptz ++------------- ++ infinity +(1 row) + SELECT timestamptz '1995-08-06 -infinity'; -ERROR: invalid input syntax for type timestamp with time zone: "1995-08-06 -infinity" -LINE 1: SELECT timestamptz '1995-08-06 -infinity'; - ^ -+ timestamptz -+--------------------------- -+ 4714-11-23 16:00:00-08 BC ++ timestamptz ++------------- ++ -infinity +(1 row) + SELECT timestamptz 'epoch 01:01:01'; @@ -38544,18 +38484,18 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/horology.out --la -ERROR: invalid input syntax for type timestamp with time zone: "infinity 01:01:01" -LINE 1: SELECT timestamptz 'infinity 01:01:01'; - ^ -+ timestamptz -+--------------------------------- -+ 294276-12-31 15:59:59.999999-08 ++ timestamptz ++------------- ++ infinity +(1 row) + SELECT timestamptz '-infinity 01:01:01'; -ERROR: invalid input syntax for type timestamp with time zone: "-infinity 01:01:01" -LINE 1: SELECT timestamptz '-infinity 01:01:01'; - ^ -+ timestamptz -+--------------------------- -+ 4714-11-23 16:00:00-08 BC ++ timestamptz ++------------- ++ -infinity +(1 row) + SELECT timestamptz 'now epoch'; @@ -38724,7 +38664,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/horology.out --la SELECT (timestamp without time zone 'tomorrow' > 'now') as "True"; True ------ -@@ -555,193 +469,69 @@ +@@ -555,193 +469,52 @@ -- This test used to be timestamp(date,time) but no longer allowed by grammar -- to enable support for SQL99 timestamp type syntax. 
SELECT date '1994-01-01' + time '11:00' AS "Jan_01_1994_11am"; @@ -38833,24 +38773,8 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/horology.out --la - Mon Dec 31 17:32:01 2001 - Tue Jan 01 17:32:01 2002 -(65 rows) -+ one_year -+------------------------ -+ 4713-11-24 00:00:00 BC -+ 1971-01-01 00:00:00 -+ 1998-01-02 00:00:00 -+ 1998-01-02 03:04:05 -+ 1998-02-10 17:32:01 -+ 1998-02-10 17:32:01 -+ 1998-02-10 17:32:01 -+ 1998-02-10 17:32:01 -+ 1998-06-10 17:32:01 -+ 2002-09-22 18:19:20 -+ 1998-02-10 17:32:01 -+ 1998-02-10 17:32:01 -+ 1998-02-10 17:32:01 -+ 1998-02-10 17:32:01 -+(14 rows) - +- ++ERROR: timestamp "-4714-11-23T00:00:00Z" exceeds supported timestamp bounds SELECT d1 - interval '1 year' AS one_year FROM TIMESTAMP_TBL; - one_year ------------------------------ @@ -38921,7 +38845,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/horology.out --la - Sat Jan 01 17:32:01 2000 -(65 rows) - -+ERROR: timestamp "-4714-11-24T00:00:00Z" exceeds supported timestamp bounds ++ERROR: timestamp "-4714-11-23T00:00:00Z" exceeds supported timestamp bounds SELECT timestamp with time zone '1996-03-01' - interval '1 second' AS "Feb 29"; - Feb 29 ------------------------------- @@ -38959,7 +38883,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/horology.out --la SELECT (timestamp with time zone 'today' = (timestamp with time zone 'yesterday' + interval '1 day')) as "True"; True ------ -@@ -769,210 +559,86 @@ +@@ -769,210 +542,68 @@ -- timestamp with time zone, interval arithmetic around DST change -- (just for fun, let's use an intentionally nonstandard POSIX zone spec) SET TIME ZONE 'CST7CDT,M4.1.0,M10.5.0'; @@ -39121,25 +39045,8 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/horology.out --la - Mon Dec 31 17:32:01 2001 PST - Tue Jan 01 17:32:01 2002 PST -(66 rows) -+ one_year -+--------------------------- -+ 4713-11-23 16:00:00-08 BC -+ 1970-12-31 16:00:00-08 -+ 1998-01-02 00:00:00-08 -+ 1998-01-02 03:04:05-08 -+ 1998-02-10 17:32:01-08 -+ 1998-02-10 17:32:01-08 -+ 1998-02-10 17:32:01-08 -+ 1998-02-10 17:32:01-08 -+ 1998-06-10 17:32:01-07 -+ 2002-09-22 18:19:20-07 -+ 1998-02-10 17:32:01-08 -+ 1998-02-10 09:32:01-08 -+ 1998-02-10 09:32:01-08 -+ 1998-02-10 14:32:01-08 -+ 1998-07-10 14:32:01-07 -+(15 rows) - +- ++ERROR: timestamp "-4713-11-22T16:00:00-08:00" exceeds supported timestamp bounds SELECT d1 - interval '1 year' AS one_year FROM TIMESTAMPTZ_TBL; - one_year ---------------------------------- @@ -39211,7 +39118,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/horology.out --la - Sat Jan 01 17:32:01 2000 PST -(66 rows) - -+ERROR: timestamp "-4714-11-23T16:00:00-08:00" exceeds supported timestamp bounds ++ERROR: timestamp "-4715-11-22T16:00:00-08:00" exceeds supported timestamp bounds -- -- time, interval arithmetic -- @@ -39225,7 +39132,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/horology.out --la (1 row) SELECT CAST(interval '02:03' AS time) AS "02:03:00"; -@@ -1006,13 +672,9 @@ +@@ -1006,13 +637,9 @@ (1 row) SELECT CAST(time with time zone '01:02-08' AS interval) AS "+00:01"; @@ -39241,7 +39148,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/horology.out --la SELECT time with time zone '01:30-08' - interval '02:01' AS "23:29:00-08"; 23:29:00-08 ------------- -@@ -1034,14 +696,14 @@ +@@ -1034,14 +661,14 @@ + interval '02:01' AS time with time zone) AS time) AS "07:31:00"; 07:31:00 ---------- @@ -39258,7 +39165,7 @@ diff -U3 
--label=/mnt/data1/postgres/src/test/regress/expected/horology.out --la (1 row) SELECT t.d1 AS t, i.f1 AS i, t.d1 + i.f1 AS "add", t.d1 - i.f1 AS "subtract" -@@ -1049,347 +711,67 @@ +@@ -1049,347 +676,67 @@ WHERE t.d1 BETWEEN '1990-01-01' AND '2001-01-01' AND i.f1 BETWEEN '00:00' AND '23:00' ORDER BY 1,2; @@ -39659,7 +39566,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/horology.out --la -- SQL9x OVERLAPS operator -- test with time zone -@@ -1409,18 +791,12 @@ +@@ -1409,18 +756,12 @@ SELECT (timestamp with time zone '2000-11-27', timestamp with time zone '2000-11-28') OVERLAPS (timestamp with time zone '2000-11-27 12:00', interval '1 day') AS "True"; @@ -39682,7 +39589,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/horology.out --la SELECT (timestamp with time zone '2000-11-27', interval '12 hours') OVERLAPS (timestamp with time zone '2000-11-27', interval '12 hours') AS "True"; True -@@ -1452,18 +828,12 @@ +@@ -1452,18 +793,12 @@ SELECT (timestamp without time zone '2000-11-27', timestamp without time zone '2000-11-28') OVERLAPS (timestamp without time zone '2000-11-27 12:00', interval '1 day') AS "True"; @@ -39705,7 +39612,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/horology.out --la SELECT (timestamp without time zone '2000-11-27', interval '12 hours') OVERLAPS (timestamp without time zone '2000-11-27', interval '12 hours') AS "True"; True -@@ -1519,648 +889,53 @@ +@@ -1519,648 +854,53 @@ SELECT f1 AS "timestamp" FROM TEMP_TIMESTAMP ORDER BY "timestamp"; @@ -40383,7 +40290,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/horology.out --la -- -- Conversions -@@ -2169,80 +944,42 @@ +@@ -2169,80 +909,42 @@ FROM TEMP_TIMESTAMP WHERE f1 <> timestamp 'now' ORDER BY date, "timestamp"; @@ -40481,7 +40388,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/horology.out --la SELECT '4714-11-24 BC'::timestamp < '2020-10-05'::timestamptz as t; t --- -@@ -2262,13 +999,11 @@ +@@ -2262,13 +964,11 @@ explain (costs off) select count(*) from date_tbl where f1 between '1997-01-01' and '1998-01-01'; @@ -40500,7 +40407,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/horology.out --la select count(*) from date_tbl where f1 between '1997-01-01' and '1998-01-01'; count -@@ -2279,13 +1014,11 @@ +@@ -2279,13 +979,11 @@ explain (costs off) select count(*) from date_tbl where f1 not between '1997-01-01' and '1998-01-01'; @@ -40519,7 +40426,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/horology.out --la select count(*) from date_tbl where f1 not between '1997-01-01' and '1998-01-01'; count -@@ -2296,13 +1029,11 @@ +@@ -2296,13 +994,11 @@ explain (costs off) select count(*) from date_tbl where f1 between symmetric '1997-01-01' and '1998-01-01'; @@ -40538,7 +40445,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/horology.out --la select count(*) from date_tbl where f1 between symmetric '1997-01-01' and '1998-01-01'; count -@@ -2313,13 +1044,11 @@ +@@ -2313,13 +1009,11 @@ explain (costs off) select count(*) from date_tbl where f1 not between symmetric '1997-01-01' and '1998-01-01'; @@ -40557,7 +40464,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/horology.out --la select count(*) from date_tbl where f1 not between symmetric '1997-01-01' and '1998-01-01'; count -@@ -2331,95 +1060,19 @@ +@@ -2331,95 +1025,20 @@ -- Formats -- SET DateStyle TO 'US,Postgres'; @@ -40645,11 +40552,11 @@ diff -U3 
--label=/mnt/data1/postgres/src/test/regress/expected/horology.out --la -SET DateStyle TO 'US,ISO'; -SELECT d1 AS us_iso FROM TIMESTAMP_TBL; - us_iso -+ us_postgres - ------------------------ -- -infinity -- infinity -+ 4714-11-24 00:00:00 BC +------------------------- ++ us_postgres ++--------------------- + -infinity + infinity 1970-01-01 00:00:00 - 1997-02-10 17:32:01 - 1997-02-10 17:32:01 @@ -40660,7 +40567,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/horology.out --la 1997-01-02 00:00:00 1997-01-02 03:04:05 1997-02-10 17:32:01 -@@ -2428,237 +1081,115 @@ +@@ -2428,237 +1047,119 @@ 1997-02-10 17:32:01 1997-06-10 17:32:01 2001-09-22 18:19:20 @@ -40676,15 +40583,16 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/horology.out --la 1997-02-10 17:32:01 1997-02-10 17:32:01 1997-02-10 17:32:01 -+(14 rows) ++(15 rows) + +SET DateStyle TO 'US,ISO'; +ERROR: invalid value for parameter "DateStyle": "US,ISO" +HINT: Available values: {ISO MDY},{ISO DMY},{ISO YMD} +SELECT d1 AS us_iso FROM TIMESTAMP_TBL; -+ us_iso -+------------------------ -+ 4714-11-24 00:00:00 BC ++ us_iso ++--------------------- ++ -infinity ++ infinity + 1970-01-01 00:00:00 + 1997-01-02 00:00:00 + 1997-01-02 03:04:05 @@ -40730,7 +40638,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/horology.out --la - 2000-12-31 17:32:01 - 2001-01-01 17:32:01 -(65 rows) -+(14 rows) ++(15 rows) SET DateStyle TO 'US,SQL'; +ERROR: invalid value for parameter "DateStyle": "US,SQL" @@ -40744,10 +40652,12 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/horology.out --la (1 row) SELECT d1 AS us_sql FROM TIMESTAMP_TBL; - us_sql - ------------------------ -- -infinity -- infinity +- us_sql +------------------------- ++ us_sql ++--------------------- + -infinity + infinity - 01/01/1970 00:00:00 - 02/10/1997 17:32:01 - 02/10/1997 17:32:01 @@ -40812,7 +40722,6 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/horology.out --la - 12/31/2000 17:32:01 - 01/01/2001 17:32:01 -(65 rows) -+ 4714-11-24 00:00:00 BC + 1970-01-01 00:00:00 + 1997-01-02 00:00:00 + 1997-01-02 03:04:05 @@ -40826,7 +40735,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/horology.out --la + 1997-02-10 17:32:01 + 1997-02-10 17:32:01 + 1997-02-10 17:32:01 -+(14 rows) ++(15 rows) SET DateStyle TO 'European,Postgres'; +ERROR: invalid value for parameter "DateStyle": "European,Postgres" @@ -40855,8 +40764,10 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/horology.out --la SELECT d1 AS european_postgres FROM TIMESTAMP_TBL; - european_postgres ------------------------------ -- -infinity -- infinity ++ european_postgres ++--------------------- + -infinity + infinity - Thu 01 Jan 00:00:00 1970 - Mon 10 Feb 17:32:01 1997 - Mon 10 Feb 17:32:01 1997 @@ -40922,9 +40833,6 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/horology.out --la - Mon 01 Jan 17:32:01 2001 - Thu 13 Jun 00:00:00 1957 -(66 rows) -+ european_postgres -+------------------------ -+ 4714-11-24 00:00:00 BC + 1970-01-01 00:00:00 + 1997-01-02 00:00:00 + 1997-01-02 03:04:05 @@ -40938,7 +40846,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/horology.out --la + 1997-02-10 17:32:01 + 1997-02-10 17:32:01 + 1997-02-10 17:32:01 -+(14 rows) ++(15 rows) SET DateStyle TO 'European,ISO'; +ERROR: invalid value for parameter "DateStyle": "European,ISO" @@ -40952,11 +40860,12 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/horology.out --la (1 row) SELECT d1 AS 
european_iso FROM TIMESTAMP_TBL; - european_iso - ------------------------ -- -infinity -- infinity -+ 4714-11-24 00:00:00 BC +- european_iso +------------------------- ++ european_iso ++--------------------- + -infinity + infinity 1970-01-01 00:00:00 - 1997-02-10 17:32:01 - 1997-02-10 17:32:01 @@ -40967,7 +40876,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/horology.out --la 1997-01-02 00:00:00 1997-01-02 03:04:05 1997-02-10 17:32:01 -@@ -2667,846 +1198,425 @@ +@@ -2667,846 +1168,426 @@ 1997-02-10 17:32:01 1997-06-10 17:32:01 2001-09-22 18:19:20 @@ -40979,6 +40888,8 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/horology.out --la - 1997-02-10 17:32:01 - 1997-02-10 17:32:01 - 1997-02-10 17:32:00 +- 1997-02-10 17:32:01 +- 1997-02-10 17:32:01 - 1997-02-10 17:32:01 1997-02-10 17:32:01 1997-02-10 17:32:01 @@ -40986,8 +40897,6 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/horology.out --la 1997-02-10 17:32:01 - 1997-02-10 17:32:01 - 1997-02-10 17:32:01 -- 1997-02-10 17:32:01 -- 1997-02-10 17:32:01 - 1997-06-10 18:32:01 - 1997-02-10 17:32:01 - 1997-02-11 17:32:01 @@ -41021,7 +40930,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/horology.out --la - 2001-01-01 17:32:01 - 1957-06-13 00:00:00 -(66 rows) -+(14 rows) ++(15 rows) SET DateStyle TO 'European,SQL'; +ERROR: invalid value for parameter "DateStyle": "European,SQL" @@ -41035,10 +40944,12 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/horology.out --la (1 row) SELECT d1 AS european_sql FROM TIMESTAMP_TBL; - european_sql - ------------------------ -- -infinity -- infinity +- european_sql +------------------------- ++ european_sql ++--------------------- + -infinity + infinity - 01/01/1970 00:00:00 - 10/02/1997 17:32:01 - 10/02/1997 17:32:01 @@ -41104,7 +41015,6 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/horology.out --la - 01/01/2001 17:32:01 - 13/06/1957 00:00:00 -(66 rows) -+ 4714-11-24 00:00:00 BC + 1970-01-01 00:00:00 + 1997-01-02 00:00:00 + 1997-01-02 03:04:05 @@ -41118,7 +41028,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/horology.out --la + 1997-02-10 17:32:01 + 1997-02-10 17:32:01 + 1997-02-10 17:32:01 -+(14 rows) ++(15 rows) RESET DateStyle; -- @@ -42108,7 +42018,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/type_sanity.out - -- Look for "toastable" types that aren't varlena. 
SELECT t1.oid, t1.typname -@@ -67,15 +81,361 @@ +@@ -67,15 +81,363 @@ WHERE t2.typname = ('_' || t1.typname)::name AND t2.typelem = t1.oid and t1.typarray = t2.oid) ORDER BY t1.oid; @@ -42154,224 +42064,226 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/type_sanity.out - + 100200 | interval_tbl + 100215 | timestamp_tbl + 100216 | timestamptz_tbl -+ 4294966971 | spatial_ref_sys -+ 4294966972 | geometry_columns -+ 4294966973 | geography_columns -+ 4294966975 | pg_views -+ 4294966976 | pg_user -+ 4294966977 | pg_user_mappings -+ 4294966978 | pg_user_mapping -+ 4294966979 | pg_type -+ 4294966980 | pg_ts_template -+ 4294966981 | pg_ts_parser -+ 4294966982 | pg_ts_dict -+ 4294966983 | pg_ts_config -+ 4294966984 | pg_ts_config_map -+ 4294966985 | pg_trigger -+ 4294966986 | pg_transform -+ 4294966987 | pg_timezone_names -+ 4294966988 | pg_timezone_abbrevs -+ 4294966989 | pg_tablespace -+ 4294966990 | pg_tables -+ 4294966991 | pg_subscription -+ 4294966992 | pg_subscription_rel -+ 4294966993 | pg_stats -+ 4294966994 | pg_stats_ext -+ 4294966995 | pg_statistic -+ 4294966996 | pg_statistic_ext -+ 4294966997 | pg_statistic_ext_data -+ 4294966998 | pg_statio_user_tables -+ 4294966999 | pg_statio_user_sequences -+ 4294967000 | pg_statio_user_indexes -+ 4294967001 | pg_statio_sys_tables -+ 4294967002 | pg_statio_sys_sequences -+ 4294967003 | pg_statio_sys_indexes -+ 4294967004 | pg_statio_all_tables -+ 4294967005 | pg_statio_all_sequences -+ 4294967006 | pg_statio_all_indexes -+ 4294967007 | pg_stat_xact_user_tables -+ 4294967008 | pg_stat_xact_user_functions -+ 4294967009 | pg_stat_xact_sys_tables -+ 4294967010 | pg_stat_xact_all_tables -+ 4294967011 | pg_stat_wal_receiver -+ 4294967012 | pg_stat_user_tables -+ 4294967013 | pg_stat_user_indexes -+ 4294967014 | pg_stat_user_functions -+ 4294967015 | pg_stat_sys_tables -+ 4294967016 | pg_stat_sys_indexes -+ 4294967017 | pg_stat_subscription -+ 4294967018 | pg_stat_ssl -+ 4294967019 | pg_stat_slru -+ 4294967020 | pg_stat_replication -+ 4294967021 | pg_stat_progress_vacuum -+ 4294967022 | pg_stat_progress_create_index -+ 4294967023 | pg_stat_progress_cluster -+ 4294967024 | pg_stat_progress_basebackup -+ 4294967025 | pg_stat_progress_analyze -+ 4294967026 | pg_stat_gssapi -+ 4294967027 | pg_stat_database -+ 4294967028 | pg_stat_database_conflicts -+ 4294967029 | pg_stat_bgwriter -+ 4294967030 | pg_stat_archiver -+ 4294967031 | pg_stat_all_tables -+ 4294967032 | pg_stat_all_indexes -+ 4294967033 | pg_stat_activity -+ 4294967034 | pg_shmem_allocations -+ 4294967035 | pg_shdepend -+ 4294967036 | pg_shseclabel -+ 4294967037 | pg_shdescription -+ 4294967038 | pg_shadow -+ 4294967039 | pg_settings -+ 4294967040 | pg_sequences -+ 4294967041 | pg_sequence -+ 4294967042 | pg_seclabel -+ 4294967043 | pg_seclabels -+ 4294967044 | pg_rules -+ 4294967045 | pg_roles -+ 4294967046 | pg_rewrite -+ 4294967047 | pg_replication_slots -+ 4294967048 | pg_replication_origin -+ 4294967049 | pg_replication_origin_status -+ 4294967050 | pg_range -+ 4294967051 | pg_publication_tables -+ 4294967052 | pg_publication -+ 4294967053 | pg_publication_rel -+ 4294967054 | pg_proc -+ 4294967055 | pg_prepared_xacts -+ 4294967056 | pg_prepared_statements -+ 4294967057 | pg_policy -+ 4294967058 | pg_policies -+ 4294967059 | pg_partitioned_table -+ 4294967060 | pg_opfamily -+ 4294967061 | pg_operator -+ 4294967062 | pg_opclass -+ 4294967063 | pg_namespace -+ 4294967064 | pg_matviews -+ 4294967065 | pg_locks -+ 4294967066 | pg_largeobject -+ 4294967067 | pg_largeobject_metadata 
-+ 4294967068 | pg_language -+ 4294967069 | pg_init_privs -+ 4294967070 | pg_inherits -+ 4294967071 | pg_indexes -+ 4294967072 | pg_index -+ 4294967073 | pg_hba_file_rules -+ 4294967074 | pg_group -+ 4294967075 | pg_foreign_table -+ 4294967076 | pg_foreign_server -+ 4294967077 | pg_foreign_data_wrapper -+ 4294967078 | pg_file_settings -+ 4294967079 | pg_extension -+ 4294967080 | pg_event_trigger -+ 4294967081 | pg_enum -+ 4294967082 | pg_description -+ 4294967083 | pg_depend -+ 4294967084 | pg_default_acl -+ 4294967085 | pg_db_role_setting -+ 4294967086 | pg_database -+ 4294967087 | pg_cursors -+ 4294967088 | pg_conversion -+ 4294967089 | pg_constraint -+ 4294967090 | pg_config -+ 4294967091 | pg_collation -+ 4294967092 | pg_class -+ 4294967093 | pg_cast -+ 4294967094 | pg_available_extensions -+ 4294967095 | pg_available_extension_versions -+ 4294967096 | pg_auth_members -+ 4294967097 | pg_authid -+ 4294967098 | pg_attribute -+ 4294967099 | pg_attrdef -+ 4294967100 | pg_amproc -+ 4294967101 | pg_amop -+ 4294967102 | pg_am -+ 4294967103 | pg_aggregate -+ 4294967105 | views -+ 4294967106 | view_table_usage -+ 4294967107 | view_routine_usage -+ 4294967108 | view_column_usage -+ 4294967109 | user_privileges -+ 4294967110 | user_mappings -+ 4294967111 | user_mapping_options -+ 4294967112 | user_defined_types -+ 4294967113 | user_attributes -+ 4294967114 | usage_privileges -+ 4294967115 | udt_privileges -+ 4294967116 | type_privileges -+ 4294967117 | triggers -+ 4294967118 | triggered_update_columns -+ 4294967119 | transforms -+ 4294967120 | tablespaces -+ 4294967121 | tablespaces_extensions -+ 4294967122 | tables -+ 4294967123 | tables_extensions -+ 4294967124 | table_privileges -+ 4294967125 | table_constraints_extensions -+ 4294967126 | table_constraints -+ 4294967127 | statistics -+ 4294967128 | st_units_of_measure -+ 4294967129 | st_spatial_reference_systems -+ 4294967130 | st_geometry_columns -+ 4294967131 | session_variables -+ 4294967132 | sequences -+ 4294967133 | schema_privileges -+ 4294967134 | schemata -+ 4294967135 | schemata_extensions -+ 4294967136 | sql_sizing -+ 4294967137 | sql_parts -+ 4294967138 | sql_implementation_info -+ 4294967139 | sql_features -+ 4294967140 | routines -+ 4294967141 | routine_privileges -+ 4294967142 | role_usage_grants -+ 4294967143 | role_udt_grants -+ 4294967144 | role_table_grants -+ 4294967145 | role_routine_grants -+ 4294967146 | role_column_grants -+ 4294967147 | resource_groups -+ 4294967148 | referential_constraints -+ 4294967149 | profiling -+ 4294967150 | processlist -+ 4294967151 | plugins -+ 4294967152 | partitions -+ 4294967153 | parameters -+ 4294967154 | optimizer_trace -+ 4294967155 | keywords -+ 4294967156 | key_column_usage -+ 4294967157 | information_schema_catalog_name -+ 4294967158 | foreign_tables -+ 4294967159 | foreign_table_options -+ 4294967160 | foreign_servers -+ 4294967161 | foreign_server_options -+ 4294967162 | foreign_data_wrappers -+ 4294967163 | foreign_data_wrapper_options -+ 4294967164 | files -+ 4294967165 | events -+ 4294967166 | engines -+ 4294967167 | enabled_roles -+ 4294967168 | element_types -+ 4294967169 | domains -+ 4294967170 | domain_udt_usage -+ 4294967171 | domain_constraints -+ 4294967172 | data_type_privileges -+ 4294967173 | constraint_table_usage -+ 4294967174 | constraint_column_usage -+ 4294967175 | columns -+ 4294967176 | columns_extensions -+ 4294967177 | column_udt_usage -+ 4294967178 | column_statistics -+ 4294967179 | column_privileges -+ 4294967180 | column_options -+ 4294967181 | 
column_domain_usage -+ 4294967182 | column_column_usage -+ 4294967183 | collations -+ 4294967184 | collation_character_set_applicability -+ 4294967185 | check_constraints -+ 4294967186 | check_constraint_routine_usage -+ 4294967187 | character_sets -+ 4294967188 | attributes -+ 4294967189 | applicable_roles -+ 4294967190 | administrable_role_authorizations ++ 4294966969 | spatial_ref_sys ++ 4294966970 | geometry_columns ++ 4294966971 | geography_columns ++ 4294966973 | pg_views ++ 4294966974 | pg_user ++ 4294966975 | pg_user_mappings ++ 4294966976 | pg_user_mapping ++ 4294966977 | pg_type ++ 4294966978 | pg_ts_template ++ 4294966979 | pg_ts_parser ++ 4294966980 | pg_ts_dict ++ 4294966981 | pg_ts_config ++ 4294966982 | pg_ts_config_map ++ 4294966983 | pg_trigger ++ 4294966984 | pg_transform ++ 4294966985 | pg_timezone_names ++ 4294966986 | pg_timezone_abbrevs ++ 4294966987 | pg_tablespace ++ 4294966988 | pg_tables ++ 4294966989 | pg_subscription ++ 4294966990 | pg_subscription_rel ++ 4294966991 | pg_stats ++ 4294966992 | pg_stats_ext ++ 4294966993 | pg_statistic ++ 4294966994 | pg_statistic_ext ++ 4294966995 | pg_statistic_ext_data ++ 4294966996 | pg_statio_user_tables ++ 4294966997 | pg_statio_user_sequences ++ 4294966998 | pg_statio_user_indexes ++ 4294966999 | pg_statio_sys_tables ++ 4294967000 | pg_statio_sys_sequences ++ 4294967001 | pg_statio_sys_indexes ++ 4294967002 | pg_statio_all_tables ++ 4294967003 | pg_statio_all_sequences ++ 4294967004 | pg_statio_all_indexes ++ 4294967005 | pg_stat_xact_user_tables ++ 4294967006 | pg_stat_xact_user_functions ++ 4294967007 | pg_stat_xact_sys_tables ++ 4294967008 | pg_stat_xact_all_tables ++ 4294967009 | pg_stat_wal_receiver ++ 4294967010 | pg_stat_user_tables ++ 4294967011 | pg_stat_user_indexes ++ 4294967012 | pg_stat_user_functions ++ 4294967013 | pg_stat_sys_tables ++ 4294967014 | pg_stat_sys_indexes ++ 4294967015 | pg_stat_subscription ++ 4294967016 | pg_stat_ssl ++ 4294967017 | pg_stat_slru ++ 4294967018 | pg_stat_replication ++ 4294967019 | pg_stat_progress_vacuum ++ 4294967020 | pg_stat_progress_create_index ++ 4294967021 | pg_stat_progress_cluster ++ 4294967022 | pg_stat_progress_basebackup ++ 4294967023 | pg_stat_progress_analyze ++ 4294967024 | pg_stat_gssapi ++ 4294967025 | pg_stat_database ++ 4294967026 | pg_stat_database_conflicts ++ 4294967027 | pg_stat_bgwriter ++ 4294967028 | pg_stat_archiver ++ 4294967029 | pg_stat_all_tables ++ 4294967030 | pg_stat_all_indexes ++ 4294967031 | pg_stat_activity ++ 4294967032 | pg_shmem_allocations ++ 4294967033 | pg_shdepend ++ 4294967034 | pg_shseclabel ++ 4294967035 | pg_shdescription ++ 4294967036 | pg_shadow ++ 4294967037 | pg_settings ++ 4294967038 | pg_sequences ++ 4294967039 | pg_sequence ++ 4294967040 | pg_seclabel ++ 4294967041 | pg_seclabels ++ 4294967042 | pg_rules ++ 4294967043 | pg_roles ++ 4294967044 | pg_rewrite ++ 4294967045 | pg_replication_slots ++ 4294967046 | pg_replication_origin ++ 4294967047 | pg_replication_origin_status ++ 4294967048 | pg_range ++ 4294967049 | pg_publication_tables ++ 4294967050 | pg_publication ++ 4294967051 | pg_publication_rel ++ 4294967052 | pg_proc ++ 4294967053 | pg_prepared_xacts ++ 4294967054 | pg_prepared_statements ++ 4294967055 | pg_policy ++ 4294967056 | pg_policies ++ 4294967057 | pg_partitioned_table ++ 4294967058 | pg_opfamily ++ 4294967059 | pg_operator ++ 4294967060 | pg_opclass ++ 4294967061 | pg_namespace ++ 4294967062 | pg_matviews ++ 4294967063 | pg_locks ++ 4294967064 | pg_largeobject ++ 4294967065 | pg_largeobject_metadata ++ 
4294967066 | pg_language ++ 4294967067 | pg_init_privs ++ 4294967068 | pg_inherits ++ 4294967069 | pg_indexes ++ 4294967070 | pg_index ++ 4294967071 | pg_hba_file_rules ++ 4294967072 | pg_group ++ 4294967073 | pg_foreign_table ++ 4294967074 | pg_foreign_server ++ 4294967075 | pg_foreign_data_wrapper ++ 4294967076 | pg_file_settings ++ 4294967077 | pg_extension ++ 4294967078 | pg_event_trigger ++ 4294967079 | pg_enum ++ 4294967080 | pg_description ++ 4294967081 | pg_depend ++ 4294967082 | pg_default_acl ++ 4294967083 | pg_db_role_setting ++ 4294967084 | pg_database ++ 4294967085 | pg_cursors ++ 4294967086 | pg_conversion ++ 4294967087 | pg_constraint ++ 4294967088 | pg_config ++ 4294967089 | pg_collation ++ 4294967090 | pg_class ++ 4294967091 | pg_cast ++ 4294967092 | pg_available_extensions ++ 4294967093 | pg_available_extension_versions ++ 4294967094 | pg_auth_members ++ 4294967095 | pg_authid ++ 4294967096 | pg_attribute ++ 4294967097 | pg_attrdef ++ 4294967098 | pg_amproc ++ 4294967099 | pg_amop ++ 4294967100 | pg_am ++ 4294967101 | pg_aggregate ++ 4294967103 | views ++ 4294967104 | view_table_usage ++ 4294967105 | view_routine_usage ++ 4294967106 | view_column_usage ++ 4294967107 | user_privileges ++ 4294967108 | user_mappings ++ 4294967109 | user_mapping_options ++ 4294967110 | user_defined_types ++ 4294967111 | user_attributes ++ 4294967112 | usage_privileges ++ 4294967113 | udt_privileges ++ 4294967114 | type_privileges ++ 4294967115 | triggers ++ 4294967116 | triggered_update_columns ++ 4294967117 | transforms ++ 4294967118 | tablespaces ++ 4294967119 | tablespaces_extensions ++ 4294967120 | tables ++ 4294967121 | tables_extensions ++ 4294967122 | table_privileges ++ 4294967123 | table_constraints_extensions ++ 4294967124 | table_constraints ++ 4294967125 | statistics ++ 4294967126 | st_units_of_measure ++ 4294967127 | st_spatial_reference_systems ++ 4294967128 | st_geometry_columns ++ 4294967129 | session_variables ++ 4294967130 | sequences ++ 4294967131 | schema_privileges ++ 4294967132 | schemata ++ 4294967133 | schemata_extensions ++ 4294967134 | sql_sizing ++ 4294967135 | sql_parts ++ 4294967136 | sql_implementation_info ++ 4294967137 | sql_features ++ 4294967138 | routines ++ 4294967139 | routine_privileges ++ 4294967140 | role_usage_grants ++ 4294967141 | role_udt_grants ++ 4294967142 | role_table_grants ++ 4294967143 | role_routine_grants ++ 4294967144 | role_column_grants ++ 4294967145 | resource_groups ++ 4294967146 | referential_constraints ++ 4294967147 | profiling ++ 4294967148 | processlist ++ 4294967149 | plugins ++ 4294967150 | partitions ++ 4294967151 | parameters ++ 4294967152 | optimizer_trace ++ 4294967153 | keywords ++ 4294967154 | key_column_usage ++ 4294967155 | information_schema_catalog_name ++ 4294967156 | foreign_tables ++ 4294967157 | foreign_table_options ++ 4294967158 | foreign_servers ++ 4294967159 | foreign_server_options ++ 4294967160 | foreign_data_wrappers ++ 4294967161 | foreign_data_wrapper_options ++ 4294967162 | files ++ 4294967163 | events ++ 4294967164 | engines ++ 4294967165 | enabled_roles ++ 4294967166 | element_types ++ 4294967167 | domains ++ 4294967168 | domain_udt_usage ++ 4294967169 | domain_constraints ++ 4294967170 | data_type_privileges ++ 4294967171 | constraint_table_usage ++ 4294967172 | constraint_column_usage ++ 4294967173 | columns ++ 4294967174 | columns_extensions ++ 4294967175 | column_udt_usage ++ 4294967176 | column_statistics ++ 4294967177 | column_privileges ++ 4294967178 | column_options ++ 4294967179 | 
column_domain_usage ++ 4294967180 | column_column_usage ++ 4294967181 | collations ++ 4294967182 | collation_character_set_applicability ++ 4294967183 | check_constraints ++ 4294967184 | check_constraint_routine_usage ++ 4294967185 | character_sets ++ 4294967186 | attributes ++ 4294967187 | applicable_roles ++ 4294967188 | administrable_role_authorizations ++ 4294967190 | fully_qualified_names ++ 4294967191 | logical_replication_node_processors + 4294967192 | cluster_replication_node_stream_checkpoints + 4294967193 | cluster_replication_node_stream_spans + 4294967194 | cluster_replication_node_streams @@ -42475,11 +42387,11 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/type_sanity.out - + 4294967292 | builtin_functions + 4294967293 | node_build_info + 4294967294 | backward_dependencies -+(352 rows) ++(354 rows) -- Make sure typarray points to a "true" array type of our own base SELECT t1.oid, t1.typname as basetype, t2.typname as arraytype, -@@ -84,10 +444,7 @@ +@@ -84,10 +446,7 @@ WHERE t1.typarray <> 0 AND (t2.oid IS NULL OR t2.typsubscript <> 'array_subscript_handler'::regproc); @@ -42491,7 +42403,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/type_sanity.out - -- Look for range types that do not have a pg_range entry SELECT t1.oid, t1.typname FROM pg_type as t1 -@@ -128,10 +485,7 @@ +@@ -128,10 +487,7 @@ (p1.pronargs = 3 AND p1.proargtypes[0] = 'cstring'::regtype AND p1.proargtypes[1] = 'oid'::regtype AND p1.proargtypes[2] = 'int4'::regtype)); @@ -42503,7 +42415,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/type_sanity.out - -- As of 8.0, this check finds refcursor, which is borrowing -- other types' I/O routines SELECT t1.oid, t1.typname, p1.oid, p1.proname -@@ -140,10 +494,9 @@ +@@ -140,10 +496,9 @@ (t1.typelem != 0 AND t1.typlen < 0) AND NOT (p1.prorettype = t1.oid AND NOT p1.proretset) ORDER BY 1; @@ -42517,7 +42429,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/type_sanity.out - -- Varlena array types will point to array_in -- Exception as of 8.1: int2vector and oidvector have their own I/O routines -@@ -153,10 +506,10 @@ +@@ -153,10 +508,10 @@ (t1.typelem != 0 AND t1.typlen < 0) AND NOT (p1.oid = 'array_in'::regproc) ORDER BY 1; @@ -42532,7 +42444,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/type_sanity.out - (2 rows) -- typinput routines should not be volatile -@@ -172,14 +525,11 @@ +@@ -172,14 +527,11 @@ FROM pg_type AS t1 WHERE t1.typtype not in ('b', 'p') ORDER BY 1; @@ -42550,7 +42462,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/type_sanity.out - -- Check for bogus typoutput routines -- As of 8.0, this check finds refcursor, which is borrowing -@@ -192,19 +542,15 @@ +@@ -192,19 +544,15 @@ (p1.oid = 'array_out'::regproc AND t1.typelem != 0 AND t1.typlen = -1))) ORDER BY 1; @@ -42574,7 +42486,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/type_sanity.out - -- typoutput routines should not be volatile SELECT t1.oid, t1.typname, p1.oid, p1.proname FROM pg_type AS t1, pg_proc AS p1 -@@ -218,13 +564,11 @@ +@@ -218,13 +566,11 @@ FROM pg_type AS t1 WHERE t1.typtype not in ('b', 'd', 'p') ORDER BY 1; @@ -42591,7 +42503,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/type_sanity.out - -- Domains should have same typoutput as their base types SELECT t1.oid, t1.typname, t2.oid, t2.typname -@@ -244,10 +588,7 @@ +@@ -244,10 +590,7 @@ (p1.pronargs = 3 AND p1.proargtypes[0] = 'internal'::regtype AND p1.proargtypes[1] = 
'oid'::regtype AND p1.proargtypes[2] = 'int4'::regtype)); @@ -42603,7 +42515,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/type_sanity.out - -- As of 7.4, this check finds refcursor, which is borrowing -- other types' I/O routines SELECT t1.oid, t1.typname, p1.oid, p1.proname -@@ -256,10 +597,9 @@ +@@ -256,10 +599,9 @@ (t1.typelem != 0 AND t1.typlen < 0) AND NOT (p1.prorettype = t1.oid AND NOT p1.proretset) ORDER BY 1; @@ -42617,7 +42529,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/type_sanity.out - -- Varlena array types will point to array_recv -- Exception as of 8.1: int2vector and oidvector have their own I/O routines -@@ -271,8 +611,8 @@ +@@ -271,8 +613,8 @@ ORDER BY 1; oid | typname | oid | proname -----+------------+------+---------------- @@ -42628,7 +42540,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/type_sanity.out - (2 rows) -- Suspicious if typreceive doesn't take same number of args as typinput -@@ -297,14 +637,11 @@ +@@ -297,14 +639,11 @@ FROM pg_type AS t1 WHERE t1.typtype not in ('b', 'p') ORDER BY 1; @@ -42646,7 +42558,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/type_sanity.out - -- Check for bogus typsend routines -- As of 7.4, this check finds refcursor, which is borrowing -@@ -317,10 +654,9 @@ +@@ -317,10 +656,9 @@ (p1.oid = 'array_send'::regproc AND t1.typelem != 0 AND t1.typlen = -1))) ORDER BY 1; @@ -42660,7 +42572,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/type_sanity.out - SELECT t1.oid, t1.typname, p1.oid, p1.proname FROM pg_type AS t1, pg_proc AS p1 -@@ -343,13 +679,11 @@ +@@ -343,13 +681,11 @@ FROM pg_type AS t1 WHERE t1.typtype not in ('b', 'd', 'p') ORDER BY 1; @@ -42677,7 +42589,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/type_sanity.out - -- Domains should have same typsend as their base types SELECT t1.oid, t1.typname, t2.oid, t2.typname -@@ -366,10 +700,7 @@ +@@ -366,10 +702,7 @@ (p1.pronargs = 1 AND p1.proargtypes[0] = 'cstring[]'::regtype AND p1.prorettype = 'int4'::regtype AND NOT p1.proretset); @@ -42689,7 +42601,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/type_sanity.out - -- typmodin routines should not be volatile SELECT t1.oid, t1.typname, p1.oid, p1.proname FROM pg_type AS t1, pg_proc AS p1 -@@ -385,10 +716,7 @@ +@@ -385,10 +718,7 @@ (p1.pronargs = 1 AND p1.proargtypes[0] = 'int4'::regtype AND p1.prorettype = 'cstring'::regtype AND NOT p1.proretset); @@ -42701,7 +42613,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/type_sanity.out - -- typmodout routines should not be volatile SELECT t1.oid, t1.typname, p1.oid, p1.proname FROM pg_type AS t1, pg_proc AS p1 -@@ -409,7 +737,8 @@ +@@ -409,7 +739,8 @@ -- Array types should have same typdelim as their element types SELECT t1.oid, t1.typname, t2.oid, t2.typname FROM pg_type AS t1, pg_type AS t2 @@ -42711,7 +42623,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/type_sanity.out - oid | typname | oid | typname -----+---------+-----+--------- (0 rows) -@@ -428,29 +757,20 @@ +@@ -428,29 +759,20 @@ SELECT t1.oid, t1.typname, t1.typelem FROM pg_type AS t1 WHERE t1.typelem != 0 AND t1.typsubscript = 0; @@ -42744,7 +42656,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/type_sanity.out - -- Check for bogus typanalyze routines SELECT t1.oid, t1.typname, p1.oid, p1.proname FROM pg_type AS t1, pg_proc AS p1 -@@ -458,10 +778,7 @@ +@@ -458,10 +780,7 @@ (p1.pronargs = 1 AND p1.proargtypes[0] = 
'internal'::regtype AND p1.prorettype = 'bool'::regtype AND NOT p1.proretset); @@ -42756,7 +42668,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/type_sanity.out - -- there does not seem to be a reason to care about volatility of typanalyze -- domains inherit their base type's typanalyze SELECT d.oid, d.typname, d.typanalyze, t.oid, t.typname, t.typanalyze -@@ -477,10 +794,7 @@ +@@ -477,10 +796,7 @@ FROM pg_type t LEFT JOIN pg_range r on t.oid = r.rngtypid WHERE t.typbasetype = 0 AND (t.typanalyze = 'range_typanalyze'::regproc) != (r.rngtypid IS NOT NULL); @@ -42768,7 +42680,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/type_sanity.out - -- array_typanalyze should be used for all and only array types -- (but exclude domains, which we checked above) -- As of 9.2 this finds int2vector and oidvector, which are weird anyway -@@ -490,12 +804,7 @@ +@@ -490,12 +806,7 @@ (t.typanalyze = 'array_typanalyze'::regproc) != (t.typsubscript = 'array_subscript_handler'::regproc) ORDER BY 1; @@ -42782,7 +42694,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/type_sanity.out - -- **************** pg_class **************** -- Look for illegal values in pg_class fields SELECT c1.oid, c1.relname -@@ -535,14 +844,10 @@ +@@ -535,14 +846,10 @@ (0 rows) -- Tables, matviews etc should have AMs of type 't' @@ -42801,7 +42713,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/type_sanity.out - -- **************** pg_attribute **************** -- Look for illegal values in pg_attribute fields SELECT a1.attrelid, a1.attname -@@ -555,22 +860,46 @@ +@@ -555,22 +862,46 @@ (0 rows) -- Cross-check attnum against parent relation @@ -42846,19 +42758,19 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/type_sanity.out - + 1460608405 | table_spans_descriptor_id_idx + 115531396 | tables_parent_id_idx + 115531397 | tables_database_name_idx -+ 3436586999 | pg_attrdef_adrelid_idx -+ 2091509990 | pg_attribute_attrelid_idx -+ 1572037448 | pg_class_oid_idx -+ 4002455197 | pg_constraint_conrelid_idx -+ 134169115 | pg_namespace_oid_idx -+ 959773586 | pg_proc_oid_idx -+ 3573889688 | pg_timezone_names_name_idx -+ 1403208208 | pg_type_oid_idx ++ 1831773717 | pg_attrdef_adrelid_idx ++ 486696708 | pg_attribute_attrelid_idx ++ 4262191470 | pg_class_oid_idx ++ 2397641923 | pg_constraint_conrelid_idx ++ 2824323129 | pg_namespace_oid_idx ++ 3649927600 | pg_proc_oid_idx ++ 883735674 | pg_timezone_names_name_idx ++ 3008021490 | pg_type_oid_idx +(28 rows) -- Cross-check against pg_type entry -- NOTE: we allow attstorage to be 'plain' even when typstorage is not; -@@ -613,10 +942,7 @@ +@@ -613,10 +944,7 @@ EXISTS(select 1 from pg_catalog.pg_type where oid = r.rngsubtype and typelem != 0 and typsubscript = 'array_subscript_handler'::regproc))); @@ -42870,7 +42782,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/type_sanity.out - -- canonical function, if any, had better match the range type SELECT r.rngtypid, r.rngsubtype, p.proname FROM pg_range r JOIN pg_proc p ON p.oid = r.rngcanonical -@@ -639,10 +965,7 @@ +@@ -639,10 +967,7 @@ SELECT r.rngtypid, r.rngsubtype, r.rngmultitypid FROM pg_range r WHERE r.rngmultitypid IS NULL OR r.rngmultitypid = 0; @@ -42882,7 +42794,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/type_sanity.out - -- Create a table that holds all the known in-core data types and leave it -- around so as pg_upgrade is able to test their binary compatibility. 
CREATE TABLE tab_core_types AS SELECT -@@ -709,6 +1032,13 @@ +@@ -709,6 +1034,13 @@ '{(2020-01-02 03:04:05, 2021-02-03 06:07:08)}'::tsmultirange, '(2020-01-02 03:04:05, 2021-02-03 06:07:08)'::tstzrange, '{(2020-01-02 03:04:05, 2021-02-03 06:07:08)}'::tstzmultirange; @@ -42892,11 +42804,11 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/type_sanity.out - + '(11,12)'::point, + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev -- Sanity check on the previous table, checking that all core types are -- included in this table. SELECT oid, typname, typtype, typelem, typarray -@@ -736,7 +1066,4 @@ +@@ -736,7 +1068,4 @@ WHERE a.atttypid=t.oid AND a.attnum > 0 AND a.attrelid='tab_core_types'::regclass); @@ -43087,7 +42999,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/expressions.out - +select '(0,0)'::point in ('(0,0,0,0)'::box, point(0,0)) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev -- -- Tests for ScalarArrayOpExpr with a hashfn -- @@ -43100,7 +43012,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/expressions.out - +create type myint + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27793/v24.2 ++See: https://go.crdb.dev/issue-v/27793/dev create function myintin(cstring) returns myint strict immutable language internal as 'int4in'; -NOTICE: return type myint is only a shell @@ -43119,7 +43031,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/expressions.out - +create type myint (input = myintin, output = myintout, like = int4) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27793/v24.2 ++See: https://go.crdb.dev/issue-v/27793/dev create cast (int4 as myint) without function; +ERROR: at or near "(": syntax error: unimplemented: this syntax +DETAIL: source SQL: @@ -43178,7 +43090,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/expressions.out - +create operator = ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev create operator <> ( leftarg = myint, rightarg = myint, @@ -43191,7 +43103,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/expressions.out - +create operator <> ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev create operator class myint_ops default for type myint using hash as operator 1 = (myint, myint), @@ -43201,7 +43113,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/expressions.out - +create operator class myint_ops + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev create table inttest (a myint); +ERROR: current transaction is aborted, commands ignored until end of transaction block insert into inttest values(1::myint),(null); @@ -44112,7 +44024,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/copyselect.out -- +copy (select t from test1 where id = 1) to stdout csv header force quote t + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/41608/v24.2 ++See: https://go.crdb.dev/issue-v/41608/dev -- -- Test psql builtins, plain table -- @@ -44640,27 +44552,23 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/copydml.out --lab -- triggers create function qqq_trig() returns trigger as $$ begin -@@ -92,21 +439,29 @@ +@@ -92,21 +439,25 @@ end if; end $$ language plpgsql; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev create trigger qqqbef before insert or update or delete on copydml_test for each row execute procedure qqq_trig(); -+ERROR: at or near "qqqbef": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger qqqbef before insert or update or delete on copydml_test -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger qqqaf after insert or update or delete on copydml_test for each row execute procedure qqq_trig(); -+ERROR: at or near "qqqaf": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger qqqaf after insert or update or delete on copydml_test -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev copy (insert into copydml_test (t) values ('f') returning id) to stdout; -NOTICE: BEFORE INSERT 8 -8 @@ -44701,7 +44609,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/copy.out --label= +copy copytest to ':abs_builddir/results/copytest.csv' csv + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/97181/v24.2 ++See: https://go.crdb.dev/issue-v/97181/dev create temp table copytest2 (like copytest); copy copytest2 from :'filename' csv; +ERROR: at or near ":abs_builddir/results/copytest.csv": syntax error: unimplemented: this syntax @@ -44742,7 +44650,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/copy.out --label= +copy copytest to ':abs_builddir/results/copytest.csv' csv quote '''' escape E'\\' + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/97181/v24.2 ++See: https://go.crdb.dev/issue-v/97181/dev copy copytest2 from :'filename' csv quote '''' escape E'\\'; +ERROR: at or near ":abs_builddir/results/copytest.csv": syntax error: unimplemented: this syntax +DETAIL: source SQL: @@ -44776,7 +44684,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/copy.out --label= -- test header line feature create temp table copytest3 ( -@@ -46,81 +104,169 @@ +@@ -46,81 +104,165 @@ c1 int, "colname with tab: " text); copy copytest4 from stdin (header); @@ -44837,7 +44745,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/copy.out --label= +copy (select * from parted_copytest order by a) to ':abs_builddir/results/parted_copytest.csv' + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/96590/v24.2 ++See: https://go.crdb.dev/issue-v/96590/dev truncate parted_copytest; +ERROR: relation "parted_copytest" does not exist copy parted_copytest from :'filename'; @@ -44895,17 +44803,16 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/copy.out --label= return new; end; $$; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev create trigger part_ins_trig before insert on parted_copytest_a2 for each row execute procedure part_ins_func(); -+ERROR: at or near "part_ins_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger part_ins_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev copy parted_copytest from :'filename'; +ERROR: at or near ":abs_builddir/results/parted_copytest.csv": syntax error: unimplemented: this syntax +DETAIL: source SQL: @@ -44936,12 +44843,9 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/copy.out --label= create index on parted_copytest (b); +ERROR: relation "parted_copytest" does not exist drop trigger part_ins_trig on parted_copytest_a2; -+ERROR: at or near "part_ins_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+drop trigger part_ins_trig on parted_copytest_a2 -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev copy parted_copytest from stdin; +ERROR: relation "parted_copytest" does not exist +1 1 str1 @@ -44971,7 +44875,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/copy.out --label= -- -- Progress reporting for COPY -- -@@ -131,6 +277,15 @@ +@@ -131,6 +273,15 @@ salary int4, manager name ); @@ -44983,25 +44887,24 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/copy.out --label= + location point, + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev -- Add a trigger to catch and print the contents of the catalog view -- pg_stat_progress_copy during data insertion. 
This allows to test -- the validation of some progress reports for COPY FROM where the trigger -@@ -160,22 +315,60 @@ +@@ -160,22 +311,56 @@ return new; end; $$ language plpgsql; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev create trigger check_after_tab_progress_reporting after insert on tab_progress_reporting for each statement execute function notice_after_tab_progress_reporting(); -+ERROR: at or near "check_after_tab_progress_reporting": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger check_after_tab_progress_reporting -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev -- Generate COPY FROM report with PIPE. copy tab_progress_reporting from stdin; -INFO: progress: {"type": "PIPE", "command": "COPY FROM", "relname": "tab_progress_reporting", "has_bytes_total": false, "tuples_excluded": 0, "tuples_processed": 3, "has_bytes_processed": true} @@ -45037,12 +44940,9 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/copy.out --label= +We appreciate your feedback. + drop trigger check_after_tab_progress_reporting on tab_progress_reporting; -+ERROR: at or near "check_after_tab_progress_reporting": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+drop trigger check_after_tab_progress_reporting on tab_progress_reporting -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev drop function notice_after_tab_progress_reporting(); +ERROR: unknown function: notice_after_tab_progress_reporting() drop table tab_progress_reporting; @@ -45050,7 +44950,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/copy.out --label= -- Test header matching feature create table header_copytest ( a int, -@@ -186,57 +379,173 @@ +@@ -186,57 +371,173 @@ alter table header_copytest drop column c; alter table header_copytest add column c text; copy header_copytest to stdout with (header match); @@ -45060,7 +44960,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/copy.out --label= +copy header_copytest to stdout with (header match) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/97181/v24.2 ++See: https://go.crdb.dev/issue-v/97181/dev copy header_copytest from stdin with (header wrong_choice); -ERROR: header requires a Boolean value or "match" +ERROR: at or near "wrong_choice": syntax error: unimplemented: this syntax @@ -45958,7 +45858,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/insert_conflict.o +) inherits (cities) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev -- Create unique indexes. Due to a general limitation of inheritance, -- uniqueness is only enforced per-relation. Unique index inference -- specification will do the right thing, though. 
@@ -46077,7 +45977,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/insert_conflict.o +create table twoconstraints (f1 int unique, f2 box, + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev insert into twoconstraints values(1, '((0,0),(1,1))'); +ERROR: relation "twoconstraints" does not exist insert into twoconstraints values(1, '((2,2),(3,3))'); -- fail on f1 @@ -46380,22 +46280,21 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/insert_conflict.o -- test with statement level triggers create or replace function parted_conflict_update_func() returns trigger as $$ declare -@@ -852,15 +943,26 @@ +@@ -852,15 +943,25 @@ return new; end; $$ language plpgsql; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev create trigger parted_conflict_update after update on parted_conflict referencing new table as inserted for each statement execute procedure parted_conflict_update_func(); -+ERROR: at or near "parted_conflict_update": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger parted_conflict_update -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev truncate parted_conflict; +ERROR: relation "parted_conflict" does not exist insert into parted_conflict values (0, 'cero', 1); @@ -46517,21 +46416,21 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/insert.out --labe +insert into inserttest (f3.if1, f3.if2) values (1,array['foo']) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27792/v24.2 ++See: https://go.crdb.dev/issue-v/27792/dev insert into inserttest (f3.if1, f3.if2) values (1,'{foo}'), (2,'{bar}'); +ERROR: at or near "if1": syntax error: unimplemented: this syntax +DETAIL: source SQL: +insert into inserttest (f3.if1, f3.if2) values (1,'{foo}'), (2,'{bar}') + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27792/v24.2 ++See: https://go.crdb.dev/issue-v/27792/dev insert into inserttest (f3.if1, f3.if2) select 3, '{baz,quux}'; +ERROR: at or near "if1": syntax error: unimplemented: this syntax +DETAIL: source SQL: +insert into inserttest (f3.if1, f3.if2) select 3, '{baz,quux}' + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27792/v24.2 ++See: https://go.crdb.dev/issue-v/27792/dev insert into inserttest (f3.if1, f3.if2) values (1,default); -- not supported -ERROR: cannot set a subfield to DEFAULT -LINE 1: insert into inserttest (f3.if1, f3.if2) values (1,default); @@ -46541,28 +46440,28 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/insert.out --labe +insert into inserttest (f3.if1, f3.if2) values (1,default) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/27792/v24.2 ++See: https://go.crdb.dev/issue-v/27792/dev insert into inserttest (f3.if2[1], f3.if2[2]) values ('foo', 'bar'); +ERROR: at or near "if2": syntax error: unimplemented: this syntax +DETAIL: source SQL: +insert into inserttest (f3.if2[1], f3.if2[2]) values ('foo', 'bar') + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27792/v24.2 ++See: https://go.crdb.dev/issue-v/27792/dev insert into inserttest (f3.if2[1], f3.if2[2]) values ('foo', 'bar'), ('baz', 'quux'); +ERROR: at or near "if2": syntax error: unimplemented: this syntax +DETAIL: source SQL: +insert into inserttest (f3.if2[1], f3.if2[2]) values ('foo', 'bar'), ('baz', 'quux') + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27792/v24.2 ++See: https://go.crdb.dev/issue-v/27792/dev insert into inserttest (f3.if2[1], f3.if2[2]) select 'bear', 'beer'; +ERROR: at or near "if2": syntax error: unimplemented: this syntax +DETAIL: source SQL: +insert into inserttest (f3.if2[1], f3.if2[2]) select 'bear', 'beer' + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27792/v24.2 ++See: https://go.crdb.dev/issue-v/27792/dev insert into inserttest (f4[1].if2[1], f4[1].if2[2]) values ('foo', 'bar'); +ERROR: at or near "[": syntax error +DETAIL: source SQL: @@ -47339,19 +47238,18 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/insert.out --labe -- have a BR trigger modify the row such that the check_b is violated create function mlparted11_trig_fn() returns trigger AS -@@ -538,445 +768,712 @@ +@@ -538,445 +768,702 @@ end; $$ language plpgsql; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev create trigger mlparted11_trig before insert ON mlparted11 for each row execute procedure mlparted11_trig_fn(); -+ERROR: at or near "mlparted11_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger mlparted11_trig before insert ON mlparted11 -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev -- check that the correct row is shown when constraint check_b fails after -- "(1, 2)" is routed to mlparted11 (actually "(1, 4)" would be shown due -- to the BR trigger mlparted11_trig_fn) @@ -47360,12 +47258,9 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/insert.out --labe -DETAIL: Failing row contains (1, 4). +ERROR: relation "mlparted" does not exist drop trigger mlparted11_trig on mlparted11; -+ERROR: at or near "mlparted11_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+drop trigger mlparted11_trig on mlparted11 -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev drop function mlparted11_trig_fn(); +ERROR: unknown function: mlparted11_trig_fn() -- check that inserting into an internal partition successfully results in @@ -47469,14 +47364,13 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/insert.out --labe -DETAIL: Failing row contains (1, 45, a). 
+ERROR: relation "mlparted" does not exist create function mlparted5abrtrig_func() returns trigger as $$ begin new.c = 'b'; return new; end; $$ language plpgsql; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev create trigger mlparted5abrtrig before insert on mlparted5a for each row execute procedure mlparted5abrtrig_func(); -+ERROR: at or near "mlparted5abrtrig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger mlparted5abrtrig before insert on mlparted5a for each row execute procedure mlparted5abrtrig_func() -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev insert into mlparted5 (a, b, c) values (1, 40, 'a'); -ERROR: new row for relation "mlparted5a" violates partition constraint -DETAIL: Failing row contains (b, 1, 40). @@ -47857,14 +47751,13 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/insert.out --labe + ^ +HINT: try \h CREATE TABLE create or replace function brtrigpartcon1trigf() returns trigger as $$begin new.a := 2; return new; end$$ language plpgsql; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev create trigger brtrigpartcon1trig before insert on brtrigpartcon1 for each row execute procedure brtrigpartcon1trigf(); -+ERROR: at or near "brtrigpartcon1trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger brtrigpartcon1trig before insert on brtrigpartcon1 for each row execute procedure brtrigpartcon1trigf() -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev insert into brtrigpartcon values (1, 'hi there'); -ERROR: new row for relation "brtrigpartcon1" violates partition constraint -DETAIL: Failing row contains (2, hi there). @@ -47910,21 +47803,17 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/insert.out --labe create table donothingbrtrig_test2 (c text, b text, a int); alter table donothingbrtrig_test2 drop column c; create or replace function donothingbrtrig_func() returns trigger as $$begin raise notice 'b: %', new.b; return NULL; end$$ language plpgsql; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev create trigger donothingbrtrig1 before insert on donothingbrtrig_test1 for each row execute procedure donothingbrtrig_func(); -+ERROR: at or near "donothingbrtrig1": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger donothingbrtrig1 before insert on donothingbrtrig_test1 for each row execute procedure donothingbrtrig_func() -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger donothingbrtrig2 before insert on donothingbrtrig_test2 for each row execute procedure donothingbrtrig_func(); -+ERROR: at or near "donothingbrtrig2": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger donothingbrtrig2 before insert on donothingbrtrig_test2 for each row execute procedure donothingbrtrig_func() -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev alter table donothingbrtrig_test attach partition donothingbrtrig_test1 for values in (1); +ERROR: at or near "attach": syntax error +DETAIL: source SQL: @@ -48290,7 +48179,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_function_c -ERROR: could not access file "nosuchfile": No such file or directory +ERROR: unimplemented: C is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/102201/v24.2 ++See: https://go.crdb.dev/issue-v/102201/dev -- To produce stable regression test output, we have to filter the name -- of the regresslib file out of the error message in this test. \set VERBOSITY sqlstate @@ -48324,7 +48213,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_operator.o +CREATE OPERATOR ## ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev CREATE OPERATOR @#@ ( rightarg = int8, -- prefix procedure = factorial @@ -48334,7 +48223,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_operator.o +CREATE OPERATOR @#@ ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev CREATE OPERATOR #%# ( leftarg = int8, -- fail, postfix is no longer supported procedure = factorial @@ -48346,7 +48235,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_operator.o +CREATE OPERATOR #%# ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev -- Test operator created above SELECT @#@ 24; - ?column? @@ -48442,7 +48331,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_operator.o +CREATE OPERATOR => ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev -- lexing of <=, >=, <>, != has a number of edge cases -- (=> is tested elsewhere) -- this is legal because ! is not allowed in sql ops @@ -48455,7 +48344,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_operator.o +CREATE OPERATOR !=- ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev SELECT !=- 10; - ?column? ----------- @@ -48500,7 +48389,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_operator.o +CREATE OPERATOR schema_op1.#*# ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev ROLLBACK; -- Should fail. SETOF type functions not allowed as argument (testing leftarg) BEGIN TRANSACTION; @@ -48514,7 +48403,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_operator.o +CREATE OPERATOR #*# ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev ROLLBACK; -- Should fail. SETOF type functions not allowed as argument (testing rightarg) BEGIN TRANSACTION; @@ -48528,7 +48417,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_operator.o +CREATE OPERATOR #*# ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev ROLLBACK; -- Should work. Sample text-book case BEGIN TRANSACTION; @@ -48549,7 +48438,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_operator.o +CREATE OPERATOR === ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev ROLLBACK; -- Should fail. Invalid attribute CREATE OPERATOR #@%# ( @@ -48563,7 +48452,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_operator.o +CREATE OPERATOR #@%# ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev -- Should fail. At least rightarg should be mandatorily specified CREATE OPERATOR #@%# ( procedure = factorial @@ -48574,7 +48463,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_operator.o +CREATE OPERATOR #@%# ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev -- Should fail. Procedure should be mandatorily specified CREATE OPERATOR #@%# ( rightarg = int8 @@ -48585,7 +48474,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_operator.o +CREATE OPERATOR #@%# ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev -- Should fail. CREATE OPERATOR requires USAGE on TYPE BEGIN TRANSACTION; CREATE ROLE regress_rol_op3; @@ -48614,7 +48503,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_operator.o +CREATE OPERATOR #*# ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev ROLLBACK; -- Should fail. CREATE OPERATOR requires USAGE on TYPE (need to check separately for rightarg) BEGIN TRANSACTION; @@ -48644,7 +48533,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_operator.o +CREATE OPERATOR #*# ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev ROLLBACK; -- Should fail. CREATE OPERATOR requires EXECUTE on function BEGIN TRANSACTION; @@ -48674,7 +48563,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_operator.o +CREATE OPERATOR #*# ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev ROLLBACK; -- Should fail. CREATE OPERATOR requires USAGE on return TYPE BEGIN TRANSACTION; @@ -48704,7 +48593,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_operator.o +CREATE OPERATOR #*# ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev ROLLBACK; -- invalid: non-lowercase quoted identifiers CREATE OPERATOR === @@ -48727,7 +48616,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_operator.o +CREATE OPERATOR === + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_type.out --label=/mnt/data1/postgres/src/test/regress/results/create_type.out /mnt/data1/postgres/src/test/regress/expected/create_type.out /mnt/data1/postgres/src/test/regress/results/create_type.out --- /mnt/data1/postgres/src/test/regress/expected/create_type.out +++ /mnt/data1/postgres/src/test/regress/results/create_type.out @@ -48749,7 +48638,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_type.out - -DETAIL: Creating a shell type definition. +ERROR: unimplemented: C is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/102201/v24.2 ++See: https://go.crdb.dev/issue-v/102201/dev CREATE FUNCTION widget_out(widget) RETURNS cstring AS :'regresslib' @@ -48757,7 +48646,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_type.out - -NOTICE: argument type widget is only a shell +ERROR: unimplemented: C is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/102201/v24.2 ++See: https://go.crdb.dev/issue-v/102201/dev CREATE FUNCTION int44in(cstring) RETURNS city_budget AS :'regresslib' @@ -48766,7 +48655,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_type.out - -DETAIL: Creating a shell type definition. +ERROR: unimplemented: C is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/102201/v24.2 ++See: https://go.crdb.dev/issue-v/102201/dev CREATE FUNCTION int44out(city_budget) RETURNS cstring AS :'regresslib' @@ -48774,7 +48663,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_type.out - -NOTICE: argument type city_budget is only a shell +ERROR: unimplemented: C is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/102201/v24.2 ++See: https://go.crdb.dev/issue-v/102201/dev CREATE TYPE widget ( internallength = 24, input = widget_in, @@ -48788,7 +48677,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_type.out - + internallength = 24, + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27793/v24.2 ++See: https://go.crdb.dev/issue-v/27793/dev CREATE TYPE city_budget ( internallength = 16, input = int44in, @@ -48802,7 +48691,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_type.out - + internallength = 16, + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/27793/v24.2 ++See: https://go.crdb.dev/issue-v/27793/dev -- Test creation and destruction of shell types CREATE TYPE shell; +ERROR: at or near "EOF": syntax error: unimplemented: this syntax @@ -48810,7 +48699,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_type.out - +CREATE TYPE shell + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27793/v24.2 ++See: https://go.crdb.dev/issue-v/27793/dev CREATE TYPE shell; -- fail, type already present -ERROR: type "shell" already exists +ERROR: at or near "EOF": syntax error: unimplemented: this syntax @@ -48818,7 +48707,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_type.out - +CREATE TYPE shell + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27793/v24.2 ++See: https://go.crdb.dev/issue-v/27793/dev DROP TYPE shell; +ERROR: type "shell" does not exist DROP TYPE shell; -- fail, type not exist @@ -48830,7 +48719,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_type.out - +CREATE TYPE myshell + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27793/v24.2 ++See: https://go.crdb.dev/issue-v/27793/dev -- -- Test type-related default values (broken in releases before PG 7.2) -- @@ -48843,14 +48732,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_type.out - +CREATE TYPE int42 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27793/v24.2 ++See: https://go.crdb.dev/issue-v/27793/dev CREATE TYPE text_w_default; +ERROR: at or near "EOF": syntax error: unimplemented: this syntax +DETAIL: source SQL: +CREATE TYPE text_w_default + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27793/v24.2 ++See: https://go.crdb.dev/issue-v/27793/dev -- Make dummy I/O routines using the existing internal support for int4, text CREATE FUNCTION int42_in(cstring) RETURNS int42 @@ -48889,7 +48778,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_type.out - + internallength = 4, + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27793/v24.2 ++See: https://go.crdb.dev/issue-v/27793/dev CREATE TYPE text_w_default ( internallength = variable, input = text_w_default_in, @@ -48903,7 +48792,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_type.out - + internallength = variable, + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27793/v24.2 ++See: https://go.crdb.dev/issue-v/27793/dev CREATE TABLE default_test (f1 text_w_default, f2 int42); +ERROR: type "text_w_default" does not exist INSERT INTO default_test DEFAULT VALUES; @@ -48922,11 +48811,11 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_type.out - +CREATE TYPE bogus_type + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/27793/v24.2 ++See: https://go.crdb.dev/issue-v/27793/dev -- invalid: non-lowercase quoted identifiers CREATE TYPE bogus_type ( "Internallength" = 4, -@@ -119,116 +189,135 @@ +@@ -119,116 +189,126 @@ "Default" = 42, "Passedbyvalue" ); @@ -48955,7 +48844,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_type.out - + "Internallength" = 4, + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27793/v24.2 ++See: https://go.crdb.dev/issue-v/27793/dev -- invalid: input/output function incompatibility CREATE TYPE bogus_type (INPUT = array_in, OUTPUT = array_out, @@ -48967,7 +48856,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_type.out - +CREATE TYPE bogus_type (INPUT = array_in, + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27793/v24.2 ++See: https://go.crdb.dev/issue-v/27793/dev DROP TYPE bogus_type; +ERROR: type "bogus_type" does not exist -- It no longer is possible to issue CREATE TYPE without making a shell first @@ -48982,7 +48871,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_type.out - +CREATE TYPE bogus_type (INPUT = array_in, + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27793/v24.2 ++See: https://go.crdb.dev/issue-v/27793/dev -- Test stand-alone composite type CREATE TYPE default_test_row AS (f1 text_w_default, f2 int42); +ERROR: type "text_w_default" does not exist @@ -48999,21 +48888,11 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_type.out - +ERROR: unknown function: get_default_test() -- Test comments COMMENT ON TYPE bad IS 'bad comment'; --ERROR: type "bad" does not exist -+ERROR: at or near "type": syntax error -+DETAIL: source SQL: -+COMMENT ON TYPE bad IS 'bad comment' -+ ^ + ERROR: type "bad" does not exist COMMENT ON TYPE default_test_row IS 'good comment'; -+ERROR: at or near "type": syntax error -+DETAIL: source SQL: -+COMMENT ON TYPE default_test_row IS 'good comment' -+ ^ ++ERROR: type "default_test_row" does not exist COMMENT ON TYPE default_test_row IS NULL; -+ERROR: at or near "type": syntax error -+DETAIL: source SQL: -+COMMENT ON TYPE default_test_row IS NULL -+ ^ ++ERROR: type "default_test_row" does not exist COMMENT ON COLUMN default_test_row.nope IS 'bad comment'; -ERROR: column "nope" of relation "default_test_row" does not exist +ERROR: relation "default_test_row" does not exist @@ -49029,12 +48908,12 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_type.out - +CREATE TYPE text_w_default + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27793/v24.2 ++See: https://go.crdb.dev/issue-v/27793/dev DROP TYPE default_test_row CASCADE; -NOTICE: drop cascades to function get_default_test() +ERROR: unimplemented: DROP TYPE CASCADE is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/51480/v24.2 ++See: https://go.crdb.dev/issue-v/51480/dev DROP TABLE default_test; +ERROR: relation "default_test" does not exist -- Check dependencies are established when creating a new type @@ -49044,7 +48923,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_type.out - +CREATE TYPE base_type + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/27793/v24.2 ++See: https://go.crdb.dev/issue-v/27793/dev CREATE FUNCTION base_fn_in(cstring) RETURNS base_type AS 'boolin' LANGUAGE internal IMMUTABLE STRICT; -NOTICE: return type base_type is only a shell @@ -49059,7 +48938,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_type.out - +CREATE TYPE base_type(INPUT = base_fn_in, OUTPUT = base_fn_out) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27793/v24.2 ++See: https://go.crdb.dev/issue-v/27793/dev DROP FUNCTION base_fn_in(cstring); -- error -ERROR: cannot drop function base_fn_in(cstring) because other objects depend on it -DETAIL: type base_type depends on function base_fn_in(cstring) @@ -49084,7 +48963,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_type.out - -drop cascades to function base_fn_out(base_type) +ERROR: unimplemented: DROP TYPE CASCADE is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/51480/v24.2 ++See: https://go.crdb.dev/issue-v/51480/dev -- Check usage of typmod with a user-defined type -- (we have borrowed numeric's typmod functions) CREATE TEMP TABLE mytab (foo widget(42,13,7)); -- should fail @@ -49124,7 +49003,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_type.out - -- and test format_type() a bit more, too select format_type('varchar'::regtype, 42); format_type -@@ -251,34 +340,28 @@ +@@ -251,34 +331,28 @@ -- Test non-error-throwing APIs using widget, which still throws errors SELECT pg_input_is_valid('(1,2,3)', 'widget'); @@ -49167,11 +49046,11 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_type.out - +CREATE FUNCTION pt_in_widget(point, widget) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev CREATE OPERATOR <% ( leftarg = point, rightarg = widget, -@@ -286,60 +369,161 @@ +@@ -286,60 +360,161 @@ commutator = >% , negator = >=% ); @@ -49180,7 +49059,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_type.out - +CREATE OPERATOR <% ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev SELECT point '(1,2)' <% widget '(0,0,3)' AS t, point '(1,2)' <% widget '(0,0,1)' AS f; - t | f @@ -49205,7 +49084,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_type.out - + location box, + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev INSERT INTO city VALUES ('Podunk', '(1,2),(3,4)', '100,127,1000'), ('Gotham', '(1000,34),(1100,334)', '123456,127,-1000,6789'); @@ -49228,7 +49107,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_type.out - +CREATE TYPE myvarchar + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/27793/v24.2 ++See: https://go.crdb.dev/issue-v/27793/dev CREATE FUNCTION myvarcharin(cstring, oid, integer) RETURNS myvarchar LANGUAGE internal IMMUTABLE PARALLEL SAFE STRICT AS 'varcharin'; -NOTICE: return type myvarchar is only a shell @@ -49325,7 +49204,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_type.out - + input = myvarcharin, + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27793/v24.2 ++See: https://go.crdb.dev/issue-v/27793/dev -- want to check updating of a domain over the target type, too CREATE DOMAIN myvarchardom AS myvarchar; +ERROR: at or near "as": syntax error: unimplemented: this syntax @@ -49333,7 +49212,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_type.out - +CREATE DOMAIN myvarchardom AS myvarchar + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev ALTER TYPE myvarchar SET (storage = plain); -- not allowed -ERROR: cannot change type's storage to PLAIN +ERROR: at or near "(": syntax error @@ -49350,7 +49229,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_type.out - ALTER TYPE myvarchar SET ( send = myvarcharsend, receive = myvarcharrecv, -@@ -349,59 +533,33 @@ +@@ -349,59 +524,33 @@ analyze = ts_typanalyze, subscript = raw_array_subscript_handler ); @@ -49423,7 +49302,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_type.out - -drop cascades to type myvarchardom +ERROR: unimplemented: DROP TYPE CASCADE is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/51480/v24.2 ++See: https://go.crdb.dev/issue-v/51480/dev diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_procedure.out --label=/mnt/data1/postgres/src/test/regress/results/create_procedure.out /mnt/data1/postgres/src/test/regress/expected/create_procedure.out /mnt/data1/postgres/src/test/regress/results/create_procedure.out --- /mnt/data1/postgres/src/test/regress/expected/create_procedure.out +++ /mnt/data1/postgres/src/test/regress/results/create_procedure.out @@ -49852,7 +49731,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_procedure. +CREATE PROCEDURE ptest11(a OUT int, VARIADIC b int[]) LANGUAGE SQL + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/88947/v24.2 ++See: https://go.crdb.dev/issue-v/88947/dev CALL ptest11(null, 11, 12, 13); - a ----- @@ -49993,7 +49872,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_procedure. +CREATE PROCEDURE ptestx(a VARIADIC int[], b OUT int) LANGUAGE SQL + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/88947/v24.2 ++See: https://go.crdb.dev/issue-v/88947/dev CREATE PROCEDURE ptestx(a int DEFAULT 42, b OUT int) LANGUAGE SQL AS $$ SELECT a $$; ERROR: procedure OUT parameters cannot appear after one with a default value @@ -50331,7 +50210,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_misc.out - +) INHERITS (a_star) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev CREATE TABLE c_star ( c name ) INHERITS (a_star); @@ -50342,7 +50221,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_misc.out - +) INHERITS (a_star) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev CREATE TABLE d_star ( d float8 ) INHERITS (b_star, c_star); @@ -50355,7 +50234,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_misc.out - +) INHERITS (b_star, c_star) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev CREATE TABLE e_star ( e int2 ) INHERITS (c_star); @@ -50366,7 +50245,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_misc.out - +) INHERITS (c_star) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev CREATE TABLE f_star ( f polygon ) INHERITS (e_star); @@ -50377,7 +50256,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_misc.out - +) INHERITS (e_star) +^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev INSERT INTO a_star (class, a) VALUES ('a', 1); INSERT INTO a_star (class, a) VALUES ('a', 2); INSERT INTO a_star (class) VALUES ('a'); @@ -50469,7 +50348,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_misc.out - + VALUES ('f', 19, 'hi claire'::name, '-5'::int2, '(1,3),(2,4)'::polygon) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev INSERT INTO f_star (class, a, c, e) VALUES ('f', 20, 'hi mike'::name, '-6'::int2); +ERROR: relation "f_star" does not exist @@ -50481,7 +50360,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_misc.out - + VALUES ('f', 21, 'hi marcel'::name, '(11,44),(22,55),(33,66)'::polygon) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev INSERT INTO f_star (class, a, e, f) VALUES ('f', 22, '-7'::int2, '(111,555),(222,666),(333,777),(444,888)'::polygon); +ERROR: at or near ")": syntax error: unimplemented: this syntax @@ -50490,7 +50369,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_misc.out - + VALUES ('f', 22, '-7'::int2, '(111,555),(222,666),(333,777),(444,888)'::polygon) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev INSERT INTO f_star (class, c, e, f) VALUES ('f', 'hi keith'::name, '-8'::int2, '(1111,3333),(2222,4444)'::polygon); @@ -50501,7 +50380,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_misc.out - + '(1111,3333),(2222,4444)'::polygon) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev INSERT INTO f_star (class, a, c) VALUES ('f', 24, 'hi marc'::name); +ERROR: relation "f_star" does not exist @@ -50516,7 +50395,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_misc.out - + VALUES ('f', 26, '(11111,33333),(22222,44444)'::polygon) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev INSERT INTO f_star (class, c, e) VALUES ('f', 'hi allison'::name, '-10'::int2); +ERROR: relation "f_star" does not exist @@ -50530,7 +50409,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_misc.out - + '(111111,333333),(222222,444444)'::polygon) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev INSERT INTO f_star (class, e, f) VALUES ('f', '-11'::int2, '(1111111,3333333),(2222222,4444444)'::polygon); +ERROR: at or near ")": syntax error: unimplemented: this syntax @@ -50539,7 +50418,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_misc.out - + VALUES ('f', '-11'::int2, '(1111111,3333333),(2222222,4444444)'::polygon) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev INSERT INTO f_star (class, a) VALUES ('f', 27); +ERROR: relation "f_star" does not exist INSERT INTO f_star (class, c) VALUES ('f', 'hi carl'::name); @@ -50554,7 +50433,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_misc.out - + VALUES ('f', '(11111111,33333333),(22222222,44444444)'::polygon) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev INSERT INTO f_star (class) VALUES ('f'); +ERROR: relation "f_star" does not exist -- Analyze the X_star tables for better plan stability in later tests @@ -51152,7 +51031,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_table.out SAVEPOINT q; DROP TABLE remember_create_subid; ROLLBACK TO q; +ERROR: unimplemented: ROLLBACK TO SAVEPOINT not yet supported after DDL statements +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/10735/v24.2 ++See: https://go.crdb.dev/issue-v/10735/dev COMMIT; DROP TABLE remember_create_subid; +ERROR: relation "remember_create_subid" does not exist @@ -51163,7 +51042,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_table.out SAVEPOINT q; DROP TABLE remember_node_subid; ROLLBACK TO q; +ERROR: unimplemented: ROLLBACK TO SAVEPOINT not yet supported after DDL statements +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/10735/v24.2 ++See: https://go.crdb.dev/issue-v/10735/dev COMMIT; DROP TABLE remember_node_subid; -- @@ -51179,7 +51058,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_table.out +) INHERITS (some_table) PARTITION BY LIST (a) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev -- cannot use more than 1 column as partition key for list partitioned table CREATE TABLE partitioned ( a1 int, @@ -51209,7 +51088,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_table.out + EXCLUDE USING gist (a WITH &&) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46657/v24.2 ++See: https://go.crdb.dev/issue-v/46657/dev -- prevent using prohibited expressions in the key CREATE FUNCTION retset (a int) RETURNS SETOF int AS $$ SELECT 1; $$ LANGUAGE SQL IMMUTABLE; CREATE TABLE partitioned ( @@ -51377,7 +51256,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_table.out +) PARTITION BY LIST (a) +^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev CREATE TABLE partitioned ( a point ) PARTITION BY LIST (a point_ops); @@ -51389,7 +51268,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_table.out +) PARTITION BY LIST (a point_ops) +^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev CREATE TABLE partitioned ( a point ) PARTITION BY RANGE (a); @@ -51402,7 +51281,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_table.out +) PARTITION BY RANGE (a) +^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev CREATE TABLE partitioned ( a point ) PARTITION BY RANGE (a point_ops); @@ -51414,7 +51293,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_table.out +) PARTITION BY RANGE (a point_ops) +^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev -- cannot add NO INHERIT constraints to partitioned tables CREATE TABLE partitioned ( a int, @@ -51478,7 +51357,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_table.out +CREATE TABLE fail () INHERITS (partitioned2) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev -- Partition key in describe output \d partitioned - Partitioned table "public.partitioned" @@ -51653,7 +51532,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_table.out +create domain intdom1 as int + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev create table partitioned ( a intdom1, b text @@ -51678,7 +51557,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_table.out +drop domain intdom1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev drop domain intdom1 cascade; -NOTICE: drop cascades to table partitioned +ERROR: at or near "intdom1": syntax error: unimplemented: this syntax @@ -51686,7 +51565,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_table.out +drop domain intdom1 cascade + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev table partitioned; -- gone ERROR: relation "partitioned" does not exist -LINE 1: table partitioned; @@ -51698,7 +51577,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_table.out +create domain intdom1 as int + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev create table partitioned ( a intdom1, b text @@ -51723,7 +51602,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_table.out +drop domain intdom1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev drop domain intdom1 cascade; -NOTICE: drop cascades to table partitioned +ERROR: at or near "intdom1": syntax error: unimplemented: this syntax @@ -51731,7 +51610,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_table.out +drop domain intdom1 cascade + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev table partitioned; -- gone ERROR: relation "partitioned" does not exist -LINE 1: table partitioned; @@ -51955,7 +51834,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_table.out +) PARTITION BY LIST (a) +^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/41578/v24.2 ++See: https://go.crdb.dev/issue-v/41578/dev CREATE TABLE moneyp_10 PARTITION OF moneyp FOR VALUES IN (10); +ERROR: at or near "partition": syntax error +DETAIL: source SQL: @@ -53059,7 +52938,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_table.out -- user-defined operator class in partition key CREATE FUNCTION my_int4_sort(int4,int4) RETURNS int LANGUAGE sql AS $$ SELECT CASE WHEN $1 = $2 THEN 0 WHEN $1 > $2 THEN 1 ELSE -1 END; $$; -@@ -958,117 +1699,260 @@ +@@ -958,117 +1699,259 @@ OPERATOR 1 < (int4,int4), OPERATOR 2 <= (int4,int4), OPERATOR 3 = (int4,int4), OPERATOR 4 >= (int4,int4), OPERATOR 5 > (int4,int4), FUNCTION 1 my_int4_sort(int4,int4); @@ -53068,7 +52947,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_table.out +CREATE OPERATOR CLASS test_int4_ops FOR TYPE int4 USING btree AS + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev CREATE TABLE partkey_t (a int4) PARTITION BY RANGE (a test_int4_ops); +ERROR: at or near "test_int4_ops": syntax error +DETAIL: source SQL: @@ -53275,15 +53154,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_table.out execute 'create table tab_part_create_1 partition of tab_part_create for values in (1)'; return null; end $$; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev create trigger trig_part_create before insert on tab_part_create for each statement execute procedure func_part_create(); -+ERROR: at or near "trig_part_create": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger trig_part_create before insert on tab_part_create -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev insert into tab_part_create values (1); -ERROR: cannot CREATE TABLE .. PARTITION OF "tab_part_create" because it is being used by active queries in this session -CONTEXT: SQL statement "create table tab_part_create_1 partition of tab_part_create for values in (1)" @@ -53361,7 +53239,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_table.out -- tests of column drop with partition tables and indexes using -- predicates and expressions. create table part_column_drop ( -@@ -1079,42 +1963,58 @@ +@@ -1079,42 +1962,58 @@ b int, useless_3 int ) partition by range (id); @@ -53459,7 +53337,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/index_including.o +CREATE TABLE tbl_include_reg (c1 int, c2 int, c3 int, c4 box) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev INSERT INTO tbl_include_reg SELECT x, 2*x, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +ERROR: relation "tbl_include_reg" does not exist CREATE INDEX tbl_include_reg_idx ON tbl_include_reg (c1, c2) INCLUDE (c3, c4); @@ -53503,7 +53381,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/index_including.o +CREATE TABLE tbl_include_unique1 (c1 int, c2 int, c3 int, c4 box) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev INSERT INTO tbl_include_unique1 SELECT x, 2*x, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +ERROR: relation "tbl_include_unique1" does not exist CREATE UNIQUE INDEX tbl_include_unique1_idx_unique ON tbl_include_unique1 using btree (c1, c2) INCLUDE (c3, c4); @@ -53533,7 +53411,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/index_including.o +CREATE TABLE tbl_include_unique2 (c1 int, c2 int, c3 int, c4 box) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev INSERT INTO tbl_include_unique2 SELECT 1, 2, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +ERROR: relation "tbl_include_unique2" does not exist CREATE UNIQUE INDEX tbl_include_unique2_idx_unique ON tbl_include_unique2 using btree (c1, c2) INCLUDE (c3, c4); @@ -53551,7 +53429,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/index_including.o +CREATE TABLE tbl_include_pk (c1 int, c2 int, c3 int, c4 box) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev INSERT INTO tbl_include_pk SELECT 1, 2*x, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +ERROR: relation "tbl_include_pk" does not exist ALTER TABLE tbl_include_pk add PRIMARY KEY (c1, c2) INCLUDE (c3, c4); @@ -53574,7 +53452,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/index_including.o +CREATE TABLE tbl_include_box (c1 int, c2 int, c3 int, c4 box) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev INSERT INTO tbl_include_box SELECT 1, 2*x, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +ERROR: relation "tbl_include_box" does not exist CREATE UNIQUE INDEX tbl_include_box_idx_unique ON tbl_include_box using btree (c1, c2) INCLUDE (c3, c4); @@ -53601,7 +53479,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/index_including.o +CREATE TABLE tbl_include_box_pk (c1 int, c2 int, c3 int, c4 box) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev INSERT INTO tbl_include_box_pk SELECT 1, 2, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +ERROR: relation "tbl_include_box_pk" does not exist ALTER TABLE tbl_include_box_pk add PRIMARY KEY (c1, c2) INCLUDE (c3, c4); @@ -53621,7 +53499,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/index_including.o +CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 box, + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev SELECT indexrelid::regclass, indnatts, indnkeyatts, indisunique, indisprimary, indkey, indclass FROM pg_index WHERE indrelid = 'tbl'::regclass::oid; - indexrelid | indnatts | indnkeyatts | indisunique | indisprimary | indkey | indclass -------------+----------+-------------+-------------+--------------+---------+----------- @@ -53650,7 +53528,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/index_including.o +CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 box, + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev SELECT indexrelid::regclass, indnatts, indnkeyatts, indisunique, indisprimary, indkey, indclass FROM pg_index WHERE indrelid = 'tbl'::regclass::oid; - indexrelid | indnatts | indnkeyatts | indisunique | indisprimary | indkey | indclass -------------+----------+-------------+-------------+--------------+---------+----------- @@ -53735,7 +53613,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/index_including.o +CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 box, + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev SELECT indexrelid::regclass, indnatts, indnkeyatts, indisunique, indisprimary, indkey, indclass FROM pg_index WHERE indrelid = 'tbl'::regclass::oid; - indexrelid | indnatts | indnkeyatts | indisunique | indisprimary | indkey | indclass ----------------------+----------+-------------+-------------+--------------+---------+----------- @@ -53764,7 +53642,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/index_including.o +CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 box, + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev SELECT indexrelid::regclass, indnatts, indnkeyatts, indisunique, indisprimary, indkey, indclass FROM pg_index WHERE indrelid = 'tbl'::regclass::oid; - indexrelid | indnatts | indnkeyatts | indisunique | indisprimary | indkey | indclass -------------+----------+-------------+-------------+--------------+---------+----------- @@ -53799,7 +53677,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/index_including.o +CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 box, + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev SELECT indexrelid::regclass, indnatts, indnkeyatts, indisunique, indisprimary, indkey, indclass FROM pg_index WHERE indrelid = 'tbl'::regclass::oid; - indexrelid | indnatts | indnkeyatts | indisunique | indisprimary | indkey | indclass --------------------+----------+-------------+-------------+--------------+--------+---------- @@ -53862,7 +53740,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/index_including.o +CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 box) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev CREATE UNIQUE INDEX tbl_idx ON tbl using btree(c1, c2) INCLUDE(c3,c4); +ERROR: relation "tbl" does not exist SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl' ORDER BY indexname; @@ -53894,7 +53772,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/index_including.o +CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 box, UNIQUE(c1, c2) INCLUDE(c3,c4)) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl' ORDER BY indexname; - indexdef ---------------------------------------------------------------------------------------------- @@ -53963,7 +53841,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/index_including.o +CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 box, UNIQUE(c1, c2) INCLUDE(c3,c4)) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev INSERT INTO tbl SELECT x, 2*x, 3*x, box('4,4,4,4') FROM generate_series(1,1000) AS x; +ERROR: relation "tbl" does not exist CREATE UNIQUE INDEX CONCURRENTLY on tbl (c1, c2) INCLUDE (c3, c4); @@ -53989,7 +53867,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/index_including.o +CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 box, UNIQUE(c1, c2) INCLUDE(c3,c4)) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl' ORDER BY indexname; - indexdef ---------------------------------------------------------------------------------------------- @@ -54036,7 +53914,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/index_including.o +CREATE TABLE tbl (c1 int,c2 int, c3 box, c4 box) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev CREATE INDEX on tbl USING brin(c1, c2) INCLUDE (c3, c4); -ERROR: access method "brin" does not support included columns +ERROR: at or near "brin": syntax error: unimplemented: this syntax @@ -54111,7 +53989,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/index_including.o +CREATE TABLE tbl (c1 int, c2 int, c3 int, c4 box) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev INSERT INTO tbl SELECT x, 2*x, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +ERROR: relation "tbl" does not exist CREATE UNIQUE INDEX tbl_idx_unique ON tbl using btree(c1, c2) INCLUDE (c3,c4); @@ -54140,7 +54018,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/index_including.o +CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 box, UNIQUE(c1, c2) INCLUDE(c3,c4)) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev INSERT INTO tbl SELECT x, 2*x, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +ERROR: relation "tbl" does not exist ALTER TABLE tbl ALTER c1 TYPE bigint; @@ -54181,7 +54059,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/index_including_g +CREATE TABLE tbl_gist (c1 int, c2 int, c3 int, c4 box) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev -- size is chosen to exceed page size and trigger actual truncation INSERT INTO tbl_gist SELECT x, 2*x, 3*x, box(point(x,x+1),point(2*x,2*x+1)) FROM generate_series(1,8000) AS x; +ERROR: at or near "(": syntax error @@ -54262,7 +54140,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/index_including_g +CREATE TABLE tbl_gist (c1 int, c2 int, c3 int, c4 box) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev -- size is chosen to exceed page size and trigger actual truncation CREATE INDEX tbl_gist_idx ON tbl_gist using gist (c4) INCLUDE (c1,c2,c3); +ERROR: relation "tbl_gist" does not exist @@ -54342,7 +54220,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/index_including_g +CREATE TABLE tbl_gist (c1 int, c2 int, c3 int, c4 box) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev INSERT INTO tbl_gist SELECT x, 2*x, 3*x, box(point(x,x+1),point(2*x,2*x+1)) FROM generate_series(1,10) AS x; +ERROR: at or near "(": syntax error +DETAIL: source SQL: @@ -54370,7 +54248,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/index_including_g +CREATE TABLE tbl_gist (c1 int, c2 int, c3 int, c4 box) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev INSERT INTO tbl_gist SELECT x, 2*x, 3*x, box(point(x,x+1),point(2*x,2*x+1)) FROM generate_series(1,10) AS x; +ERROR: at or near "(": syntax error +DETAIL: source SQL: @@ -54420,7 +54298,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/index_including_g +CREATE TABLE tbl_gist (c1 int, c2 int, c3 int, c4 box) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev INSERT INTO tbl_gist SELECT x, 2*x, 3*x, box(point(x,x+1),point(2*x,2*x+1)) FROM generate_series(1,10) AS x; +ERROR: at or near "(": syntax error +DETAIL: source SQL: @@ -54445,7 +54323,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/index_including_g +CREATE TABLE tbl_gist (c1 int, c2 int, c3 int, c4 box) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev INSERT INTO tbl_gist SELECT x, 2*x, 3*x, box(point(x,x+1),point(2*x,2*x+1)) FROM generate_series(1,10) AS x; +ERROR: at or near "(": syntax error +DETAIL: source SQL: @@ -54488,7 +54366,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/index_including_g +CREATE TABLE tbl_gist (c1 int, c2 int, c3 int, c4 box, EXCLUDE USING gist (c4 WITH &&) INCLUDE (c1, c2, c3)) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev INSERT INTO tbl_gist SELECT x, 2*x, 3*x, box(point(x,x+1),point(2*x,2*x+1)) FROM generate_series(1,10) AS x; -ERROR: conflicting key value violates exclusion constraint "tbl_gist_c4_c1_c2_c3_excl" -DETAIL: Key (c4)=((4,5),(2,3)) conflicts with existing key (c4)=((2,3),(1,2)). 
@@ -54557,7 +54435,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_view.out - +CREATE FUNCTION interpt_pp(path, path) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev CREATE TABLE real_city ( pop int4, cname text, @@ -54572,7 +54450,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_view.out - +) +^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev \set filename :abs_srcdir '/data/real_city.data' COPY real_city FROM :'filename'; +ERROR: at or near ":abs_srcdir/data/real_city.data": syntax error: unimplemented: this syntax @@ -54658,7 +54536,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_view.out - -HINT: Use DROP ... CASCADE to drop the dependent objects too. +ERROR: relation "view_base_table" (316): unimplemented: primary key dropped without subsequent addition of new primary key in same transaction +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/48026/v24.2 ++See: https://go.crdb.dev/issue-v/48026/dev CREATE VIEW key_dependent_view_no_cols AS SELECT FROM view_base_table GROUP BY key HAVING length(data) > 0; +ERROR: relation "key_dependent_view_no_cols" (318): table must contain at least 1 column @@ -56593,7 +56471,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_view.out - language plpgsql; +ERROR: unimplemented: set-returning PL/pgSQL functions are not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/105240/v24.2 ++See: https://go.crdb.dev/issue-v/105240/dev create view tt14v as select t.* from tt14f() t; +ERROR: unknown function: tt14f() +HINT: There is probably a typo in function name. Or the intention was to use a user-defined function in the view query, which is currently not supported. @@ -56677,7 +56555,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_view.out - -ERROR: cannot alter type of a column used by a view or rule -DETAIL: rule _RETURN on view tt14v depends on column "f4" +ERROR: ALTER COLUMN TYPE from string to int is only supported experimentally -+HINT: See: https://go.crdb.dev/issue-v/49329/v24.2 ++HINT: See: https://go.crdb.dev/issue-v/49329/dev +-- +you can enable alter column type general support by running `SET enable_experimental_alter_column_type_general = true` -- ... but some bug might let it happen, so check defenses @@ -56778,7 +56656,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_view.out - create type nestedcomposite as (x int8_tbl); +ERROR: unimplemented: composite types that reference user-defined types not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/91779/v24.2 ++See: https://go.crdb.dev/issue-v/91779/dev create view tt15v as select row(i)::nestedcomposite from int8_tbl i; +ERROR: type "nestedcomposite" does not exist select * from tt15v; @@ -57368,7 +57246,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_view.out - diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index_spgist.out --label=/mnt/data1/postgres/src/test/regress/results/create_index_spgist.out /mnt/data1/postgres/src/test/regress/expected/create_index_spgist.out /mnt/data1/postgres/src/test/regress/results/create_index_spgist.out --- /mnt/data1/postgres/src/test/regress/expected/create_index_spgist.out +++ /mnt/data1/postgres/src/test/regress/results/create_index_spgist.out -@@ -3,1369 +3,1150 @@ +@@ -3,1369 +3,1125 @@ -- CREATE TABLE quad_point_tbl AS SELECT point(unique1,unique2) AS p FROM tenk1; @@ -57385,7 +57263,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index_spgi + SELECT '(333.0,400.0)'::point FROM generate_series(1,1000) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev INSERT INTO quad_point_tbl VALUES (NULL), (NULL), (NULL); +ERROR: relation "quad_point_tbl" does not exist CREATE INDEX sp_quad_ind ON quad_point_tbl USING spgist (p); @@ -57509,7 +57387,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index_spgi +SELECT count(*) FROM quad_point_tbl WHERE p <@ box '(200,200,1000,1000)' + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev SELECT count(*) FROM quad_point_tbl WHERE box '(200,200,1000,1000)' @> p; - count -------- @@ -57521,7 +57399,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index_spgi +SELECT count(*) FROM quad_point_tbl WHERE box '(200,200,1000,1000)' @> p + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev SELECT count(*) FROM quad_point_tbl WHERE p << '(5000, 4000)'; - count -------- @@ -57569,30 +57447,22 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index_spgi CREATE TEMP TABLE quad_point_tbl_ord_seq1 AS SELECT row_number() OVER (ORDER BY p <-> '0,0') n, p <-> '0,0' dist, p FROM quad_point_tbl; -+ERROR: at or near "-": syntax error -+DETAIL: source SQL: -+CREATE TEMP TABLE quad_point_tbl_ord_seq1 AS -+SELECT row_number() OVER (ORDER BY p <-> '0,0') n, p <-> '0,0' dist, p -+ ^ -+HINT: try \h CREATE TABLE ++ERROR: relation "quad_point_tbl" does not exist CREATE TEMP TABLE quad_point_tbl_ord_seq2 AS SELECT row_number() OVER (ORDER BY p <-> '0,0') n, p <-> '0,0' dist, p FROM quad_point_tbl WHERE p <@ box '(200,200,1000,1000)'; -+ERROR: at or near "-": syntax error ++ERROR: at or near "(200,200,1000,1000)": syntax error: unimplemented: this syntax +DETAIL: source SQL: +CREATE TEMP TABLE quad_point_tbl_ord_seq2 AS +SELECT row_number() OVER (ORDER BY p <-> '0,0') n, p <-> '0,0' dist, p -+ ^ -+HINT: try \h CREATE TABLE ++FROM quad_point_tbl WHERE p <@ box '(200,200,1000,1000)' ++ ^ ++HINT: You have attempted to use a feature that is not yet implemented. 
++See: https://go.crdb.dev/issue-v/21286/dev CREATE TEMP TABLE quad_point_tbl_ord_seq3 AS SELECT row_number() OVER (ORDER BY p <-> '333,400') n, p <-> '333,400' dist, p FROM quad_point_tbl WHERE p IS NOT NULL; -+ERROR: at or near "-": syntax error -+DETAIL: source SQL: -+CREATE TEMP TABLE quad_point_tbl_ord_seq3 AS -+SELECT row_number() OVER (ORDER BY p <-> '333,400') n, p <-> '333,400' dist, p -+ ^ -+HINT: try \h CREATE TABLE ++ERROR: relation "quad_point_tbl" does not exist SELECT count(*) FROM radix_text_tbl WHERE t = 'P0123456789abcdef'; - count -------- @@ -57822,7 +57692,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index_spgi +SELECT count(*) FROM quad_point_tbl WHERE p <@ box '(200,200,1000,1000)' + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev EXPLAIN (COSTS OFF) SELECT count(*) FROM quad_point_tbl WHERE box '(200,200,1000,1000)' @> p; - QUERY PLAN @@ -57848,7 +57718,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index_spgi +SELECT count(*) FROM quad_point_tbl WHERE box '(200,200,1000,1000)' @> p + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev EXPLAIN (COSTS OFF) SELECT count(*) FROM quad_point_tbl WHERE p << '(5000, 4000)'; - QUERY PLAN @@ -57981,12 +57851,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index_spgi CREATE TEMP TABLE quad_point_tbl_ord_idx1 AS SELECT row_number() OVER (ORDER BY p <-> '0,0') n, p <-> '0,0' dist, p FROM quad_point_tbl; -+ERROR: at or near "-": syntax error -+DETAIL: source SQL: -+CREATE TEMP TABLE quad_point_tbl_ord_idx1 AS -+SELECT row_number() OVER (ORDER BY p <-> '0,0') n, p <-> '0,0' dist, p -+ ^ -+HINT: try \h CREATE TABLE ++ERROR: relation "quad_point_tbl" does not exist SELECT * FROM quad_point_tbl_ord_seq1 seq FULL JOIN quad_point_tbl_ord_idx1 idx ON seq.n = idx.n WHERE seq.dist IS DISTINCT FROM idx.dist; @@ -58014,12 +57879,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index_spgi CREATE TEMP TABLE quad_point_tbl_ord_idx2 AS SELECT row_number() OVER (ORDER BY p <-> '0,0') n, p <-> '0,0' dist, p FROM quad_point_tbl WHERE p <@ box '(200,200,1000,1000)'; -+ERROR: at or near "-": syntax error ++ERROR: at or near "(200,200,1000,1000)": syntax error: unimplemented: this syntax +DETAIL: source SQL: +CREATE TEMP TABLE quad_point_tbl_ord_idx2 AS +SELECT row_number() OVER (ORDER BY p <-> '0,0') n, p <-> '0,0' dist, p -+ ^ -+HINT: try \h CREATE TABLE ++FROM quad_point_tbl WHERE p <@ box '(200,200,1000,1000)' ++ ^ ++HINT: You have attempted to use a feature that is not yet implemented. 
++See: https://go.crdb.dev/issue-v/21286/dev SELECT * FROM quad_point_tbl_ord_seq2 seq FULL JOIN quad_point_tbl_ord_idx2 idx ON seq.n = idx.n WHERE seq.dist IS DISTINCT FROM idx.dist; @@ -58047,12 +57914,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index_spgi CREATE TEMP TABLE quad_point_tbl_ord_idx3 AS SELECT row_number() OVER (ORDER BY p <-> '333,400') n, p <-> '333,400' dist, p FROM quad_point_tbl WHERE p IS NOT NULL; -+ERROR: at or near "-": syntax error -+DETAIL: source SQL: -+CREATE TEMP TABLE quad_point_tbl_ord_idx3 AS -+SELECT row_number() OVER (ORDER BY p <-> '333,400') n, p <-> '333,400' dist, p -+ ^ -+HINT: try \h CREATE TABLE ++ERROR: relation "quad_point_tbl" does not exist SELECT * FROM quad_point_tbl_ord_seq3 seq FULL JOIN quad_point_tbl_ord_idx3 idx ON seq.n = idx.n WHERE seq.dist IS DISTINCT FROM idx.dist; @@ -58086,7 +57948,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index_spgi +SELECT count(*) FROM kd_point_tbl WHERE p <@ box '(200,200,1000,1000)' + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev EXPLAIN (COSTS OFF) SELECT count(*) FROM kd_point_tbl WHERE box '(200,200,1000,1000)' @> p; - QUERY PLAN @@ -58112,7 +57974,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index_spgi +SELECT count(*) FROM kd_point_tbl WHERE box '(200,200,1000,1000)' @> p + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev EXPLAIN (COSTS OFF) SELECT count(*) FROM kd_point_tbl WHERE p << '(5000, 4000)'; - QUERY PLAN @@ -58245,12 +58107,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index_spgi CREATE TEMP TABLE kd_point_tbl_ord_idx1 AS SELECT row_number() OVER (ORDER BY p <-> '0,0') n, p <-> '0,0' dist, p FROM kd_point_tbl; -+ERROR: at or near "-": syntax error -+DETAIL: source SQL: -+CREATE TEMP TABLE kd_point_tbl_ord_idx1 AS -+SELECT row_number() OVER (ORDER BY p <-> '0,0') n, p <-> '0,0' dist, p -+ ^ -+HINT: try \h CREATE TABLE ++ERROR: relation "kd_point_tbl" does not exist SELECT * FROM quad_point_tbl_ord_seq1 seq FULL JOIN kd_point_tbl_ord_idx1 idx ON seq.n = idx.n WHERE seq.dist IS DISTINCT FROM idx.dist; @@ -58278,12 +58135,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index_spgi CREATE TEMP TABLE kd_point_tbl_ord_idx2 AS SELECT row_number() OVER (ORDER BY p <-> '0,0') n, p <-> '0,0' dist, p FROM kd_point_tbl WHERE p <@ box '(200,200,1000,1000)'; -+ERROR: at or near "-": syntax error ++ERROR: at or near "(200,200,1000,1000)": syntax error: unimplemented: this syntax +DETAIL: source SQL: +CREATE TEMP TABLE kd_point_tbl_ord_idx2 AS +SELECT row_number() OVER (ORDER BY p <-> '0,0') n, p <-> '0,0' dist, p -+ ^ -+HINT: try \h CREATE TABLE ++FROM kd_point_tbl WHERE p <@ box '(200,200,1000,1000)' ++ ^ ++HINT: You have attempted to use a feature that is not yet implemented. 
++See: https://go.crdb.dev/issue-v/21286/dev SELECT * FROM quad_point_tbl_ord_seq2 seq FULL JOIN kd_point_tbl_ord_idx2 idx ON seq.n = idx.n WHERE seq.dist IS DISTINCT FROM idx.dist; @@ -58311,12 +58170,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index_spgi CREATE TEMP TABLE kd_point_tbl_ord_idx3 AS SELECT row_number() OVER (ORDER BY p <-> '333,400') n, p <-> '333,400' dist, p FROM kd_point_tbl WHERE p IS NOT NULL; -+ERROR: at or near "-": syntax error -+DETAIL: source SQL: -+CREATE TEMP TABLE kd_point_tbl_ord_idx3 AS -+SELECT row_number() OVER (ORDER BY p <-> '333,400') n, p <-> '333,400' dist, p -+ ^ -+HINT: try \h CREATE TABLE ++ERROR: relation "kd_point_tbl" does not exist SELECT * FROM quad_point_tbl_ord_seq3 seq FULL JOIN kd_point_tbl_ord_idx3 idx ON seq.n = idx.n WHERE seq.dist IS DISTINCT FROM idx.dist; @@ -58373,10 +58227,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index_spgi - (322,53) | 326.33265236565 -(10 rows) - -+ERROR: at or near "-": syntax error -+DETAIL: source SQL: -+SELECT p, dist FROM quad_point_tbl_ord_seq1 ORDER BY p <-> '0,0' LIMIT 10 -+ ^ ++ERROR: relation "quad_point_tbl_ord_seq1" does not exist RESET extra_float_digits; -- check ORDER BY distance to NULL SELECT (SELECT p FROM kd_point_tbl ORDER BY p <-> pt, p <-> '0,0' LIMIT 1) @@ -58388,10 +58239,12 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index_spgi - (1239,5647) -(3 rows) - -+ERROR: at or near "-": syntax error ++ERROR: at or near "1,2": syntax error +DETAIL: source SQL: +SELECT (SELECT p FROM kd_point_tbl ORDER BY p <-> pt, p <-> '0,0' LIMIT 1) -+ ^ ++FROM (VALUES (point '1,2'), (NULL), ('1234,5678')) pts(pt) ++ ^ ++HINT: try \h VALUES EXPLAIN (COSTS OFF) SELECT count(*) FROM radix_text_tbl WHERE t = 'P0123456789abcdef'; - QUERY PLAN @@ -58846,7 +58699,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index_spgi +SELECT count(*) FROM quad_point_tbl WHERE p <@ box '(200,200,1000,1000)' + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev EXPLAIN (COSTS OFF) SELECT count(*) FROM quad_point_tbl WHERE box '(200,200,1000,1000)' @> p; - QUERY PLAN @@ -58874,7 +58727,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index_spgi +SELECT count(*) FROM quad_point_tbl WHERE box '(200,200,1000,1000)' @> p + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev EXPLAIN (COSTS OFF) SELECT count(*) FROM quad_point_tbl WHERE p << '(5000, 4000)'; - QUERY PLAN @@ -59026,7 +58879,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index_spgi +SELECT count(*) FROM kd_point_tbl WHERE p <@ box '(200,200,1000,1000)' + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev EXPLAIN (COSTS OFF) SELECT count(*) FROM kd_point_tbl WHERE box '(200,200,1000,1000)' @> p; - QUERY PLAN @@ -59054,7 +58907,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index_spgi +SELECT count(*) FROM kd_point_tbl WHERE box '(200,200,1000,1000)' @> p + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev EXPLAIN (COSTS OFF) SELECT count(*) FROM kd_point_tbl WHERE p << '(5000, 4000)'; - QUERY PLAN @@ -59570,7 +59423,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index_spgi diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out --label=/mnt/data1/postgres/src/test/regress/results/create_index.out /mnt/data1/postgres/src/test/regress/expected/create_index.out /mnt/data1/postgres/src/test/regress/results/create_index.out --- /mnt/data1/postgres/src/test/regress/expected/create_index.out +++ /mnt/data1/postgres/src/test/regress/results/create_index.out -@@ -4,646 +4,641 @@ +@@ -4,646 +4,637 @@ -- -- directory paths are passed to us in environment variables \getenv abs_srcdir PG_ABS_SRCDIR @@ -59650,7 +59503,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out +) +^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev CREATE TABLE fast_emp4000 ( home_base box ); @@ -59661,7 +59514,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out +) +^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev \set filename :abs_srcdir '/data/rect.data' COPY slow_emp4000 FROM :'filename'; +ERROR: at or near ":abs_srcdir/data/rect.data": syntax error: unimplemented: this syntax @@ -59760,7 +59613,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out + ORDER BY (home_base[0])[0] + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev SELECT count(*) FROM fast_emp4000 WHERE home_base && '(1000,1000,0,0)'::box; - count -------- @@ -59772,7 +59625,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out +SELECT count(*) FROM fast_emp4000 WHERE home_base && '(1000,1000,0,0)'::box + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev SELECT count(*) FROM fast_emp4000 WHERE home_base IS NULL; - count -------- @@ -59791,7 +59644,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out +SELECT count(*) FROM gpolygon_tbl WHERE f1 && '(1000,1000,0,0)'::polygon + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev SELECT count(*) FROM gcircle_tbl WHERE f1 && '<(500,500),500>'::circle; - count -------- @@ -59803,7 +59656,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out +SELECT count(*) FROM gcircle_tbl WHERE f1 && '<(500,500),500>'::circle + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev SELECT count(*) FROM point_tbl WHERE f1 <@ box '(0,0,100,100)'; - count -------- @@ -59815,7 +59668,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out +SELECT count(*) FROM point_tbl WHERE f1 <@ box '(0,0,100,100)' + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev SELECT count(*) FROM point_tbl WHERE box '(0,0,100,100)' @> f1; - count -------- @@ -59827,7 +59680,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out +SELECT count(*) FROM point_tbl WHERE box '(0,0,100,100)' @> f1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev SELECT count(*) FROM point_tbl WHERE f1 <@ polygon '(0,0),(0,100),(100,100),(50,50),(100,0),(0,0)'; - count -------- @@ -59849,7 +59702,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out +SELECT count(*) FROM point_tbl WHERE f1 <@ circle '<(50,50),50>' + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev SELECT count(*) FROM point_tbl p WHERE p.f1 << '(0.0, 0.0)'; - count -------- @@ -59910,10 +59763,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out - -(11 rows) - -+ERROR: at or near "-": syntax error -+DETAIL: source SQL: -+SELECT * FROM point_tbl ORDER BY f1 <-> '0,1' -+ ^ ++ERROR: relation "point_tbl" does not exist SELECT * FROM point_tbl WHERE f1 IS NULL; - f1 ----- @@ -59936,10 +59786,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out - (NaN,NaN) -(10 rows) - -+ERROR: at or near "-": syntax error -+DETAIL: source SQL: -+SELECT * FROM point_tbl WHERE f1 IS NOT NULL ORDER BY f1 <-> '0,1' -+ ^ ++ERROR: relation "point_tbl" does not exist SELECT * FROM point_tbl WHERE f1 <@ '(-10,-10),(10,10)':: box ORDER BY f1 <-> '0,1'; - f1 ------------------- @@ -59955,7 +59802,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out +SELECT * FROM point_tbl WHERE f1 <@ '(-10,-10),(10,10)':: box ORDER BY f1 <-> '0,1' + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev SELECT * FROM gpolygon_tbl ORDER BY f1 <-> '(0,0)'::point LIMIT 10; - f1 -------------------------------------------------- @@ -59971,10 +59818,12 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out - ((2424,81),(2424,160),(2424,160),(2424,81)) -(10 rows) - -+ERROR: at or near "-": syntax error ++ERROR: at or near "limit": syntax error: unimplemented: this syntax +DETAIL: source SQL: +SELECT * FROM gpolygon_tbl ORDER BY f1 <-> '(0,0)'::point LIMIT 10 -+ ^ ++ ^ ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/21286/dev SELECT circle_center(f1), round(radius(f1)) as radius FROM gcircle_tbl ORDER BY f1 <-> '(200,300)'::point LIMIT 10; - circle_center | radius -----------------+-------- @@ -59990,10 +59839,12 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out - (751.5,2655) | 20 -(10 rows) - -+ERROR: at or near "-": syntax error ++ERROR: at or near "limit": syntax error: unimplemented: this syntax +DETAIL: source SQL: +SELECT circle_center(f1), round(radius(f1)) as radius FROM gcircle_tbl ORDER BY f1 <-> '(200,300)'::point LIMIT 10 -+ ^ ++ ^ ++HINT: You have attempted to use a feature that is not yet implemented. 
++See: https://go.crdb.dev/issue-v/21286/dev -- Now check the results from plain indexscan SET enable_seqscan = OFF; +WARNING: setting session var "enable_seqscan" is a no-op @@ -60056,7 +59907,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out + ORDER BY (home_base[0])[0] + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev EXPLAIN (COSTS OFF) SELECT count(*) FROM fast_emp4000 WHERE home_base && '(1000,1000,0,0)'::box; - QUERY PLAN @@ -60082,7 +59933,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out +SELECT count(*) FROM fast_emp4000 WHERE home_base && '(1000,1000,0,0)'::box + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev EXPLAIN (COSTS OFF) SELECT count(*) FROM fast_emp4000 WHERE home_base IS NULL; - QUERY PLAN @@ -60129,7 +59980,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out +SELECT count(*) FROM gpolygon_tbl WHERE f1 && '(1000,1000,0,0)'::polygon + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev EXPLAIN (COSTS OFF) SELECT count(*) FROM gcircle_tbl WHERE f1 && '<(500,500),500>'::circle; - QUERY PLAN @@ -60155,7 +60006,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out +SELECT count(*) FROM gcircle_tbl WHERE f1 && '<(500,500),500>'::circle + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev EXPLAIN (COSTS OFF) SELECT count(*) FROM point_tbl WHERE f1 <@ box '(0,0,100,100)'; - QUERY PLAN @@ -60181,7 +60032,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out +SELECT count(*) FROM point_tbl WHERE f1 <@ box '(0,0,100,100)' + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev EXPLAIN (COSTS OFF) SELECT count(*) FROM point_tbl WHERE box '(0,0,100,100)' @> f1; - QUERY PLAN @@ -60207,7 +60058,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out +SELECT count(*) FROM point_tbl WHERE box '(0,0,100,100)' @> f1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev EXPLAIN (COSTS OFF) SELECT count(*) FROM point_tbl WHERE f1 <@ polygon '(0,0),(0,100),(100,100),(50,50),(100,0),(0,0)'; - QUERY PLAN @@ -60257,7 +60108,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out +SELECT count(*) FROM point_tbl WHERE f1 <@ circle '<(50,50),50>' + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev EXPLAIN (COSTS OFF) SELECT count(*) FROM point_tbl p WHERE p.f1 << '(0.0, 0.0)'; - QUERY PLAN @@ -60401,10 +60252,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out - -(11 rows) - -+ERROR: at or near "-": syntax error -+DETAIL: source SQL: -+SELECT * FROM point_tbl ORDER BY f1 <-> '0,1' -+ ^ ++ERROR: relation "point_tbl" does not exist EXPLAIN (COSTS OFF) SELECT * FROM point_tbl WHERE f1 IS NULL; - QUERY PLAN @@ -60454,10 +60302,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out - (NaN,NaN) -(10 rows) - -+ERROR: at or near "-": syntax error -+DETAIL: source SQL: -+SELECT * FROM point_tbl WHERE f1 IS NOT NULL ORDER BY f1 <-> '0,1' -+ ^ ++ERROR: relation "point_tbl" does not exist EXPLAIN (COSTS OFF) SELECT * FROM point_tbl WHERE f1 <@ '(-10,-10),(10,10)':: box ORDER BY f1 <-> '0,1'; - QUERY PLAN @@ -60487,7 +60332,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out +SELECT * FROM point_tbl WHERE f1 <@ '(-10,-10),(10,10)':: box ORDER BY f1 <-> '0,1' + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev EXPLAIN (COSTS OFF) SELECT * FROM gpolygon_tbl ORDER BY f1 <-> '(0,0)'::point LIMIT 10; - QUERY PLAN @@ -60517,10 +60362,12 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out - ((2424,81),(2424,160),(2424,160),(2424,81)) -(10 rows) - -+ERROR: at or near "-": syntax error ++ERROR: at or near "limit": syntax error: unimplemented: this syntax +DETAIL: source SQL: +SELECT * FROM gpolygon_tbl ORDER BY f1 <-> '(0,0)'::point LIMIT 10 -+ ^ ++ ^ ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/21286/dev EXPLAIN (COSTS OFF) SELECT circle_center(f1), round(radius(f1)) as radius FROM gcircle_tbl ORDER BY f1 <-> '(200,300)'::point LIMIT 10; - QUERY PLAN @@ -60550,10 +60397,12 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out - (751.5,2655) | 20 -(10 rows) - -+ERROR: at or near "-": syntax error ++ERROR: at or near "limit": syntax error: unimplemented: this syntax +DETAIL: source SQL: +SELECT circle_center(f1), round(radius(f1)) as radius FROM gcircle_tbl ORDER BY f1 <-> '(200,300)'::point LIMIT 10 -+ ^ ++ ^ ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/21286/dev EXPLAIN (COSTS OFF) SELECT point(x,x), (SELECT f1 FROM gpolygon_tbl ORDER BY f1 <-> point(x,x) LIMIT 1) as c FROM generate_series(0,10,1) x; - QUERY PLAN @@ -60651,7 +60500,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out +SELECT * FROM point_tbl WHERE f1 <@ '(-10,-10),(10,10)':: box ORDER BY f1 <-> '0,1' + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev RESET enable_seqscan; +WARNING: setting session var "enable_seqscan" is a no-op RESET enable_indexscan; @@ -60683,7 +60532,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out -- -- GIN over int[] and text[] -- -@@ -656,12 +651,26 @@ +@@ -656,12 +647,26 @@ ); \set filename :abs_srcdir '/data/array.data' COPY array_index_op_test FROM :'filename'; @@ -60714,7 +60563,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out SELECT * FROM array_index_op_test WHERE i @> '{NULL}' ORDER BY seqno; seqno | i | t -@@ -674,226 +683,95 @@ +@@ -674,226 +679,95 @@ (0 rows) SELECT * FROM array_index_op_test WHERE i <@ '{NULL}' ORDER BY seqno; @@ -61004,7 +60853,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out SELECT * FROM array_index_op_test WHERE i && '{}' ORDER BY seqno; seqno | i | t -@@ -901,201 +779,67 @@ +@@ -901,201 +775,67 @@ (0 rows) SELECT * FROM array_index_op_test WHERE i <@ '{}' ORDER BY seqno; @@ -61244,7 +61093,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out SELECT * FROM array_index_op_test WHERE t && '{}' ORDER BY seqno; seqno | i | t -@@ -1103,81 +847,78 @@ +@@ -1103,81 +843,78 @@ (0 rows) SELECT * FROM array_index_op_test WHERE t <@ '{}' ORDER BY seqno; @@ -61377,13 +61226,13 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out -- -- Try a GIN index with a lot of items with same key. (GIN creates a posting -- tree when there are enough duplicates) -@@ -1197,36 +938,80 @@ +@@ -1197,36 +934,80 @@ -- CREATE INDEX gin_relopts_test ON array_index_op_test USING gin (i) WITH (FASTUPDATE=on, GIN_PENDING_LIST_LIMIT=128); +ERROR: unimplemented: storage parameter "fastupdate" +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/43299/v24.2 ++See: https://go.crdb.dev/issue-v/43299/dev \d+ gin_relopts_test - Index "public.gin_relopts_test" - Column | Type | Key? 
| Definition | Storage | Stats target @@ -61474,7 +61323,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out SELECT count(*) FROM tenk1 WHERE stringu1 = 'TVAAAA'; count ------- -@@ -1234,13 +1019,34 @@ +@@ -1234,13 +1015,34 @@ (1 row) DROP INDEX hash_tuplesort_idx; @@ -61509,7 +61358,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out INSERT INTO unique_tbl VALUES (1, 'one'); INSERT INTO unique_tbl VALUES (2, 'two'); INSERT INTO unique_tbl VALUES (3, 'three'); -@@ -1248,120 +1054,133 @@ +@@ -1248,120 +1050,133 @@ INSERT INTO unique_tbl VALUES (5, 'one'); INSERT INTO unique_tbl (t) VALUES ('six'); INSERT INTO unique_tbl (t) VALUES ('seven'); -- error from unique_idx2 @@ -61711,7 +61560,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out -- -- Test unique index with included columns -- -@@ -1380,6 +1199,11 @@ +@@ -1380,6 +1195,11 @@ -- Try to use existing covering index as primary key ALTER TABLE covering_index_heap ADD CONSTRAINT covering_pkey PRIMARY KEY USING INDEX covering_pkey; @@ -61723,7 +61572,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out DROP TABLE covering_index_heap; -- -- Try some concurrent index builds -@@ -1389,31 +1213,34 @@ +@@ -1389,31 +1209,34 @@ CREATE TABLE concur_heap (f1 text, f2 text); -- empty table CREATE INDEX CONCURRENTLY concur_index1 ON concur_heap(f2,f1); @@ -61764,11 +61613,12 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out COMMIT; -- test where predicate is able to do a transactional update during -- a concurrent build before switching pg_index state flags. -@@ -1423,52 +1250,73 @@ +@@ -1423,52 +1246,74 @@ EXECUTE 'SELECT txid_current()'; RETURN true; END; $$; +ERROR: unimplemented: attempted to use a PL/pgSQL statement that is not yet supported ++DETAIL: stmt_dyn_exec is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. + +Please check the public issue tracker to check whether this problem is @@ -61868,7 +61718,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out -- Temporary tables with concurrent builds and on-commit actions -- CONCURRENTLY used with CREATE INDEX and DROP INDEX is ignored. -- PRESERVE ROWS, the default. -@@ -1476,52 +1324,92 @@ +@@ -1476,52 +1321,92 @@ ON COMMIT PRESERVE ROWS; INSERT INTO concur_temp VALUES (1, 'foo'), (2, 'bar'); CREATE INDEX CONCURRENTLY concur_temp_ind ON concur_temp(f1); @@ -61886,7 +61736,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out + ON COMMIT DROP + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46556/v24.2 ++See: https://go.crdb.dev/issue-v/46556/dev INSERT INTO concur_temp VALUES (1, 'foo'), (2, 'bar'); +ERROR: current transaction is aborted, commands ignored until end of transaction block -- Fails when running in a transaction. @@ -61903,7 +61753,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out + ON COMMIT DELETE ROWS + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/46556/v24.2 ++See: https://go.crdb.dev/issue-v/46556/dev INSERT INTO concur_temp VALUES (1, 'foo'), (2, 'bar'); +ERROR: relation "concur_temp" does not exist CREATE INDEX CONCURRENTLY concur_temp_ind ON concur_temp(f1); @@ -61973,7 +61823,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out DROP TABLE concur_heap; -- -- Test ADD CONSTRAINT USING INDEX -@@ -1531,75 +1419,115 @@ +@@ -1531,75 +1416,115 @@ INSERT INTO cwi_test VALUES(1, 2), (3, 4), (5, 6); CREATE UNIQUE INDEX cwi_uniq_idx ON cwi_test(a , b); ALTER TABLE cwi_test ADD primary key USING INDEX cwi_uniq_idx; @@ -62134,7 +61984,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out DROP TABLE cwi_test; -- -- Check handling of indexes on system columns -@@ -1610,10 +1538,10 @@ +@@ -1610,10 +1535,10 @@ ERROR: relation "syscolcol_table" does not exist -- nor used in expressions CREATE INDEX ON syscol_table ((ctid >= '(1000,0)')); @@ -62147,7 +61997,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out DROP TABLE syscol_table; -- -- Tests for IS NULL/IS NOT NULL with b-tree indexes -@@ -1622,8 +1550,33 @@ +@@ -1622,8 +1547,33 @@ INSERT INTO onek_with_null (unique1,unique2) VALUES (NULL, -1), (NULL, NULL); CREATE UNIQUE INDEX onek_nulltest ON onek_with_null (unique2,unique1); SET enable_seqscan = OFF; @@ -62181,7 +62031,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out SELECT count(*) FROM onek_with_null WHERE unique1 IS NULL; count ------- -@@ -1661,6 +1614,8 @@ +@@ -1661,6 +1611,8 @@ (1 row) DROP INDEX onek_nulltest; @@ -62190,7 +62040,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out CREATE UNIQUE INDEX onek_nulltest ON onek_with_null (unique2 desc,unique1); SELECT count(*) FROM onek_with_null WHERE unique1 IS NULL; count -@@ -1699,6 +1654,8 @@ +@@ -1699,6 +1651,8 @@ (1 row) DROP INDEX onek_nulltest; @@ -62199,7 +62049,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out CREATE UNIQUE INDEX onek_nulltest ON onek_with_null (unique2 desc nulls last,unique1); SELECT count(*) FROM onek_with_null WHERE unique1 IS NULL; count -@@ -1737,6 +1694,8 @@ +@@ -1737,6 +1691,8 @@ (1 row) DROP INDEX onek_nulltest; @@ -62208,7 +62058,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out CREATE UNIQUE INDEX onek_nulltest ON onek_with_null (unique2 nulls first,unique1); SELECT count(*) FROM onek_with_null WHERE unique1 IS NULL; count -@@ -1775,17 +1734,44 @@ +@@ -1775,17 +1731,44 @@ (1 row) DROP INDEX onek_nulltest; @@ -62254,7 +62104,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out (2 rows) SELECT unique1, unique2 FROM onek_with_null WHERE unique2 >= -1 -@@ -1808,8 +1794,8 @@ +@@ -1808,8 +1791,8 @@ ORDER BY unique2 DESC LIMIT 2; unique1 | unique2 ---------+--------- @@ -62264,7 +62114,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out (2 rows) SELECT unique1, unique2 FROM onek_with_null WHERE unique2 >= -1 -@@ -1829,8 +1815,33 @@ +@@ -1829,8 +1812,33 @@ (2 rows) RESET enable_seqscan; @@ -62298,7 +62148,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out DROP TABLE onek_with_null; -- -- Check bitmap index path planning -@@ -1838,19 +1849,11 @@ +@@ -1838,19 +1846,11 @@ EXPLAIN (COSTS OFF) SELECT * FROM tenk1 WHERE thousand = 42 AND (tenthous = 1 OR tenthous = 3 OR tenthous = 42); @@ -62323,7 
+62173,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out SELECT * FROM tenk1 WHERE thousand = 42 AND (tenthous = 1 OR tenthous = 3 OR tenthous = 42); unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 -@@ -1861,21 +1864,11 @@ +@@ -1861,21 +1861,11 @@ EXPLAIN (COSTS OFF) SELECT count(*) FROM tenk1 WHERE hundred = 42 AND (thousand = 42 OR thousand = 99); @@ -62350,7 +62200,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out SELECT count(*) FROM tenk1 WHERE hundred = 42 AND (thousand = 42 OR thousand = 99); count -@@ -1889,26 +1882,23 @@ +@@ -1889,26 +1879,23 @@ CREATE TABLE dupindexcols AS SELECT unique1 as id, stringu2::text as f1 FROM tenk1; CREATE INDEX dupindexcols_i ON dupindexcols (f1, id, f1 text_pattern_ops); @@ -62388,7 +62238,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out -- -- Check ordering of =ANY indexqual results (bug in 9.2.0) -- -@@ -1916,12 +1906,11 @@ +@@ -1916,12 +1903,11 @@ SELECT unique1 FROM tenk1 WHERE unique1 IN (1,42,7) ORDER BY unique1; @@ -62406,7 +62256,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out SELECT unique1 FROM tenk1 WHERE unique1 IN (1,42,7) ORDER BY unique1; -@@ -1936,13 +1925,11 @@ +@@ -1936,13 +1922,11 @@ SELECT thousand, tenthous FROM tenk1 WHERE thousand < 2 AND tenthous IN (1001,3000) ORDER BY thousand; @@ -62425,7 +62275,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out SELECT thousand, tenthous FROM tenk1 WHERE thousand < 2 AND tenthous IN (1001,3000) ORDER BY thousand; -@@ -1953,18 +1940,27 @@ +@@ -1953,18 +1937,27 @@ (2 rows) SET enable_indexonlyscan = OFF; @@ -62461,7 +62311,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out SELECT thousand, tenthous FROM tenk1 WHERE thousand < 2 AND tenthous IN (1001,3000) ORDER BY thousand; -@@ -1975,81 +1971,81 @@ +@@ -1975,81 +1968,81 @@ (2 rows) RESET enable_indexonlyscan; @@ -62591,7 +62441,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out \set VERBOSITY default DROP TABLE reindex_verbose; -- -@@ -2058,9 +2054,16 @@ +@@ -2058,9 +2051,16 @@ CREATE TABLE concur_reindex_tab (c1 int); -- REINDEX REINDEX TABLE concur_reindex_tab; -- notice @@ -62610,7 +62460,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out ALTER TABLE concur_reindex_tab ADD COLUMN c2 text; -- add toast index -- Normal index with integer column CREATE UNIQUE INDEX concur_reindex_ind1 ON concur_reindex_tab(c1); -@@ -2070,21 +2073,41 @@ +@@ -2070,21 +2070,41 @@ CREATE UNIQUE INDEX concur_reindex_ind3 ON concur_reindex_tab(abs(c1)); -- Duplicate column names CREATE INDEX concur_reindex_ind4 ON concur_reindex_tab(c1, c1, c2); @@ -62632,7 +62482,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out +CREATE TABLE concur_reindex_tab3 (c1 int, c2 int4range, EXCLUDE USING gist (c2 WITH &&)) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/46657/v24.2 ++See: https://go.crdb.dev/issue-v/46657/dev INSERT INTO concur_reindex_tab3 VALUES (3, '[1,2]'); +ERROR: relation "concur_reindex_tab3" does not exist REINDEX INDEX CONCURRENTLY concur_reindex_tab3_c2_excl; -- error @@ -62656,7 +62506,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out -- Check materialized views CREATE MATERIALIZED VIEW concur_reindex_matview AS SELECT * FROM concur_reindex_tab; -- Dependency lookup before and after the follow-up REINDEX commands. -@@ -2101,21 +2124,25 @@ +@@ -2101,21 +2121,25 @@ 'concur_reindex_ind4'::regclass, 'concur_reindex_matview'::regclass) ORDER BY 1, 2; @@ -62694,7 +62544,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out SELECT pg_describe_object(classid, objid, objsubid) as obj, pg_describe_object(refclassid,refobjid,refobjsubid) as objref, deptype -@@ -2128,18 +2155,7 @@ +@@ -2128,18 +2152,7 @@ 'concur_reindex_ind4'::regclass, 'concur_reindex_matview'::regclass) ORDER BY 1, 2; @@ -62714,7 +62564,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out -- Check that comments are preserved CREATE TABLE testcomment (i int); CREATE INDEX testcomment_idx1 ON testcomment (i); -@@ -2151,6 +2167,11 @@ +@@ -2151,6 +2164,11 @@ (1 row) REINDEX TABLE testcomment; @@ -62726,7 +62576,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out SELECT obj_description('testcomment_idx1'::regclass, 'pg_class'); obj_description ----------------- -@@ -2158,6 +2179,11 @@ +@@ -2158,6 +2176,11 @@ (1 row) REINDEX TABLE CONCURRENTLY testcomment ; @@ -62738,7 +62588,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out SELECT obj_description('testcomment_idx1'::regclass, 'pg_class'); obj_description ----------------- -@@ -2169,13 +2195,24 @@ +@@ -2169,13 +2192,24 @@ CREATE TABLE concur_clustered(i int); CREATE INDEX concur_clustered_i_idx ON concur_clustered(i); ALTER TABLE concur_clustered CLUSTER ON concur_clustered_i_idx; @@ -62765,7 +62615,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out DROP TABLE concur_clustered; -- Check that indisreplident updates are preserved. -@@ -2183,86 +2220,168 @@ +@@ -2183,86 +2217,168 @@ CREATE UNIQUE INDEX concur_replident_i_idx ON concur_replident(i); ALTER TABLE concur_replident REPLICA IDENTITY USING INDEX concur_replident_i_idx; @@ -62966,7 +62816,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out -- REINDEX should preserve dependencies of partition tree. SELECT pg_describe_object(classid, objid, objsubid) as obj, pg_describe_object(refclassid,refobjid,refobjsubid) as objref, -@@ -2278,44 +2397,34 @@ +@@ -2278,44 +2394,34 @@ 'concur_reindex_part_index_0_1'::regclass, 'concur_reindex_part_index_0_2'::regclass) ORDER BY 1, 2; @@ -63033,7 +62883,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out SELECT pg_describe_object(classid, objid, objsubid) as obj, pg_describe_object(refclassid,refobjid,refobjsubid) as objref, deptype -@@ -2330,57 +2439,46 @@ +@@ -2330,57 +2436,46 @@ 'concur_reindex_part_index_0_1'::regclass, 'concur_reindex_part_index_0_2'::regclass) ORDER BY 1, 2; @@ -63118,11 +62968,12 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out ROLLBACK; -- Helper functions to track changes of relfilenodes in a partition tree. -- Create a table tracking the relfilenode state. 
-@@ -2397,6 +2495,18 @@ +@@ -2397,6 +2492,19 @@ relname, indname); END $func$ LANGUAGE plpgsql; +ERROR: unimplemented: attempted to use a PL/pgSQL statement that is not yet supported ++DETAIL: stmt_dyn_exec is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. + +Please check the public issue tracker to check whether this problem is @@ -63137,7 +62988,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out CREATE OR REPLACE FUNCTION compare_relfilenode_part(tabname text) RETURNS TABLE (relname name, relkind "char", state text) AS $func$ -@@ -2412,195 +2522,252 @@ +@@ -2412,195 +2520,252 @@ ORDER BY 1;', tabname); END $func$ LANGUAGE plpgsql; @@ -63147,7 +62998,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out + RETURNS TABLE (relname name, relkind "char", state text) AS + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/100226/v24.2 ++See: https://go.crdb.dev/issue-v/100226/dev -- Check that expected relfilenodes are changed, non-concurrent case. SELECT create_relfilenode_part('reindex_index_status', 'concur_reindex_part_index'); - create_relfilenode_part @@ -63503,18 +63354,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out DROP TABLE concur_reindex_tab4; -- Check handling of indexes with expressions and predicates. The -- definitions of the rebuilt indexes should match the original -@@ -2611,90 +2778,69 @@ - (897778963, FALSE); - CREATE UNIQUE INDEX concur_exprs_index_expr +@@ -2613,76 +2778,78 @@ ON concur_exprs_tab ((c1::text COLLATE "C")); -+ERROR: invalid locale c: language: tag is not well-formed CREATE UNIQUE INDEX concur_exprs_index_pred ON concur_exprs_tab (c1) WHERE (c1::text > 500000000::text COLLATE "C"); +ERROR: invalid locale c: language: tag is not well-formed CREATE UNIQUE INDEX concur_exprs_index_pred_2 ON concur_exprs_tab ((1 / c1)) WHERE ('-H') >= (c2::TEXT) COLLATE "C"; -+ERROR: invalid locale c: language: tag is not well-formed ALTER INDEX concur_exprs_index_expr ALTER COLUMN 1 SET STATISTICS 100; +ERROR: at or near "alter": syntax error +DETAIL: source SQL: @@ -63539,9 +63386,11 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out - pg_get_indexdef ---------------------------------------------------------------------------------------------------------------- - CREATE UNIQUE INDEX concur_exprs_index_expr ON public.concur_exprs_tab USING btree (((c1)::text) COLLATE "C") --(1 row) -- -+ERROR: relation "concur_exprs_index_expr" does not exist ++ pg_get_indexdef ++------------------------------------------------------------------------------------------------------------ ++ CREATE UNIQUE INDEX concur_exprs_index_expr ON root.public.concur_exprs_tab USING btree ((c1::STRING) ASC) + (1 row) + SELECT pg_get_indexdef('concur_exprs_index_pred'::regclass); - pg_get_indexdef ----------------------------------------------------------------------------------------------------------------------------------------------- @@ -63553,9 +63402,11 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out - pg_get_indexdef --------------------------------------------------------------------------------------------------------------------------------------------------- - CREATE UNIQUE INDEX concur_exprs_index_pred_2 ON public.concur_exprs_tab USING btree (((1 / c1))) WHERE ('-H'::text >= ((c2)::text COLLATE "C")) --(1 row) -- -+ERROR: 
relation "concur_exprs_index_pred_2" does not exist ++ pg_get_indexdef ++--------------------------------------------------------------------------------------------------------------------------------------------- ++ CREATE UNIQUE INDEX concur_exprs_index_pred_2 ON root.public.concur_exprs_tab USING btree ((1 / c1) ASC) WHERE ('-H'::STRING >= c2::STRING) + (1 row) + REINDEX TABLE CONCURRENTLY concur_exprs_tab; +ERROR: at or near "concurrently": syntax error: unimplemented: this syntax +DETAIL: source SQL: @@ -63566,9 +63417,11 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out - pg_get_indexdef ---------------------------------------------------------------------------------------------------------------- - CREATE UNIQUE INDEX concur_exprs_index_expr ON public.concur_exprs_tab USING btree (((c1)::text) COLLATE "C") --(1 row) -- -+ERROR: relation "concur_exprs_index_expr" does not exist ++ pg_get_indexdef ++------------------------------------------------------------------------------------------------------------ ++ CREATE UNIQUE INDEX concur_exprs_index_expr ON root.public.concur_exprs_tab USING btree ((c1::STRING) ASC) + (1 row) + SELECT pg_get_indexdef('concur_exprs_index_pred'::regclass); - pg_get_indexdef ----------------------------------------------------------------------------------------------------------------------------------------------- @@ -63580,22 +63433,26 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out - pg_get_indexdef --------------------------------------------------------------------------------------------------------------------------------------------------- - CREATE UNIQUE INDEX concur_exprs_index_pred_2 ON public.concur_exprs_tab USING btree (((1 / c1))) WHERE ('-H'::text >= ((c2)::text COLLATE "C")) --(1 row) -- -+ERROR: relation "concur_exprs_index_pred_2" does not exist ++ pg_get_indexdef ++--------------------------------------------------------------------------------------------------------------------------------------------- ++ CREATE UNIQUE INDEX concur_exprs_index_pred_2 ON root.public.concur_exprs_tab USING btree ((1 / c1) ASC) WHERE ('-H'::STRING >= c2::STRING) + (1 row) + -- ALTER TABLE recreates the indexes, which should keep their collations. 
ALTER TABLE concur_exprs_tab ALTER c2 TYPE TEXT; -+ERROR: ALTER COLUMN TYPE from bool to string is only supported experimentally -+HINT: See: https://go.crdb.dev/issue-v/49329/v24.2 ++ERROR: cannot alter type of column "c2" because it is referenced by partial index "concur_exprs_index_pred_2" ++HINT: drop the partial index first, then alter type of the column +-- -+you can enable alter column type general support by running `SET enable_experimental_alter_column_type_general = true` ++See: https://go.crdb.dev/issue-v/97372/dev SELECT pg_get_indexdef('concur_exprs_index_expr'::regclass); - pg_get_indexdef ---------------------------------------------------------------------------------------------------------------- - CREATE UNIQUE INDEX concur_exprs_index_expr ON public.concur_exprs_tab USING btree (((c1)::text) COLLATE "C") --(1 row) -- -+ERROR: relation "concur_exprs_index_expr" does not exist ++ pg_get_indexdef ++------------------------------------------------------------------------------------------------------------ ++ CREATE UNIQUE INDEX concur_exprs_index_expr ON root.public.concur_exprs_tab USING btree ((c1::STRING) ASC) + (1 row) + SELECT pg_get_indexdef('concur_exprs_index_pred'::regclass); - pg_get_indexdef ----------------------------------------------------------------------------------------------------------------------------------------------- @@ -63607,12 +63464,13 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out - pg_get_indexdef ------------------------------------------------------------------------------------------------------------------------------------------- - CREATE UNIQUE INDEX concur_exprs_index_pred_2 ON public.concur_exprs_tab USING btree (((1 / c1))) WHERE ('-H'::text >= (c2 COLLATE "C")) --(1 row) -- -+ERROR: relation "concur_exprs_index_pred_2" does not exist ++ pg_get_indexdef ++--------------------------------------------------------------------------------------------------------------------------------------------- ++ CREATE UNIQUE INDEX concur_exprs_index_pred_2 ON root.public.concur_exprs_tab USING btree ((1 / c1) ASC) WHERE ('-H'::STRING >= c2::STRING) + (1 row) + -- Statistics should remain intact. - SELECT starelid::regclass, count(*) FROM pg_statistic WHERE starelid IN ( - 'concur_exprs_index_expr'::regclass, +@@ -2691,10 +2858,9 @@ 'concur_exprs_index_pred'::regclass, 'concur_exprs_index_pred_2'::regclass) GROUP BY starelid ORDER BY starelid::regclass::text; @@ -63626,7 +63484,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out -- attstattarget should remain intact SELECT attrelid::regclass, attnum, attstattarget -@@ -2703,13 +2849,7 @@ +@@ -2703,13 +2869,7 @@ 'concur_exprs_index_pred'::regclass, 'concur_exprs_index_pred_2'::regclass) ORDER BY attrelid::regclass::text, attnum; @@ -63637,11 +63495,11 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out - concur_exprs_index_pred_2 | 1 | -1 -(3 rows) - -+ERROR: relation "concur_exprs_index_expr" does not exist ++ERROR: relation "concur_exprs_index_pred" does not exist DROP TABLE concur_exprs_tab; -- Temporary tables and on-commit actions, where CONCURRENTLY is ignored. -- ON COMMIT PRESERVE ROWS, the default. 
-@@ -2718,18 +2858,50 @@ +@@ -2718,18 +2878,50 @@ INSERT INTO concur_temp_tab_1 VALUES (1, 'foo'), (2, 'bar'); CREATE INDEX concur_temp_ind_1 ON concur_temp_tab_1(c2); REINDEX TABLE CONCURRENTLY concur_temp_tab_1; @@ -63675,7 +63533,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out + ON COMMIT DELETE ROWS + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46556/v24.2 ++See: https://go.crdb.dev/issue-v/46556/dev CREATE INDEX concur_temp_ind_2 ON concur_temp_tab_2(c2); +ERROR: relation "concur_temp_tab_2" does not exist REINDEX TABLE CONCURRENTLY concur_temp_tab_2; @@ -63693,7 +63551,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out -- ON COMMIT DROP BEGIN; CREATE TEMP TABLE concur_temp_tab_3 (c1 int, c2 text) -@@ -2738,7 +2910,11 @@ +@@ -2738,7 +2930,11 @@ CREATE INDEX concur_temp_ind_3 ON concur_temp_tab_3(c2); -- Fails when running in a transaction REINDEX INDEX CONCURRENTLY concur_temp_ind_3; @@ -63706,7 +63564,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out COMMIT; -- REINDEX SCHEMA processes all temporary relations CREATE TABLE reindex_temp_before AS -@@ -2746,30 +2922,36 @@ +@@ -2746,30 +2942,36 @@ FROM pg_class WHERE relname IN ('concur_temp_ind_1', 'concur_temp_ind_2'); SELECT pg_my_temp_schema()::regnamespace as temp_schema_name \gset @@ -63743,14 +63601,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out CREATE SCHEMA schema_to_reindex; SET search_path = 'schema_to_reindex'; CREATE TABLE table1(col1 SERIAL PRIMARY KEY); -+NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. See https://www.cockroachlabs.com/docs/v24.2/serial.html ++NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. See https://www.cockroachlabs.com/docs/dev/serial.html INSERT INTO table1 SELECT generate_series(1,400); CREATE TABLE table2(col1 SERIAL PRIMARY KEY, col2 TEXT NOT NULL); -+NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. See https://www.cockroachlabs.com/docs/v24.2/serial.html ++NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. See https://www.cockroachlabs.com/docs/dev/serial.html INSERT INTO table2 SELECT generate_series(1,400), 'abc'; CREATE INDEX ON table2(col2); CREATE MATERIALIZED VIEW matview AS SELECT col1 FROM table2; -@@ -2789,6 +2971,11 @@ +@@ -2789,6 +2991,11 @@ (select indexrelid from pg_index where indrelid in (select reltoastrelid from reindex_before where reltoastrelid > 0)); REINDEX SCHEMA schema_to_reindex; @@ -63762,7 +63620,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_index.out CREATE TABLE reindex_after AS SELECT oid, relname, relfilenode, relkind FROM pg_class where relnamespace = (SELECT oid FROM pg_namespace WHERE nspname = 'schema_to_reindex'); -@@ -2798,51 +2985,69 @@ +@@ -2798,51 +3005,69 @@ ELSE 'relfilenode has changed' END FROM reindex_before b JOIN pg_class a ON b.oid = a.oid ORDER BY 1; @@ -63870,7 +63728,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_cast.out - +CREATE TYPE casttesttype + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/27793/v24.2 ++See: https://go.crdb.dev/issue-v/27793/dev CREATE FUNCTION casttesttype_in(cstring) RETURNS casttesttype AS 'textin' @@ -63895,7 +63753,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_cast.out - + internallength = variable, + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27793/v24.2 ++See: https://go.crdb.dev/issue-v/27793/dev -- a dummy function to test with CREATE FUNCTION casttestfunc(casttesttype) RETURNS int4 LANGUAGE SQL AS $$ SELECT 1; $$; @@ -64115,7 +63973,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_aggregate. +CREATE AGGREGATE newavg ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- test comments COMMENT ON AGGREGATE newavg_wrong (int4) IS 'an agg comment'; -ERROR: aggregate newavg_wrong(integer) does not exist @@ -64143,7 +64001,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_aggregate. +CREATE AGGREGATE newsum ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- zero-argument aggregate CREATE AGGREGATE newcnt (*) ( sfunc = int8inc, stype = int8, @@ -64154,7 +64012,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_aggregate. +CREATE AGGREGATE newcnt (*) ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- old-style spelling of same (except without parallel-safe; that's too new) CREATE AGGREGATE oldcnt ( sfunc = int8inc, basetype = 'ANY', stype = int8, @@ -64165,7 +64023,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_aggregate. +CREATE AGGREGATE oldcnt ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- aggregate that only cares about null/nonnull input CREATE AGGREGATE newcnt ("any") ( sfunc = int8inc_any, stype = int8, @@ -64176,7 +64034,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_aggregate. +CREATE AGGREGATE newcnt ("any") ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev COMMENT ON AGGREGATE nosuchagg (*) IS 'should fail'; -ERROR: aggregate nosuchagg(*) does not exist +ERROR: at or near "aggregate": syntax error @@ -64205,7 +64063,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_aggregate. +create aggregate sum2(int8,int8) ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- multi-argument aggregates sensitive to distinct/order, strict/nonstrict create type aggtype as (a integer, b integer, c text); create function aggf_trans(aggtype[],integer,integer,text) returns aggtype[] @@ -64225,7 +64083,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_aggregate. +create aggregate aggfstr(integer,integer,text) ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev create aggregate aggfns(integer,integer,text) ( sfunc = aggfns_trans, stype = aggtype[], sspace = 10000, initcond = '{}' @@ -64235,7 +64093,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_aggregate. +create aggregate aggfns(integer,integer,text) ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- check error cases that would require run-time type coercion create function least_accum(int8, int8) returns int8 language sql as 'select least($1, $2)'; @@ -64248,7 +64106,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_aggregate. +create aggregate least_agg(int4) ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev drop function least_accum(int8, int8); create function least_accum(anycompatible, anycompatible) returns anycompatible language sql as @@ -64263,7 +64121,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_aggregate. +create aggregate least_agg(int4) ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev create aggregate least_agg(int8) ( stype = int8, sfunc = least_accum ); @@ -64272,7 +64130,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_aggregate. +create aggregate least_agg(int8) ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev drop function least_accum(anycompatible, anycompatible) cascade; -NOTICE: drop cascades to function least_agg(bigint) +ERROR: unimplemented: drop function cascade not supported @@ -64296,7 +64154,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_aggregate. +create function least_accum(anyelement, variadic anyarray) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/88947/v24.2 ++See: https://go.crdb.dev/issue-v/88947/dev create aggregate least_agg(variadic items anyarray) ( stype = anyelement, sfunc = least_accum ); @@ -64305,7 +64163,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_aggregate. +create aggregate least_agg(variadic items anyarray) ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev create function cleast_accum(anycompatible, variadic anycompatiblearray) returns anycompatible language sql as 'select least($1, min($2[i])) from generate_subscripts($2,1) g(i)'; @@ -64314,7 +64172,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_aggregate. +create function cleast_accum(anycompatible, variadic anycompatiblearray) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/88947/v24.2 ++See: https://go.crdb.dev/issue-v/88947/dev create aggregate cleast_agg(variadic items anycompatiblearray) ( stype = anycompatible, sfunc = cleast_accum ); @@ -64323,7 +64181,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_aggregate. 
+create aggregate cleast_agg(variadic items anycompatiblearray) ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- test ordered-set aggs using built-in support functions create aggregate my_percentile_disc(float8 ORDER BY anyelement) ( stype = internal, @@ -64336,7 +64194,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_aggregate. +create aggregate my_percentile_disc(float8 ORDER BY anyelement) ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev create aggregate my_rank(VARIADIC "any" ORDER BY VARIADIC "any") ( stype = internal, sfunc = ordered_set_transition_multi, @@ -64349,7 +64207,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_aggregate. +create aggregate my_rank(VARIADIC "any" ORDER BY VARIADIC "any") ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev alter aggregate my_percentile_disc(float8 ORDER BY anyelement) rename to test_percentile_disc; +ERROR: at or near "my_percentile_disc": syntax error: unimplemented: this syntax @@ -64357,7 +64215,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_aggregate. +alter aggregate my_percentile_disc(float8 ORDER BY anyelement) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev alter aggregate my_rank(VARIADIC "any" ORDER BY VARIADIC "any") rename to test_rank; +ERROR: at or near "my_rank": syntax error: unimplemented: this syntax @@ -64365,7 +64223,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_aggregate. +alter aggregate my_rank(VARIADIC "any" ORDER BY VARIADIC "any") + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev \da test_* - List of aggregate functions - Schema | Name | Result data type | Argument data types | Description @@ -64401,7 +64259,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_aggregate. +CREATE AGGREGATE sumdouble (float8) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- aggregate combine and serialization functions -- can't specify just one of serialfunc and deserialfunc CREATE AGGREGATE myavg (numeric) @@ -64415,7 +64273,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_aggregate. +CREATE AGGREGATE myavg (numeric) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- serialfunc must have correct parameters CREATE AGGREGATE myavg (numeric) ( @@ -64429,7 +64287,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_aggregate. +CREATE AGGREGATE myavg (numeric) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- deserialfunc must have correct parameters CREATE AGGREGATE myavg (numeric) ( @@ -64443,7 +64301,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_aggregate. +CREATE AGGREGATE myavg (numeric) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- ensure combine function parameters are checked CREATE AGGREGATE myavg (numeric) ( @@ -64457,7 +64315,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_aggregate. +CREATE AGGREGATE myavg (numeric) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- ensure create aggregate works. CREATE AGGREGATE myavg (numeric) ( @@ -64470,7 +64328,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_aggregate. +CREATE AGGREGATE myavg (numeric) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- Ensure all these functions made it into the catalog SELECT aggfnoid, aggtransfn, aggcombinefn, aggtranstype::regtype, aggserialfn, aggdeserialfn, aggfinalmodify @@ -64488,7 +64346,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_aggregate. +DROP AGGREGATE myavg (numeric) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- create or replace aggregate CREATE AGGREGATE myavg (numeric) ( @@ -64501,7 +64359,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_aggregate. +CREATE AGGREGATE myavg (numeric) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev CREATE OR REPLACE AGGREGATE myavg (numeric) ( stype = internal, @@ -64594,7 +64452,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_aggregate. +DROP AGGREGATE myavg (numeric) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- invalid: bad parallel-safety marking CREATE AGGREGATE mysum (int) ( @@ -64608,7 +64466,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_aggregate. +CREATE AGGREGATE mysum (int) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- invalid: nonstrict inverse with strict forward function CREATE FUNCTION float8mi_n(float8, float8) RETURNS float8 AS $$ SELECT $1 - $2; $$ @@ -64622,7 +64480,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_aggregate. +CREATE AGGREGATE invalidsumdouble (float8) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- invalid: non-matching result types CREATE FUNCTION float8mi_int(float8, float8) RETURNS int AS $$ SELECT CAST($1 - $2 AS INT); $$ @@ -64636,7 +64494,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_aggregate. 
+CREATE AGGREGATE wrongreturntype (float8) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- invalid: non-lowercase quoted identifiers CREATE AGGREGATE case_agg ( -- old syntax "Sfunc1" = int4pl, @@ -64655,7 +64513,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_aggregate. +CREATE AGGREGATE case_agg ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev CREATE AGGREGATE case_agg(float8) ( "Stype" = internal, @@ -64675,7 +64533,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_aggregate. +CREATE AGGREGATE case_agg(float8) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/hash_func.out --label=/mnt/data1/postgres/src/test/regress/results/hash_func.out /mnt/data1/postgres/src/test/regress/expected/hash_func.out /mnt/data1/postgres/src/test/regress/results/hash_func.out --- /mnt/data1/postgres/src/test/regress/expected/hash_func.out +++ /mnt/data1/postgres/src/test/regress/results/hash_func.out @@ -64820,7 +64678,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/hash_func.out --l +FROM (VALUES (NULL::macaddr), ('08:00:2b:01:02:04'), ('08:00:2b:01:02:04'), + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/45813/v24.2 ++See: https://go.crdb.dev/issue-v/45813/dev SELECT v as value, hashinet(v)::bit(32) as standard, hashinetextended(v, 0)::bit(32) as extended0, hashinetextended(v, 1)::bit(32) as extended1 @@ -64864,7 +64722,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/hash_func.out --l +FROM (VALUES (NULL::macaddr8), ('08:00:2b:01:02:04:36:49'), + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/45813/v24.2 ++See: https://go.crdb.dev/issue-v/45813/dev SELECT v as value, hash_array(v)::bit(32) as standard, hash_array_extended(v, 0)::bit(32) as extended0, hash_array_extended(v, 1)::bit(32) as extended1 @@ -64887,7 +64745,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/hash_func.out --l +FROM (VALUES ('{0}'::money[])) x(v) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/41578/v24.2 ++See: https://go.crdb.dev/issue-v/41578/dev SELECT v as value, hash_array_extended(v, 0)::bit(32) as extended0 FROM (VALUES ('{0}'::money[])) x(v); -ERROR: could not identify an extended hash function for type money @@ -64897,7 +64755,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/hash_func.out --l +FROM (VALUES ('{0}'::money[])) x(v) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/41578/v24.2 ++See: https://go.crdb.dev/issue-v/41578/dev SELECT v as value, hashbpchar(v)::bit(32) as standard, hashbpcharextended(v, 0)::bit(32) as extended0, hashbpcharextended(v, 1)::bit(32) as extended1 @@ -65050,7 +64908,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/hash_func.out --l +CREATE TYPE hash_test_t2 AS (a money, b text) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/41578/v24.2 ++See: https://go.crdb.dev/issue-v/41578/dev SELECT v as value, hash_record(v)::bit(32) as standard FROM (VALUES (row(1, 'aaa')::hash_test_t2)) x(v); -ERROR: could not identify a hash function for type money @@ -66520,7 +66378,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/drop_if_exists.ou +DROP DOMAIN test_domain_exists + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev DROP DOMAIN IF EXISTS test_domain_exists; -NOTICE: type "test_domain_exists" does not exist, skipping +ERROR: at or near "if": syntax error: unimplemented: this syntax @@ -66528,21 +66386,21 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/drop_if_exists.ou +DROP DOMAIN IF EXISTS test_domain_exists + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev CREATE domain test_domain_exists as int not null check (value > 0); +ERROR: at or near "as": syntax error: unimplemented: this syntax +DETAIL: source SQL: +CREATE domain test_domain_exists as int not null check (value > 0) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev DROP DOMAIN IF EXISTS test_domain_exists; +ERROR: at or near "if": syntax error: unimplemented: this syntax +DETAIL: source SQL: +DROP DOMAIN IF EXISTS test_domain_exists + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev DROP DOMAIN test_domain_exists; -ERROR: type "test_domain_exists" does not exist +ERROR: at or near "test_domain_exists": syntax error: unimplemented: this syntax @@ -66550,11 +66408,11 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/drop_if_exists.ou +DROP DOMAIN test_domain_exists + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev --- --- role/user/group --- -@@ -68,249 +91,1081 @@ +@@ -68,249 +91,1054 @@ CREATE ROLE regress_test_r1; CREATE GROUP regress_test_g1; DROP USER regress_test_u2; @@ -66675,7 +66533,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/drop_if_exists.ou +DROP TEXT SEARCH PARSER test_tsparser_exists + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev DROP TEXT SEARCH PARSER IF EXISTS test_tsparser_exists; -NOTICE: text search parser "test_tsparser_exists" does not exist, skipping +ERROR: at or near "search": syntax error: unimplemented: this syntax @@ -66683,7 +66541,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/drop_if_exists.ou +DROP TEXT SEARCH PARSER IF EXISTS test_tsparser_exists + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev -- text search dictionary DROP TEXT SEARCH DICTIONARY test_tsdict_exists; -ERROR: text search dictionary "test_tsdict_exists" does not exist @@ -66692,7 +66550,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/drop_if_exists.ou +DROP TEXT SEARCH DICTIONARY test_tsdict_exists + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev DROP TEXT SEARCH DICTIONARY IF EXISTS test_tsdict_exists; -NOTICE: text search dictionary "test_tsdict_exists" does not exist, skipping +ERROR: at or near "search": syntax error: unimplemented: this syntax @@ -66700,7 +66558,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/drop_if_exists.ou +DROP TEXT SEARCH DICTIONARY IF EXISTS test_tsdict_exists + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev CREATE TEXT SEARCH DICTIONARY test_tsdict_exists ( Template=ispell, DictFile=ispell_sample, @@ -66711,14 +66569,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/drop_if_exists.ou +CREATE TEXT SEARCH DICTIONARY test_tsdict_exists ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev DROP TEXT SEARCH DICTIONARY test_tsdict_exists; +ERROR: at or near "search": syntax error: unimplemented: this syntax +DETAIL: source SQL: +DROP TEXT SEARCH DICTIONARY test_tsdict_exists + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev -- test search template DROP TEXT SEARCH TEMPLATE test_tstemplate_exists; -ERROR: text search template "test_tstemplate_exists" does not exist @@ -66727,7 +66585,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/drop_if_exists.ou +DROP TEXT SEARCH TEMPLATE test_tstemplate_exists + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev DROP TEXT SEARCH TEMPLATE IF EXISTS test_tstemplate_exists; -NOTICE: text search template "test_tstemplate_exists" does not exist, skipping +ERROR: at or near "search": syntax error: unimplemented: this syntax @@ -66735,7 +66593,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/drop_if_exists.ou +DROP TEXT SEARCH TEMPLATE IF EXISTS test_tstemplate_exists + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev -- text search configuration DROP TEXT SEARCH CONFIGURATION test_tsconfig_exists; -ERROR: text search configuration "test_tsconfig_exists" does not exist @@ -66744,7 +66602,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/drop_if_exists.ou +DROP TEXT SEARCH CONFIGURATION test_tsconfig_exists + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev DROP TEXT SEARCH CONFIGURATION IF EXISTS test_tsconfig_exists; -NOTICE: text search configuration "test_tsconfig_exists" does not exist, skipping +ERROR: at or near "search": syntax error: unimplemented: this syntax @@ -66752,21 +66610,21 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/drop_if_exists.ou +DROP TEXT SEARCH CONFIGURATION IF EXISTS test_tsconfig_exists + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev CREATE TEXT SEARCH CONFIGURATION test_tsconfig_exists (COPY=english); +ERROR: at or near "search": syntax error: unimplemented: this syntax +DETAIL: source SQL: +CREATE TEXT SEARCH CONFIGURATION test_tsconfig_exists (COPY=english) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev DROP TEXT SEARCH CONFIGURATION test_tsconfig_exists; +ERROR: at or near "search": syntax error: unimplemented: this syntax +DETAIL: source SQL: +DROP TEXT SEARCH CONFIGURATION test_tsconfig_exists + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev -- extension DROP EXTENSION test_extension_exists; -ERROR: extension "test_extension_exists" does not exist @@ -66775,7 +66633,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/drop_if_exists.ou +DROP EXTENSION test_extension_exists + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74777/v24.2 ++See: https://go.crdb.dev/issue-v/74777/dev DROP EXTENSION IF EXISTS test_extension_exists; -NOTICE: extension "test_extension_exists" does not exist, skipping +ERROR: at or near "EOF": syntax error: unimplemented: this syntax @@ -66783,7 +66641,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/drop_if_exists.ou +DROP EXTENSION IF EXISTS test_extension_exists + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74777/v24.2 ++See: https://go.crdb.dev/issue-v/74777/dev -- functions DROP FUNCTION test_function_exists(); -ERROR: function test_function_exists() does not exist @@ -66803,7 +66661,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/drop_if_exists.ou +DROP AGGREGATE test_aggregate_exists(*) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev DROP AGGREGATE IF EXISTS test_aggregate_exists(*); -NOTICE: aggregate test_aggregate_exists() does not exist, skipping +ERROR: at or near "if": syntax error: unimplemented: this syntax @@ -66811,7 +66669,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/drop_if_exists.ou +DROP AGGREGATE IF EXISTS test_aggregate_exists(*) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev DROP AGGREGATE test_aggregate_exists(int); -ERROR: aggregate test_aggregate_exists(integer) does not exist +ERROR: at or near "test_aggregate_exists": syntax error: unimplemented: this syntax @@ -66819,7 +66677,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/drop_if_exists.ou +DROP AGGREGATE test_aggregate_exists(int) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev DROP AGGREGATE IF EXISTS test_aggregate_exists(int); -NOTICE: aggregate test_aggregate_exists(pg_catalog.int4) does not exist, skipping +ERROR: at or near "if": syntax error: unimplemented: this syntax @@ -66827,7 +66685,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/drop_if_exists.ou +DROP AGGREGATE IF EXISTS test_aggregate_exists(int) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- operator DROP OPERATOR @#@ (int, int); -ERROR: operator does not exist: integer @#@ integer @@ -66870,7 +66728,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/drop_if_exists.ou +CREATE OPERATOR @#@ + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev DROP OPERATOR @#@ (int8, int8); +ERROR: at or near "@": syntax error: unimplemented: this syntax +DETAIL: source SQL: @@ -66895,7 +66753,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/drop_if_exists.ou +DROP LANGUAGE test_language_exists + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/17511/v24.2 ++See: https://go.crdb.dev/issue-v/17511/dev DROP LANGUAGE IF EXISTS test_language_exists; -NOTICE: language "test_language_exists" does not exist, skipping +ERROR: at or near "exists": syntax error: unimplemented: this syntax @@ -66903,7 +66761,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/drop_if_exists.ou +DROP LANGUAGE IF EXISTS test_language_exists + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/17511/v24.2 ++See: https://go.crdb.dev/issue-v/17511/dev -- cast DROP CAST (text AS text); -ERROR: cast from type text to type text does not exist @@ -66942,68 +66800,44 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/drop_if_exists.ou -- trigger DROP TRIGGER test_trigger_exists ON test_exists; -ERROR: trigger "test_trigger_exists" for table "test_exists" does not exist -+ERROR: at or near "test_trigger_exists": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+DROP TRIGGER test_trigger_exists ON test_exists -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev DROP TRIGGER IF EXISTS test_trigger_exists ON test_exists; -NOTICE: trigger "test_trigger_exists" for relation "test_exists" does not exist, skipping -+ERROR: at or near "if": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+DROP TRIGGER IF EXISTS test_trigger_exists ON test_exists -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev DROP TRIGGER test_trigger_exists ON no_such_table; -ERROR: relation "no_such_table" does not exist -+ERROR: at or near "test_trigger_exists": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+DROP TRIGGER test_trigger_exists ON no_such_table -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev DROP TRIGGER IF EXISTS test_trigger_exists ON no_such_table; -NOTICE: relation "no_such_table" does not exist, skipping -+ERROR: at or near "if": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+DROP TRIGGER IF EXISTS test_trigger_exists ON no_such_table -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev DROP TRIGGER test_trigger_exists ON no_such_schema.no_such_table; -ERROR: schema "no_such_schema" does not exist -+ERROR: at or near "test_trigger_exists": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+DROP TRIGGER test_trigger_exists ON no_such_schema.no_such_table -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev DROP TRIGGER IF EXISTS test_trigger_exists ON no_such_schema.no_such_table; -NOTICE: schema "no_such_schema" does not exist, skipping -+ERROR: at or near "if": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+DROP TRIGGER IF EXISTS test_trigger_exists ON no_such_schema.no_such_table -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE TRIGGER test_trigger_exists BEFORE UPDATE ON test_exists FOR EACH ROW EXECUTE PROCEDURE suppress_redundant_updates_trigger(); -+ERROR: at or near "test_trigger_exists": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER test_trigger_exists -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev DROP TRIGGER test_trigger_exists ON test_exists; -+ERROR: at or near "test_trigger_exists": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+DROP TRIGGER test_trigger_exists ON test_exists -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev -- rule DROP RULE test_rule_exists ON test_exists; -ERROR: rule "test_rule_exists" for relation "test_exists" does not exist @@ -67397,7 +67231,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/drop_if_exists.ou +DROP AGGREGATE IF EXISTS no_such_schema.foo(int) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev DROP AGGREGATE IF EXISTS foo(no_such_type); -NOTICE: type "no_such_type" does not exist, skipping +ERROR: at or near "if": syntax error: unimplemented: this syntax @@ -67405,7 +67239,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/drop_if_exists.ou +DROP AGGREGATE IF EXISTS foo(no_such_type) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev DROP AGGREGATE IF EXISTS foo(no_such_schema.no_such_type); -NOTICE: schema "no_such_schema" does not exist, skipping +ERROR: at or near "if": syntax error: unimplemented: this syntax @@ -67413,7 +67247,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/drop_if_exists.ou +DROP AGGREGATE IF EXISTS foo(no_such_schema.no_such_type) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev DROP CAST IF EXISTS (INTEGER AS no_such_type2); -NOTICE: type "no_such_type2" does not exist, skipping +ERROR: at or near "if": syntax error: unimplemented: this syntax @@ -67523,7 +67357,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/drop_if_exists.ou +DROP DOMAIN IF EXISTS no_such_schema.foo + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev DROP FOREIGN TABLE IF EXISTS no_such_schema.foo; -NOTICE: schema "no_such_schema" does not exist, skipping +ERROR: at or near "if": syntax error: unimplemented: this syntax @@ -67683,7 +67517,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/drop_if_exists.ou +DROP TEXT SEARCH CONFIGURATION IF EXISTS no_such_schema.foo + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev DROP TEXT SEARCH DICTIONARY IF EXISTS no_such_schema.foo; -NOTICE: schema "no_such_schema" does not exist, skipping +ERROR: at or near "search": syntax error: unimplemented: this syntax @@ -67691,7 +67525,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/drop_if_exists.ou +DROP TEXT SEARCH DICTIONARY IF EXISTS no_such_schema.foo + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev DROP TEXT SEARCH PARSER IF EXISTS no_such_schema.foo; -NOTICE: schema "no_such_schema" does not exist, skipping +ERROR: at or near "search": syntax error: unimplemented: this syntax @@ -67699,7 +67533,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/drop_if_exists.ou +DROP TEXT SEARCH PARSER IF EXISTS no_such_schema.foo + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev DROP TEXT SEARCH TEMPLATE IF EXISTS no_such_schema.foo; -NOTICE: schema "no_such_schema" does not exist, skipping +ERROR: at or near "search": syntax error: unimplemented: this syntax @@ -67707,15 +67541,12 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/drop_if_exists.ou +DROP TEXT SEARCH TEMPLATE IF EXISTS no_such_schema.foo + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev DROP TRIGGER IF EXISTS foo ON no_such_schema.bar; -NOTICE: schema "no_such_schema" does not exist, skipping -+ERROR: at or near "if": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+DROP TRIGGER IF EXISTS foo ON no_such_schema.bar -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev DROP TYPE IF EXISTS no_such_schema.foo; -NOTICE: schema "no_such_schema" does not exist, skipping DROP VIEW IF EXISTS no_such_schema.foo; @@ -67733,7 +67564,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/drop_if_exists.ou -- cleanup DROP FUNCTION test_ambiguous_funcname(int); DROP FUNCTION test_ambiguous_funcname(text); -@@ -319,24 +1174,37 @@ +@@ -319,24 +1147,37 @@ CREATE PROCEDURE test_ambiguous_procname(text) as $$ begin end; $$ language plpgsql; DROP PROCEDURE test_ambiguous_procname; ERROR: procedure name "test_ambiguous_procname" is not unique @@ -67949,7 +67780,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/typed_table.out - -drop cascades to table persons3 +ERROR: unimplemented: DROP TYPE CASCADE is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/51480/v24.2 ++See: https://go.crdb.dev/issue-v/51480/dev CREATE TABLE persons5 OF stuff; -- only CREATE TYPE AS types may be used -ERROR: type stuff is not a composite type +ERROR: at or near "of": syntax error @@ -68087,7 +67918,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/errors.out --labe +select null from pg_database group by grouping sets (()) for update + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev -- -- DELETE -- missing relation name (this had better not wildcard!) @@ -68186,7 +68017,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/errors.out --labe +create aggregate newavg2 (sfunc = int4pl, + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- left out basetype create aggregate newcnt1 (sfunc = int4inc, stype = int4, @@ -68197,7 +68028,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/errors.out --labe +create aggregate newcnt1 (sfunc = int4inc, + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- -- DROP INDEX -- missing index name @@ -68235,7 +68066,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/errors.out --labe +drop aggregate + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- missing aggregate type drop aggregate newcnt1; -ERROR: syntax error at or near ";" @@ -68246,7 +68077,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/errors.out --labe +drop aggregate newcnt1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- bad aggregate name drop aggregate 314159 (int); -ERROR: syntax error at or near "314159" @@ -68257,7 +68088,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/errors.out --labe +drop aggregate 314159 (int) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- bad aggregate type drop aggregate newcnt (nonesuch); -ERROR: type "nonesuch" does not exist @@ -68266,7 +68097,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/errors.out --labe +drop aggregate newcnt (nonesuch) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- no such aggregate drop aggregate nonesuch (int4); -ERROR: aggregate nonesuch(integer) does not exist @@ -68275,7 +68106,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/errors.out --labe +drop aggregate nonesuch (int4) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- no such aggregate for type drop aggregate newcnt (float4); -ERROR: aggregate newcnt(real) does not exist @@ -68284,7 +68115,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/errors.out --labe +drop aggregate newcnt (float4) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- -- DROP FUNCTION -- missing function name @@ -68931,105 +68762,25 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_function_s SELECT proname, provolatile FROM pg_proc WHERE oid in ('functest_B_1'::regproc, 'functest_B_2'::regproc, -@@ -95,33 +110,83 @@ - AS 'SELECT $1 > 0'; - CREATE FUNCTION functest_C_2(int) RETURNS bool LANGUAGE 'sql' - SECURITY DEFINER AS 'SELECT $1 = 0'; -+ERROR: at or near "definer": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE FUNCTION functest_C_2(int) RETURNS bool LANGUAGE 'sql' -+ SECURITY DEFINER AS 'SELECT $1 = 0' -+ ^ -+HINT: You have attempted to use a feature that is not yet implemented. -+ -+Please check the public issue tracker to check whether this problem is -+already tracked. If you cannot find it there, please report the error -+with details by creating a new issue. -+ -+If you would rather not post publicly, please contact us directly -+using the support form. -+ -+We appreciate your feedback. -+ - CREATE FUNCTION functest_C_3(int) RETURNS bool LANGUAGE 'sql' - SECURITY INVOKER AS 'SELECT $1 < 0'; -+ERROR: at or near "invoker": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE FUNCTION functest_C_3(int) RETURNS bool LANGUAGE 'sql' -+ SECURITY INVOKER AS 'SELECT $1 < 0' -+ ^ -+HINT: You have attempted to use a feature that is not yet implemented. -+ -+Please check the public issue tracker to check whether this problem is -+already tracked. If you cannot find it there, please report the error -+with details by creating a new issue. -+ -+If you would rather not post publicly, please contact us directly -+using the support form. -+ -+We appreciate your feedback. 
-+ - SELECT proname, prosecdef FROM pg_proc - WHERE oid in ('functest_C_1'::regproc, - 'functest_C_2'::regproc, - 'functest_C_3'::regproc) ORDER BY proname; -- proname | prosecdef ----------------+----------- -- functest_c_1 | f +@@ -104,7 +119,7 @@ + proname | prosecdef + --------------+----------- + functest_c_1 | f - functest_c_2 | t -- functest_c_3 | f --(3 rows) -- -+ERROR: unknown function: functest_c_2() - ALTER FUNCTION functest_C_1(int) IMMUTABLE; -- unrelated change, no effect - ALTER FUNCTION functest_C_2(int) SECURITY INVOKER; -+ERROR: at or near "invoker": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+ALTER FUNCTION functest_C_2(int) SECURITY INVOKER -+ ^ -+HINT: You have attempted to use a feature that is not yet implemented. -+ -+Please check the public issue tracker to check whether this problem is -+already tracked. If you cannot find it there, please report the error -+with details by creating a new issue. -+ -+If you would rather not post publicly, please contact us directly -+using the support form. -+ -+We appreciate your feedback. -+ - ALTER FUNCTION functest_C_3(int) SECURITY DEFINER; -+ERROR: at or near "definer": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+ALTER FUNCTION functest_C_3(int) SECURITY DEFINER -+ ^ -+HINT: You have attempted to use a feature that is not yet implemented. -+ -+Please check the public issue tracker to check whether this problem is -+already tracked. If you cannot find it there, please report the error -+with details by creating a new issue. -+ -+If you would rather not post publicly, please contact us directly -+using the support form. -+ -+We appreciate your feedback. -+ - SELECT proname, prosecdef FROM pg_proc - WHERE oid in ('functest_C_1'::regproc, - 'functest_C_2'::regproc, - 'functest_C_3'::regproc) ORDER BY proname; -- proname | prosecdef ----------------+----------- -- functest_c_1 | f -- functest_c_2 | f ++ functest_c_2 | f + functest_c_3 | f + (3 rows) + +@@ -119,7 +134,7 @@ + --------------+----------- + functest_c_1 | f + functest_c_2 | f - functest_c_3 | t --(3 rows) -- -+ERROR: unknown function: functest_c_2() - -- - -- LEAKPROOF ++ functest_c_3 | f + (3 rows) + -- -@@ -129,48 +194,49 @@ +@@ -129,48 +144,49 @@ AS 'SELECT $1 > 100'; CREATE FUNCTION functest_E_2(int) RETURNS bool LANGUAGE 'sql' LEAKPROOF AS 'SELECT $1 > 100'; @@ -69080,7 +68831,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_function_s +SET SESSION AUTHORIZATION regress_unpriv_user + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SET search_path TO temp_func_test, public; ALTER FUNCTION functest_E_1(int) NOT LEAKPROOF; ALTER FUNCTION functest_E_2(int) LEAKPROOF; @@ -69099,7 +68850,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_function_s -- -- CALLED ON NULL INPUT | RETURNS NULL ON NULL INPUT | STRICT -- -@@ -213,46 +279,47 @@ +@@ -213,46 +229,63 @@ -- pg_get_functiondef tests SELECT pg_get_functiondef('functest_A_1'::regproc); @@ -69118,6 +68869,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_function_s + NOT LEAKPROOF + + CALLED ON NULL INPUT + + LANGUAGE SQL + ++ SECURITY INVOKER + + AS $$ + + SELECT ($1 = 'abcd') AND ($2 > '2001-01-01'); + + $$ @@ -69140,6 +68892,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_function_s + NOT LEAKPROOF + + CALLED ON NULL INPUT + + LANGUAGE SQL + ++ SECURITY INVOKER + + AS $$ + + SELECT $1 = 0; + + $$ @@ -69154,9 +68907,20 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_function_s - SECURITY DEFINER + - AS $function$SELECT $1 < 0$function$ + - --(1 row) -- -+ERROR: unknown function: functest_c_3() ++ pg_get_functiondef ++--------------------------------------------------- ++ CREATE FUNCTION temp_func_test.functest_c_3(INT8)+ ++ RETURNS BOOL + ++ VOLATILE + ++ NOT LEAKPROOF + ++ CALLED ON NULL INPUT + ++ LANGUAGE SQL + ++ SECURITY DEFINER + ++ AS $$ + ++ SELECT $1 < 0; + ++ $$ + (1 row) + SELECT pg_get_functiondef('functest_F_2'::regproc); - pg_get_functiondef ------------------------------------------------------------------ @@ -69174,13 +68938,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_function_s + NOT LEAKPROOF + + STRICT + + LANGUAGE SQL + ++ SECURITY INVOKER + + AS $$ + + SELECT $1 = 50; + + $$ (1 row) -- -@@ -261,24 +328,91 @@ +@@ -261,24 +294,91 @@ CREATE FUNCTION functest_S_1(a text, b date) RETURNS boolean LANGUAGE SQL RETURN a = 'abcd' AND b > '2001-01-01'; @@ -69272,7 +69037,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_function_s -- check display of function arguments in sub-SELECT CREATE TABLE functest1 (i int); CREATE FUNCTION functest_S_16(a int, b int) RETURNS void -@@ -286,174 +420,129 @@ +@@ -286,174 +386,129 @@ BEGIN ATOMIC INSERT INTO functest1 SELECT a + $2; END; @@ -69529,7 +69294,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_function_s -- information_schema tests CREATE FUNCTION functest_IS_1(a int, b int default 1, c text default 'foo') RETURNS int -@@ -463,6 +552,7 @@ +@@ -463,6 +518,7 @@ RETURNS int LANGUAGE SQL AS 'SELECT $1'; @@ -69537,7 +69302,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_function_s CREATE FUNCTION functest_IS_3(a int default 1, out b int) RETURNS int LANGUAGE SQL -@@ -474,15 +564,14 @@ +@@ -474,15 +530,14 @@ routine_name | ordinal_position | parameter_name | parameter_default ---------------+------------------+----------------+------------------- functest_is_1 | 1 | a | @@ -69558,7 +69323,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_function_s -- routine usage views CREATE FUNCTION functest_IS_4a() RETURNS int LANGUAGE SQL AS 'SELECT 1'; CREATE FUNCTION functest_IS_4b(x int DEFAULT functest_IS_4a()) RETURNS int LANGUAGE SQL AS 'SELECT x'; -@@ -495,11 +584,35 @@ +@@ -495,11 +550,35 @@ RETURNS int LANGUAGE SQL RETURN nextval('functest1'); @@ -69594,7 +69359,7 @@ diff -U3 
--label=/mnt/data1/postgres/src/test/regress/expected/create_function_s SELECT r0.routine_name, r1.routine_name FROM information_schema.routine_routine_usage rru JOIN information_schema.routines r0 ON r0.specific_name = rru.specific_name -@@ -507,61 +620,63 @@ +@@ -507,61 +586,63 @@ WHERE r0.routine_schema = 'temp_func_test' AND r1.routine_schema = 'temp_func_test' ORDER BY 1, 2; @@ -69690,7 +69455,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_function_s DROP FUNCTION functest1(a int); -- inlining of set-returning functions CREATE TABLE functest3 (a int); -@@ -581,35 +696,38 @@ +@@ -581,35 +662,38 @@ (3 rows) EXPLAIN (verbose, costs off) SELECT * FROM functest_sri1(); @@ -69749,7 +69514,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_function_s -- Check behavior of VOID-returning SQL functions CREATE FUNCTION voidtest1(a int) RETURNS VOID LANGUAGE SQL AS $$ SELECT a + 1 $$; -@@ -629,12 +747,11 @@ +@@ -629,12 +713,11 @@ -- currently, we can inline voidtest2 but not voidtest1 EXPLAIN (verbose, costs off) SELECT voidtest2(11,22); @@ -69767,7 +69532,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_function_s CREATE TEMP TABLE sometable(f1 int); CREATE FUNCTION voidtest3(a int) RETURNS VOID LANGUAGE SQL AS $$ INSERT INTO sometable VALUES(a + 1) $$; -@@ -664,7 +781,8 @@ +@@ -664,7 +747,8 @@ SELECT * FROM voidtest5(3); voidtest5 ----------- @@ -69777,7 +69542,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_function_s -- Regression tests for bugs: -- Check that arguments that are R/W expanded datums aren't corrupted by -@@ -674,70 +792,60 @@ +@@ -674,70 +758,60 @@ CREATE FUNCTION double_append(anyarray, anyelement) RETURNS SETOF anyarray LANGUAGE SQL IMMUTABLE AS $$ SELECT array_append($1, $2) || array_append($1, $2) $$; @@ -69965,7 +69730,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_am.out --l +CREATE OPERATOR CLASS box_ops DEFAULT + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev -- Create gist2 index on fast_emp4000 CREATE INDEX grect2ind2 ON fast_emp4000 USING gist2 (home_base); +ERROR: at or near "gist2": syntax error: unrecognized access method: gist2 @@ -70016,7 +69781,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_am.out --l + ORDER BY (home_base[0])[0] + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev EXPLAIN (COSTS OFF) SELECT count(*) FROM fast_emp4000 WHERE home_base && '(1000,1000,0,0)'::box; - QUERY PLAN @@ -70042,7 +69807,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_am.out --l +SELECT count(*) FROM fast_emp4000 WHERE home_base && '(1000,1000,0,0)'::box + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev EXPLAIN (COSTS OFF) SELECT count(*) FROM fast_emp4000 WHERE home_base IS NULL; - QUERY PLAN @@ -70675,7 +70440,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_am.out --l diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/infinite_recurse.out --label=/mnt/data1/postgres/src/test/regress/results/infinite_recurse.out /mnt/data1/postgres/src/test/regress/expected/infinite_recurse.out /mnt/data1/postgres/src/test/regress/results/infinite_recurse.out --- /mnt/data1/postgres/src/test/regress/expected/infinite_recurse.out +++ /mnt/data1/postgres/src/test/regress/results/infinite_recurse.out -@@ -2,6 +2,8 @@ +@@ -2,6 +2,7 @@ -- max_stack_depth is not set too high. create function infinite_recurse() returns int as 'select infinite_recurse()' language sql; @@ -70683,7 +70448,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/infinite_recurse. -- Unfortunately, up till mid 2020 the Linux kernel had a bug in PPC64 -- signal handling that would cause this test to crash if it happened -- to receive an sinval catchup interrupt while the stack is deep: -@@ -19,6 +21,6 @@ +@@ -19,6 +20,6 @@ -- and primary error message. \set VERBOSITY sqlstate select infinite_recurse(); @@ -70864,7 +70629,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/constraints.out - + INHERITS (INSERT_TBL) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev INSERT INTO INSERT_CHILD(x,z,cy) VALUES (7,-7,11); +ERROR: relation "insert_child" does not exist INSERT INTO INSERT_CHILD(x,z,cy) VALUES (7,-7,6); @@ -70905,7 +70670,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/constraints.out - +CREATE TABLE ATACC2 (TEST2 INT) INHERITS (ATACC1) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev -- check constraint is not there on child INSERT INTO ATACC2 (TEST) VALUES (-3); +ERROR: relation "atacc2" does not exist @@ -70931,7 +70696,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/constraints.out - +CREATE TABLE ATACC2 () INHERITS (ATACC1) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev -- check constraint is there on child INSERT INTO ATACC2 (TEST) VALUES (-3); -ERROR: new row for relation "atacc2" violates check constraint "atacc1_test_check" @@ -71264,7 +71029,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/constraints.out - + UNIQUE (i) DEFERRABLE INITIALLY DEFERRED + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/31632/v24.2 ++See: https://go.crdb.dev/issue-v/31632/dev BEGIN; INSERT INTO unique_tbl VALUES (1, 'five'); +ERROR: relation "unique_tbl" does not exist @@ -71534,7 +71299,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/constraints.out - + c1 CIRCLE, + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev -- these should succeed because they don't match the index predicate INSERT INTO circles VALUES('<(0,0), 5>', '<(0,0), 5>'); +ERROR: relation "circles" does not exist @@ -71573,7 +71338,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/constraints.out - +ALTER TABLE circles ADD EXCLUDE USING gist + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46657/v24.2 ++See: https://go.crdb.dev/issue-v/46657/dev -- try reindexing an existing constraint REINDEX INDEX circles_c1_c2_excl; +ERROR: at or near "circles_c1_c2_excl": syntax error: unimplemented: this syntax @@ -71663,7 +71428,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/constraints.out - +SET SESSION AUTHORIZATION regress_constraint_comments + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE TABLE constraint_comments_tbl (a int CONSTRAINT the_constraint CHECK (a > 0)); CREATE DOMAIN constraint_comments_dom AS int CONSTRAINT the_constraint CHECK (value > 0); +ERROR: at or near "as": syntax error: unimplemented: this syntax @@ -71671,7 +71436,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/constraints.out - +CREATE DOMAIN constraint_comments_dom AS int CONSTRAINT the_constraint CHECK (value > 0) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev COMMENT ON CONSTRAINT the_constraint ON constraint_comments_tbl IS 'yes, the comment'; COMMENT ON CONSTRAINT the_constraint ON DOMAIN constraint_comments_dom IS 'yes, another comment'; +ERROR: at or near "constraint_comments_dom": syntax error @@ -71717,7 +71482,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/constraints.out - +SET SESSION AUTHORIZATION regress_constraint_comments_noaccess + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev COMMENT ON CONSTRAINT the_constraint ON constraint_comments_tbl IS 'no, the comment'; -ERROR: must be owner of relation constraint_comments_tbl COMMENT ON CONSTRAINT the_constraint ON DOMAIN constraint_comments_dom IS 'no, another comment'; @@ -71739,7 +71504,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/constraints.out - +DROP DOMAIN constraint_comments_dom + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev DROP ROLE regress_constraint_comments; DROP ROLE regress_constraint_comments_noaccess; diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.out --label=/mnt/data1/postgres/src/test/regress/results/updatable_views.out /mnt/data1/postgres/src/test/regress/expected/updatable_views.out /mnt/data1/postgres/src/test/regress/results/updatable_views.out @@ -72596,23 +72361,22 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o -- view on top of view with triggers CREATE TABLE base_tbl (a int PRIMARY KEY, b text DEFAULT 'Unspecified'); INSERT INTO base_tbl SELECT i, 'Row ' || i FROM generate_series(-2, 2) g(i); -@@ -791,8 +793,15 @@ +@@ -791,8 +793,14 @@ END; $$ LANGUAGE plpgsql; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev CREATE TRIGGER rw_view1_ins_trig INSTEAD OF INSERT ON rw_view1 FOR EACH ROW EXECUTE PROCEDURE rw_view1_trig_fn(); -+ERROR: at or near "rw_view1_ins_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER rw_view1_ins_trig INSTEAD OF INSERT ON rw_view1 -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev SELECT table_name, is_insertable_into FROM information_schema.tables WHERE table_name LIKE 'rw_view%' -@@ -811,7 +820,7 @@ +@@ -811,7 +819,7 @@ ORDER BY table_name; table_name | is_updatable | is_insertable_into | is_trigger_updatable | is_trigger_deletable | is_trigger_insertable_into ------------+--------------+--------------------+----------------------+----------------------+---------------------------- @@ -72621,20 +72385,17 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o rw_view2 | NO | NO | NO | NO | NO (2 rows) -@@ -829,6 +838,12 @@ +@@ -829,6 +837,9 @@ CREATE TRIGGER rw_view1_upd_trig INSTEAD OF UPDATE ON rw_view1 FOR EACH ROW EXECUTE PROCEDURE rw_view1_trig_fn(); -+ERROR: at or near "rw_view1_upd_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER rw_view1_upd_trig INSTEAD OF UPDATE ON rw_view1 -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev SELECT table_name, is_insertable_into FROM information_schema.tables WHERE table_name LIKE 'rw_view%' -@@ -847,7 +862,7 @@ +@@ -847,7 +858,7 @@ ORDER BY table_name; table_name | is_updatable | is_insertable_into | is_trigger_updatable | is_trigger_deletable | is_trigger_insertable_into ------------+--------------+--------------------+----------------------+----------------------+---------------------------- @@ -72643,20 +72404,17 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o rw_view2 | NO | NO | NO | NO | NO (2 rows) -@@ -865,6 +880,12 @@ +@@ -865,6 +876,9 @@ CREATE TRIGGER rw_view1_del_trig INSTEAD OF DELETE ON rw_view1 FOR EACH ROW EXECUTE PROCEDURE rw_view1_trig_fn(); -+ERROR: at or near "rw_view1_del_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER rw_view1_del_trig INSTEAD OF DELETE ON rw_view1 -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev SELECT table_name, is_insertable_into FROM information_schema.tables WHERE table_name LIKE 'rw_view%' -@@ -883,7 +904,7 @@ +@@ -883,7 +897,7 @@ ORDER BY table_name; table_name | is_updatable | is_insertable_into | is_trigger_updatable | is_trigger_deletable | is_trigger_insertable_into ------------+--------------+--------------------+----------------------+----------------------+---------------------------- @@ -72665,7 +72423,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o rw_view2 | NO | NO | NO | NO | NO (2 rows) -@@ -900,31 +921,18 @@ +@@ -900,31 +914,18 @@ (4 rows) INSERT INTO rw_view2 VALUES (3, 'Row 3') RETURNING *; @@ -72703,7 +72461,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o SELECT * FROM rw_view2; a | b ---+------- -@@ -933,199 +941,286 @@ +@@ -933,199 +934,286 @@ (2 rows) EXPLAIN (costs off) UPDATE rw_view2 SET a=3 WHERE a=2; @@ -72803,7 +72561,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o +SET SESSION AUTHORIZATION regress_view_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE TABLE base_tbl(a int, b text, c float); INSERT INTO base_tbl VALUES (1, 'Row 1', 1.0); CREATE VIEW rw_view1 AS SELECT b AS bb, c AS cc, a AS aa FROM base_tbl; @@ -72835,7 +72593,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o +SET SESSION AUTHORIZATION regress_view_user2 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE VIEW rw_view2 AS SELECT b AS bb, c AS cc, a AS aa FROM base_tbl; SELECT * FROM base_tbl; -- ok a | b | c @@ -72902,7 +72660,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o +SET SESSION AUTHORIZATION regress_view_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev GRANT INSERT, DELETE ON base_tbl TO regress_view_user2; RESET SESSION AUTHORIZATION; +ERROR: at or near "authorization": syntax error @@ -72916,7 +72674,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o +SET SESSION AUTHORIZATION regress_view_user2 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev INSERT INTO base_tbl VALUES (3, 'Row 3', 3.0); -- ok INSERT INTO rw_view1 VALUES ('Row 4', 4.0, 4); -- not allowed -ERROR: permission denied for view rw_view1 @@ -72949,7 +72707,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o +SET SESSION AUTHORIZATION regress_view_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev REVOKE INSERT, DELETE ON base_tbl FROM regress_view_user2; GRANT INSERT, DELETE ON rw_view1 TO regress_view_user2; RESET SESSION AUTHORIZATION; @@ -72964,7 +72722,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o +SET SESSION AUTHORIZATION regress_view_user2 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev INSERT INTO base_tbl VALUES (5, 'Row 5', 5.0); -- not allowed -ERROR: permission denied for table base_tbl INSERT INTO rw_view1 VALUES ('Row 5', 5.0, 5); -- ok @@ -73006,7 +72764,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o +SET SESSION AUTHORIZATION regress_view_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE VIEW rw_view1 AS SELECT * FROM base_tbl; SELECT * FROM rw_view1; -- not allowed -ERROR: permission denied for table base_tbl @@ -73031,7 +72789,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o +SET SESSION AUTHORIZATION regress_view_user2 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE VIEW rw_view2 AS SELECT * FROM rw_view1; SELECT * FROM rw_view2; -- not allowed -ERROR: permission denied for view rw_view1 @@ -73063,11 +72821,11 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o +SET SESSION AUTHORIZATION regress_view_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM rw_view1; a | b | c ---+-------+--- -@@ -1133,19 +1228,49 @@ +@@ -1133,19 +1221,49 @@ (1 row) SELECT * FROM rw_view1 FOR UPDATE; -- not allowed @@ -73086,7 +72844,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o +SET SESSION AUTHORIZATION regress_view_user2 + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM rw_view2; -- not allowed -ERROR: permission denied for view rw_view1 + a | b | c @@ -73110,7 +72868,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o +SET SESSION AUTHORIZATION regress_view_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev GRANT SELECT ON rw_view1 TO regress_view_user2; SET SESSION AUTHORIZATION regress_view_user2; +ERROR: at or near "regress_view_user2": syntax error: unimplemented: this syntax @@ -73118,11 +72876,11 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o +SET SESSION AUTHORIZATION regress_view_user2 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM rw_view2; a | b | c ---+-------+--- -@@ -1153,12 +1278,27 @@ +@@ -1153,12 +1271,27 @@ (1 row) SELECT * FROM rw_view2 FOR UPDATE; -- not allowed @@ -73148,11 +72906,11 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o +SET SESSION AUTHORIZATION regress_view_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM rw_view1; a | b | c ---+-------+--- -@@ -1172,167 +1312,341 @@ +@@ -1172,167 +1305,341 @@ (1 row) UPDATE rw_view1 SET b = 'foo' WHERE a = 1; @@ -73163,7 +72921,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o +SET SESSION AUTHORIZATION regress_view_user2 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM rw_view2; - a | b | c ----+-----+--- @@ -73189,7 +72947,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o +SET SESSION AUTHORIZATION regress_view_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev GRANT UPDATE ON rw_view1 TO regress_view_user2; SET SESSION AUTHORIZATION regress_view_user2; +ERROR: at or near "regress_view_user2": syntax error: unimplemented: this syntax @@ -73197,7 +72955,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o +SET SESSION AUTHORIZATION regress_view_user2 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM rw_view2; - a | b | c ----+-----+--- @@ -73231,7 +72989,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o +SET SESSION AUTHORIZATION regress_view_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM rw_view1; - a | b | c ----+-----+--- @@ -73257,7 +73015,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o +SET SESSION AUTHORIZATION regress_view_user2 + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM rw_view2; - a | b | c ----+-----+--- @@ -73294,7 +73052,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o +SET SESSION AUTHORIZATION regress_view_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE TABLE base_tbl(a int, b text, c float); INSERT INTO base_tbl VALUES (1, 'Row 1', 1.0); CREATE VIEW rw_view1 AS SELECT b AS bb, c AS cc, a AS aa FROM base_tbl; @@ -73319,7 +73077,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o +SET SESSION AUTHORIZATION regress_view_user2 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM base_tbl; -- not allowed -ERROR: permission denied for table base_tbl + a | b | c @@ -73355,7 +73113,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o +SET SESSION AUTHORIZATION regress_view_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev GRANT SELECT ON base_tbl TO regress_view_user2; GRANT UPDATE (a,c) ON base_tbl TO regress_view_user2; +ERROR: at or near "(": syntax error @@ -73369,7 +73127,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o +SET SESSION AUTHORIZATION regress_view_user2 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM base_tbl; -- ok - a | b | c ----+-------+--- @@ -73407,7 +73165,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o +SET SESSION AUTHORIZATION regress_view_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev GRANT INSERT, DELETE ON base_tbl TO regress_view_user2; SET SESSION AUTHORIZATION regress_view_user2; +ERROR: at or near "regress_view_user2": syntax error: unimplemented: this syntax @@ -73415,7 +73173,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o +SET SESSION AUTHORIZATION regress_view_user2 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev INSERT INTO base_tbl VALUES (3, 'Row 3', 3.0); -- ok INSERT INTO rw_view1 VALUES ('Row 4', 4.0, 4); -- not allowed -ERROR: permission denied for view rw_view1 @@ -73430,7 +73188,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o +SET SESSION AUTHORIZATION regress_view_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev REVOKE INSERT, DELETE ON base_tbl FROM regress_view_user2; GRANT INSERT, DELETE ON rw_view1 TO regress_view_user2; SET SESSION AUTHORIZATION regress_view_user2; @@ -73439,7 +73197,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o +SET SESSION AUTHORIZATION regress_view_user2 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev INSERT INTO rw_view1 VALUES ('Row 4', 4.0, 4); -- not allowed -ERROR: permission denied for table base_tbl +ERROR: "rw_view1" is not a table @@ -73452,7 +73210,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o +SET SESSION AUTHORIZATION regress_view_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev GRANT INSERT, DELETE ON base_tbl TO regress_view_user2; SET SESSION AUTHORIZATION regress_view_user2; +ERROR: at or near "regress_view_user2": syntax error: unimplemented: this syntax @@ -73460,7 +73218,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o +SET SESSION AUTHORIZATION regress_view_user2 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev INSERT INTO rw_view1 VALUES ('Row 4', 4.0, 4); -- ok +ERROR: "rw_view1" is not a table DELETE FROM rw_view1 WHERE aa=2; -- ok @@ -73490,7 +73248,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o +SET SESSION AUTHORIZATION regress_view_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE VIEW rw_view1 AS SELECT b AS bb, c AS cc, a AS aa FROM base_tbl; ALTER VIEW rw_view1 SET (security_invoker = true); +ERROR: at or near "(": syntax error @@ -73514,7 +73272,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o +SET SESSION AUTHORIZATION regress_view_user2 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE VIEW rw_view2 AS SELECT cc AS ccc, aa AS aaa, bb AS bbb FROM rw_view1; GRANT SELECT, UPDATE ON rw_view2 TO regress_view_user3; SELECT * FROM rw_view2; -- not allowed @@ -73546,11 +73304,11 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o +SET SESSION AUTHORIZATION regress_view_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM rw_view1; -- ok bb | cc | aa -------+----+---- -@@ -1340,35 +1654,103 @@ +@@ -1340,35 +1647,103 @@ (1 row) UPDATE rw_view1 SET aa=aa, bb=bb; -- ok @@ -73564,7 +73322,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o +SET SESSION AUTHORIZATION regress_view_user2 + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM rw_view2; -- not allowed -ERROR: permission denied for view rw_view1 + ccc | aaa | bbb @@ -73581,7 +73339,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o +SET SESSION AUTHORIZATION regress_view_user3 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM rw_view2; -- not allowed -ERROR: permission denied for view rw_view1 + ccc | aaa | bbb @@ -73598,7 +73356,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o +SET SESSION AUTHORIZATION regress_view_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev GRANT SELECT ON rw_view1 TO regress_view_user2; GRANT UPDATE (bb, cc) ON rw_view1 TO regress_view_user2; +ERROR: at or near "(": syntax error @@ -73612,7 +73370,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o +SET SESSION AUTHORIZATION regress_view_user2 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM rw_view2; -- not allowed -ERROR: permission denied for table base_tbl + ccc | aaa | bbb @@ -73629,7 +73387,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o +SET SESSION AUTHORIZATION regress_view_user3 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM rw_view2; -- not allowed -ERROR: permission denied for table base_tbl + ccc | aaa | bbb @@ -73659,11 +73417,11 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o +SET SESSION AUTHORIZATION regress_view_user2 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM rw_view2; -- ok ccc | aaa | bbb -----+-----+------- -@@ -1376,23 +1758,50 @@ +@@ -1376,23 +1751,50 @@ (1 row) UPDATE rw_view2 SET aaa=aaa; -- not allowed @@ -73680,7 +73438,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o +SET SESSION AUTHORIZATION regress_view_user3 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM rw_view2; -- not allowed -ERROR: permission denied for table base_tbl + ccc | aaa | bbb @@ -73716,11 +73474,11 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o +SET SESSION AUTHORIZATION regress_view_user3 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM rw_view2; -- ok ccc | aaa | bbb -----+-----+------- -@@ -1400,18 +1809,40 @@ +@@ -1400,18 +1802,40 @@ (1 row) UPDATE rw_view2 SET aaa=aaa; -- not allowed @@ -73744,7 +73502,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o +SET SESSION AUTHORIZATION regress_view_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM rw_view1; -- not allowed -ERROR: permission denied for table base_tbl + bb | cc | aa @@ -73761,11 +73519,11 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o +SET SESSION AUTHORIZATION regress_view_user2 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM rw_view2; -- ok ccc | aaa | bbb -----+-----+------- -@@ -1419,11 +1850,18 @@ +@@ -1419,11 +1843,18 @@ (1 row) UPDATE rw_view2 SET aaa=aaa; -- not allowed @@ -73782,11 +73540,11 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o +SET SESSION AUTHORIZATION regress_view_user3 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM rw_view2; -- ok ccc | aaa | bbb -----+-----+------- -@@ -1431,22 +1869,44 @@ +@@ -1431,22 +1862,44 @@ (1 row) UPDATE rw_view2 SET aaa=aaa; -- not allowed @@ -73810,7 +73568,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o +SET SESSION AUTHORIZATION regress_view_user2 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM rw_view2; -- not allowed -ERROR: permission denied for table base_tbl + ccc | aaa | bbb @@ -73833,11 +73591,11 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o +SET SESSION AUTHORIZATION regress_view_user3 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM rw_view2; -- ok ccc | aaa | bbb -----+-----+------- -@@ -1454,39 +1914,47 @@ +@@ -1454,39 +1907,47 @@ (1 row) UPDATE rw_view2 SET aaa=aaa; -- not allowed @@ -73900,19 +73658,18 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o -- Table having triggers CREATE TABLE base_tbl (a int PRIMARY KEY, b text DEFAULT 'Unspecified'); INSERT INTO base_tbl VALUES (1, 'Row 1'); -@@ -1503,21 +1971,35 @@ +@@ -1503,21 +1964,31 @@ END; $$ LANGUAGE plpgsql; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev CREATE TRIGGER rw_view1_ins_trig AFTER INSERT ON base_tbl FOR EACH ROW EXECUTE PROCEDURE rw_view1_trig_fn(); -+ERROR: at or near "rw_view1_ins_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER rw_view1_ins_trig AFTER INSERT ON base_tbl -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE VIEW rw_view1 AS SELECT a AS aa, b AS bb FROM base_tbl; INSERT INTO rw_view1 VALUES (3, 'Row 3'); +ERROR: "rw_view1" is not a table @@ -73928,18 +73685,15 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o DROP VIEW rw_view1; DROP TRIGGER rw_view1_ins_trig on base_tbl; -+ERROR: at or near "rw_view1_ins_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+DROP TRIGGER rw_view1_ins_trig on base_tbl -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev DROP FUNCTION rw_view1_trig_fn(); +ERROR: unknown function: rw_view1_trig_fn() DROP TABLE base_tbl; -- view with ORDER BY CREATE TABLE base_tbl (a int, b int); -@@ -1526,148 +2008,112 @@ +@@ -1526,148 +1997,112 @@ SELECT * FROM rw_view1; a | b ---+---- @@ -74134,7 +73888,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o SELECT table_name, is_insertable_into FROM information_schema.tables -@@ -1675,10 +2121,7 @@ +@@ -1675,10 +2110,7 @@ ORDER BY table_name; table_name | is_insertable_into ------------+-------------------- @@ -74146,7 +73900,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o SELECT table_name, is_updatable, is_insertable_into FROM information_schema.views -@@ -1686,10 +2129,7 @@ +@@ -1686,10 +2118,7 @@ ORDER BY table_name; table_name | is_updatable | is_insertable_into ------------+--------------+-------------------- @@ -74158,7 +73912,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o SELECT table_name, column_name, is_updatable FROM information_schema.columns -@@ -1697,75 +2137,56 @@ +@@ -1697,75 +2126,56 @@ ORDER BY table_name, ordinal_position; table_name | column_name | is_updatable ------------+-------------+-------------- @@ -74247,14 +74001,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o +CREATE TABLE base_tbl_child (CHECK (a > 0)) INHERITS (base_tbl_parent) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev INSERT INTO base_tbl_parent SELECT * FROM generate_series(-8, -1); INSERT INTO base_tbl_child SELECT * FROM generate_series(1, 8); +ERROR: relation "base_tbl_child" does not exist CREATE VIEW rw_view1 AS SELECT * FROM base_tbl_parent; CREATE VIEW rw_view2 AS SELECT * FROM ONLY base_tbl_parent; SELECT * FROM rw_view1 ORDER BY a; -@@ -1779,15 +2200,7 @@ +@@ -1779,15 +2189,7 @@ -3 -2 -1 @@ -74271,7 +74025,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o SELECT * FROM ONLY rw_view1 ORDER BY a; a -@@ -1800,15 +2213,7 @@ +@@ -1800,15 +2202,7 @@ -3 -2 -1 @@ -74288,7 +74042,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o SELECT * FROM rw_view2 ORDER BY a; a -@@ -1824,316 +2229,315 @@ +@@ -1824,316 +2218,315 @@ (8 rows) INSERT INTO rw_view1 VALUES (-100), (100); @@ -74353,7 +74107,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o +CREATE TABLE other_tbl_child () INHERITS (other_tbl_parent) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev INSERT INTO other_tbl_parent VALUES (7),(200); INSERT INTO other_tbl_child VALUES (8),(100); +ERROR: relation "other_tbl_child" does not exist @@ -74796,7 +74550,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o -- WITH CHECK OPTION with subquery CREATE TABLE base_tbl (a int); CREATE TABLE ref_tbl (a int PRIMARY KEY); -@@ -2142,40 +2546,34 @@ +@@ -2142,40 +2535,34 @@ SELECT * FROM base_tbl b WHERE EXISTS(SELECT 1 FROM ref_tbl r WHERE r.a = b.a) WITH CHECK OPTION; @@ -74858,19 +74612,18 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o -- WITH CHECK OPTION with BEFORE trigger on base table CREATE TABLE base_tbl (a int, b int); CREATE FUNCTION base_tbl_trig_fn() -@@ -2187,22 +2585,33 @@ +@@ -2187,22 +2574,32 @@ END; $$ LANGUAGE plpgsql; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev CREATE TRIGGER base_tbl_trig BEFORE INSERT OR UPDATE ON base_tbl FOR EACH ROW EXECUTE PROCEDURE base_tbl_trig_fn(); -+ERROR: at or near "base_tbl_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER base_tbl_trig BEFORE INSERT OR UPDATE ON base_tbl -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE VIEW rw_view1 AS SELECT * FROM base_tbl WHERE a < b WITH CHECK OPTION; +ERROR: at or near "with": syntax error +DETAIL: source SQL: @@ -74897,20 +74650,19 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o CREATE FUNCTION rw_view1_trig_fn() RETURNS trigger AS $$ -@@ -2220,78 +2629,144 @@ +@@ -2220,78 +2617,140 @@ END; $$ LANGUAGE plpgsql; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev CREATE TRIGGER rw_view1_trig INSTEAD OF INSERT OR UPDATE OR DELETE ON rw_view1 FOR EACH ROW EXECUTE PROCEDURE rw_view1_trig_fn(); -+ERROR: at or near "rw_view1_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER rw_view1_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE VIEW rw_view2 AS SELECT * FROM rw_view1 WHERE a > 0 WITH LOCAL CHECK OPTION; +ERROR: at or near "with": syntax error @@ -74964,12 +74716,9 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o -- Neither local nor cascaded check options work with INSTEAD rules DROP TRIGGER rw_view1_trig ON rw_view1; -+ERROR: at or near "rw_view1_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+DROP TRIGGER rw_view1_trig ON rw_view1 -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE RULE rw_view1_ins_rule AS ON INSERT TO rw_view1 DO INSTEAD INSERT INTO base_tbl VALUES (NEW.a, 10); +ERROR: at or near "rw_view1_ins_rule": syntax error: unimplemented: this syntax @@ -75073,7 +74822,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o -- security barrier view CREATE TABLE base_tbl (person text, visibility text); INSERT INTO base_tbl VALUES ('Tom', 'public'), -@@ -2299,6 +2774,7 @@ +@@ -2299,6 +2758,7 @@ ('Harry', 'public'); CREATE VIEW rw_view1 AS SELECT person FROM base_tbl WHERE visibility = 'public'; @@ -75081,7 +74830,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o CREATE FUNCTION snoop(anyelement) RETURNS boolean AS $$ -@@ -2308,6 +2784,29 @@ +@@ -2308,6 +2768,29 @@ END; $$ LANGUAGE plpgsql COST 0.000001; @@ -75111,7 +74860,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o CREATE OR REPLACE FUNCTION leakproof(anyelement) RETURNS boolean AS $$ -@@ -2317,30 +2816,23 @@ +@@ -2317,30 +2800,23 @@ $$ LANGUAGE plpgsql STRICT IMMUTABLE LEAKPROOF; SELECT * FROM rw_view1 WHERE snoop(person); @@ -75151,7 +74900,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o (1 row) SELECT table_name, is_updatable, is_insertable_into -@@ -2348,7 +2840,7 @@ +@@ -2348,7 +2824,7 @@ WHERE table_name = 'rw_view1'; table_name | is_updatable | is_insertable_into ------------+--------------+-------------------- @@ -75160,7 +74909,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o (1 row) SELECT table_name, column_name, is_updatable -@@ -2357,58 +2849,47 @@ +@@ -2357,58 +2833,47 @@ ORDER BY ordinal_position; table_name | column_name | is_updatable ------------+-------------+-------------- @@ -75244,7 +74993,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o (1 row) SELECT table_name, is_updatable, is_insertable_into -@@ -2416,7 +2897,7 @@ +@@ -2416,7 +2881,7 @@ WHERE table_name = 'rw_view2'; table_name | is_updatable | is_insertable_into ------------+--------------+-------------------- @@ -75253,7 +75002,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o (1 row) SELECT table_name, column_name, is_updatable -@@ -2425,61 +2906,34 @@ +@@ -2425,61 +2890,34 @@ ORDER BY ordinal_position; table_name | column_name | is_updatable ------------+-------------+-------------- @@ -75334,7 +75083,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o -- security barrier view on top of table with rules CREATE TABLE base_tbl(id int PRIMARY KEY, data text, deleted boolean); INSERT INTO base_tbl VALUES (1, 'Row 1', false), (2, 'Row 2', true); -@@ -2487,61 +2941,83 @@ +@@ -2487,61 +2925,83 @@ WHERE EXISTS (SELECT 1 FROM base_tbl t WHERE t.id = new.id) DO INSTEAD UPDATE base_tbl SET data = new.data, deleted = false WHERE id = new.id; @@ -75458,7 +75207,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o -- security barrier view based on inheritance set CREATE TABLE t1 (a int, b float, c text); CREATE INDEX t1_a_idx ON t1(a); -@@ -2549,83 +3025,71 @@ +@@ -2549,83 +3009,71 @@ SELECT i,i,'t1' FROM generate_series(1,10) g(i); ANALYZE t1; CREATE TABLE t11 (d text) INHERITS (t1); @@ -75467,7 +75216,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o +CREATE TABLE t11 (d text) INHERITS (t1) 
+ ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev CREATE INDEX t11_a_idx ON t11(a); +ERROR: relation "t11" does not exist INSERT INTO t11 @@ -75481,7 +75230,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o +CREATE TABLE t12 (e int[]) INHERITS (t1) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev CREATE INDEX t12_a_idx ON t12(a); +ERROR: relation "t12" does not exist INSERT INTO t12 @@ -75498,7 +75247,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o +CREATE TABLE t111 () INHERITS (t11, t12) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev CREATE INDEX t111_a_idx ON t111(a); +ERROR: relation "t111" does not exist INSERT INTO t111 @@ -75583,7 +75332,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o SELECT * FROM t1 WHERE a=100; -- Nothing should have been changed to 100 a | b | c ---+---+--- -@@ -2633,114 +3097,47 @@ +@@ -2633,114 +3081,47 @@ EXPLAIN (VERBOSE, COSTS OFF) UPDATE v1 SET a=a+1 WHERE snoop(a) AND leakproof(a) AND a = 8; @@ -75723,7 +75472,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o SELECT * FROM vx1; a -@@ -2756,13 +3153,13 @@ +@@ -2756,13 +3137,13 @@ CREATE TABLE tx3 (c integer); CREATE VIEW vx1 AS SELECT a FROM tx1 WHERE EXISTS(SELECT 1 FROM tx2 JOIN tx3 ON b=c); INSERT INTO vx1 VALUES (1); @@ -75740,7 +75489,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o SELECT * FROM vx1; a -@@ -2781,13 +3178,13 @@ +@@ -2781,13 +3162,13 @@ ALTER TABLE tx3 DROP COLUMN d; CREATE VIEW vx1 AS SELECT a FROM tx1 WHERE EXISTS(SELECT 1 FROM tx2 JOIN tx3 ON b=c); INSERT INTO vx1 VALUES (1); @@ -75757,7 +75506,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o SELECT * FROM vx1; a -@@ -2803,38 +3200,47 @@ +@@ -2803,38 +3184,47 @@ -- security barrier views, per bug #13988 -- CREATE TABLE t1 (a int, b text, c int); @@ -75818,7 +75567,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o DROP TABLE t2; DROP TABLE t1; -- -@@ -2843,31 +3249,45 @@ +@@ -2843,31 +3233,45 @@ CREATE TABLE t1 (a int); CREATE VIEW v1 WITH (security_barrier = true) AS SELECT * FROM t1; @@ -75882,7 +75631,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o DROP TABLE t1; -- -- Test CREATE OR REPLACE VIEW turning a non-updatable view into an -@@ -2876,85 +3296,120 @@ +@@ -2876,85 +3280,120 @@ CREATE TABLE t1 (a int, b text); CREATE VIEW v1 AS SELECT null::int AS a; CREATE OR REPLACE VIEW v1 AS SELECT * FROM t1 WHERE a > 0 WITH CHECK OPTION; @@ -76035,7 +75784,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o create table sometable (a int, b text); insert into sometable values (1, 'a'), (2, 'b'); create view wcowrtest_v2 as -@@ -2962,13 +3417,22 @@ +@@ -2962,13 +3401,22 @@ from wcowrtest r where r in (select s from sometable s where r.a = s.a) with check option; @@ -76060,7 +75809,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o -- Check INSERT .. 
ON CONFLICT DO UPDATE works correctly when the view's -- columns are named and ordered differently than the underlying table's. create table uv_iocu_tab (a text unique, b float); -@@ -2977,6 +3441,7 @@ +@@ -2977,6 +3425,7 @@ select b, b+1 as c, a, '2.0'::text as two from uv_iocu_tab; insert into uv_iocu_view (a, b) values ('xyxyxy', 1) on conflict (a) do update set b = uv_iocu_view.b; @@ -76068,7 +75817,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o select * from uv_iocu_tab; a | b --------+--- -@@ -2985,40 +3450,39 @@ +@@ -2985,40 +3434,39 @@ insert into uv_iocu_view (a, b) values ('xyxyxy', 1) on conflict (a) do update set b = excluded.b; @@ -76120,7 +75869,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o (1 row) drop view uv_iocu_view; -@@ -3028,112 +3492,179 @@ +@@ -3028,112 +3476,179 @@ create view uv_iocu_view as select b as bb, a as aa, uv_iocu_tab::text as cc from uv_iocu_tab; insert into uv_iocu_view (aa,bb) values (1,'x'); @@ -76225,7 +75974,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o +set session authorization regress_view_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev create table base_tbl(a int unique, b text, c float); insert into base_tbl values (1,'xxx',1.0); create view rw_view1 as select b as bb, c as cc, a as aa from base_tbl; @@ -76249,7 +75998,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o +set session authorization regress_view_user2 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev insert into rw_view1 values ('yyy',2.0,1) on conflict (aa) do update set bb = excluded.cc; -- Not allowed -ERROR: permission denied for view rw_view1 @@ -76289,7 +76038,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o +set session authorization regress_view_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev grant select (a,b) on base_tbl to regress_view_user2; +ERROR: at or near "(": syntax error +DETAIL: source SQL: @@ -76314,7 +76063,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o +set session authorization regress_view_user2 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev create view rw_view2 as select b as bb, c as cc, a as aa from base_tbl; +ERROR: relation "root.public.rw_view2" already exists insert into rw_view2 (aa,bb) values (1,'xxx') @@ -76334,7 +76083,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o select * from base_tbl; a | b | c ---+-----+--- -@@ -3141,22 +3672,38 @@ +@@ -3141,22 +3656,38 @@ (1 row) set session authorization regress_view_user2; @@ -76343,7 +76092,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o +set session authorization regress_view_user2 + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev create view rw_view4 as select aa, bb, cc FROM rw_view1; +ERROR: column "aa" does not exist insert into rw_view4 (aa,bb) values (1,'yyy') @@ -76375,7 +76124,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o drop view rw_view3; drop view rw_view2; drop view rw_view1; -@@ -3169,36 +3716,43 @@ +@@ -3169,36 +3700,43 @@ c text default 'Table default', d text, e text); create view base_tab_def_view as select * from base_tab_def; alter view base_tab_def_view alter b set default 'View default'; @@ -76436,23 +76185,22 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o -- Adding an INSTEAD OF trigger should cause NULLs to be inserted instead of -- table defaults, where there are no view defaults. -@@ -3211,8 +3765,15 @@ +@@ -3211,8 +3749,14 @@ end; $$ language plpgsql; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev create trigger base_tab_def_view_instrig instead of insert on base_tab_def_view for each row execute function base_tab_def_view_instrig_func(); -+ERROR: at or near "base_tab_def_view_instrig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger base_tab_def_view_instrig instead of insert on base_tab_def_view -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev truncate base_tab_def; insert into base_tab_def values (1); insert into base_tab_def values (2), (3); -@@ -3220,36 +3781,55 @@ +@@ -3220,36 +3764,52 @@ insert into base_tab_def values (5, default, default, default, default), (6, default, default, default, default); insert into base_tab_def_view values (11); @@ -76497,12 +76245,9 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o -- Using an unconditional DO INSTEAD rule should also cause NULLs to be -- inserted where there are no view defaults. drop trigger base_tab_def_view_instrig on base_tab_def_view; -+ERROR: at or near "base_tab_def_view_instrig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+drop trigger base_tab_def_view_instrig on base_tab_def_view -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev drop function base_tab_def_view_instrig_func; +ERROR: unknown function: base_tab_def_view_instrig_func() create rule base_tab_def_view_ins_rule as on insert to base_tab_def_view @@ -76525,7 +76270,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o truncate base_tab_def; insert into base_tab_def values (1); insert into base_tab_def values (2), (3); -@@ -3257,29 +3837,26 @@ +@@ -3257,29 +3817,26 @@ insert into base_tab_def values (5, default, default, default, default), (6, default, default, default, default); insert into base_tab_def_view values (11); @@ -76569,7 +76314,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o -- A DO ALSO rule should cause each row to be inserted twice. 
The first -- insert should behave the same as an auto-updatable view (using table -@@ -3287,8 +3864,38 @@ +@@ -3287,8 +3844,38 @@ -- behave the same as a rule-updatable view (inserting NULLs where there are -- no view defaults). drop rule base_tab_def_view_ins_rule on base_tab_def_view; @@ -76608,7 +76353,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o truncate base_tab_def; insert into base_tab_def values (1); insert into base_tab_def values (2), (3); -@@ -3296,56 +3903,71 @@ +@@ -3296,56 +3883,71 @@ insert into base_tab_def values (5, default, default, default, default), (6, default, default, default, default); insert into base_tab_def_view values (11); @@ -76714,7 +76459,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/updatable_views.o drop view base_tab_def_view; drop table base_tab_def; -@@ -3353,14 +3975,22 @@ +@@ -3353,14 +3955,22 @@ create table base_tab (a serial, b int[], c text, d text default 'Table default'); create view base_tab_view as select c, a, b from base_tab; alter view base_tab_view alter column c set default 'View default'; @@ -76761,71 +76506,62 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la LANGUAGE C; +ERROR: unimplemented: C is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/102201/v24.2 ++See: https://go.crdb.dev/issue-v/102201/dev CREATE FUNCTION check_primary_key () RETURNS trigger AS :'refintlib' LANGUAGE C; +ERROR: unimplemented: C is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/102201/v24.2 ++See: https://go.crdb.dev/issue-v/102201/dev CREATE FUNCTION check_foreign_key () RETURNS trigger AS :'refintlib' LANGUAGE C; +ERROR: unimplemented: C is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/102201/v24.2 ++See: https://go.crdb.dev/issue-v/102201/dev CREATE FUNCTION trigger_return_old () RETURNS trigger AS :'regresslib' LANGUAGE C; +ERROR: unimplemented: C is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/102201/v24.2 ++See: https://go.crdb.dev/issue-v/102201/dev CREATE FUNCTION set_ttdummy (int4) RETURNS int4 AS :'regresslib' LANGUAGE C STRICT; +ERROR: unimplemented: C is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/102201/v24.2 ++See: https://go.crdb.dev/issue-v/102201/dev create table pkeys (pkey1 int4 not null, pkey2 text not null); create table fkeys (fkey1 int4, fkey2 text, fkey3 int); create table fkeys2 (fkey21 int4, fkey22 text, pkey23 int not null); -@@ -50,10 +67,22 @@ +@@ -50,10 +67,16 @@ for each row execute function check_primary_key ('fkey1', 'fkey2', 'pkeys', 'pkey1', 'pkey2'); -+ERROR: at or near "check_fkeys_pkey_exist": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger check_fkeys_pkey_exist -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger check_fkeys_pkey2_exist before insert or update on fkeys for each row execute function check_primary_key ('fkey3', 'fkeys2', 'pkey23'); -+ERROR: at or near "check_fkeys_pkey2_exist": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger check_fkeys_pkey2_exist -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev -- -- For fkeys2: -- (fkey21, fkey22) --> pkeys (pkey1, pkey2) -@@ -63,11 +92,28 @@ +@@ -63,11 +86,25 @@ for each row execute procedure check_primary_key ('fkey21', 'fkey22', 'pkeys', 'pkey1', 'pkey2'); -+ERROR: at or near "check_fkeys2_pkey_exist": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger check_fkeys2_pkey_exist -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev -- Test comments COMMENT ON TRIGGER check_fkeys2_pkey_bad ON fkeys2 IS 'wrong'; -ERROR: trigger "check_fkeys2_pkey_bad" for table "fkeys2" does not exist @@ -76846,29 +76582,23 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la -- -- For pkeys: -- ON DELETE/UPDATE (pkey1, pkey2) CASCADE: -@@ -79,6 +125,12 @@ +@@ -79,6 +116,9 @@ execute procedure check_foreign_key (2, 'cascade', 'pkey1', 'pkey2', 'fkeys', 'fkey1', 'fkey2', 'fkeys2', 'fkey21', 'fkey22'); -+ERROR: at or near "check_pkeys_fkey_cascade": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger check_pkeys_fkey_cascade -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev -- -- For fkeys2: -- ON DELETE/UPDATE (pkey23) RESTRICT: -@@ -88,59 +140,41 @@ +@@ -88,59 +128,38 @@ before delete or update on fkeys2 for each row execute procedure check_foreign_key (1, 'restrict', 'pkey23', 'fkeys', 'fkey3'); -+ERROR: at or near "check_fkeys2_fkey_restrict": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger check_fkeys2_fkey_restrict -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev insert into fkeys2 values (10, '1', 1); insert into fkeys2 values (30, '3', 2); insert into fkeys2 values (40, '4', 5); @@ -76930,20 +76660,17 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la DROP TABLE pkeys; DROP TABLE fkeys; -@@ -150,6 +184,12 @@ +@@ -150,6 +169,9 @@ create trigger trigger_return_old before insert or delete or update on trigtest for each row execute procedure trigger_return_old(); -+ERROR: at or near "trigger_return_old": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger trigger_return_old -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev insert into trigtest values(1, 'foo'); select * from trigtest; f1 | f2 -@@ -159,9 +199,9 @@ +@@ -159,9 +181,9 @@ update trigtest set f2 = f2 || 'bar'; select * from trigtest; @@ -76956,20 +76683,19 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la (1 row) delete from trigtest; -@@ -173,21 +213,28 @@ +@@ -173,21 +195,27 @@ -- Also check what happens when such a trigger runs before or after others create function f1_times_10() returns trigger as $$ begin new.f1 := new.f1 * 10; return new; end $$ language plpgsql; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev create trigger trigger_alpha before insert or update on trigtest for each row execute procedure f1_times_10(); -+ERROR: at or near "trigger_alpha": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger trigger_alpha -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev insert into trigtest values(1, 'foo'); select * from trigtest; f1 | f2 @@ -76989,16 +76715,13 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la (1 row) delete from trigtest; -@@ -199,18 +246,24 @@ +@@ -199,18 +227,21 @@ create trigger trigger_zed before insert or update on trigtest for each row execute procedure f1_times_10(); -+ERROR: at or near "trigger_zed": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger trigger_zed -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev insert into trigtest values(1, 'foo'); select * from trigtest; - f1 | f2 @@ -77020,16 +76743,13 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la (1 row) delete from trigtest; -@@ -220,18 +273,24 @@ +@@ -220,18 +251,21 @@ (0 rows) drop trigger trigger_alpha on trigtest; -+ERROR: at or near "trigger_alpha": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+drop trigger trigger_alpha on trigtest -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev insert into trigtest values(1, 'foo'); select * from trigtest; f1 | f2 @@ -77049,20 +76769,17 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la (1 row) delete from trigtest; -@@ -249,6 +308,12 @@ +@@ -249,6 +283,9 @@ create trigger trigger_return_old before insert or delete or update on trigtest for each row execute procedure trigger_return_old(); -+ERROR: at or near "trigger_return_old": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger trigger_return_old -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev insert into trigtest values(1); select * from trigtest; a | b | c -@@ -266,32 +331,31 @@ +@@ -266,32 +303,31 @@ update trigtest set a = 2 where a = 1 returning *; a | b | c | d ---+---+-------+---- @@ -77102,27 +76819,21 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la (1 row) drop table trigtest; -@@ -307,38 +371,49 @@ +@@ -307,38 +343,43 @@ for each row execute procedure ttdummy (price_on, price_off); -+ERROR: at or near "ttdummy": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger ttdummy -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger ttserial before insert or update on tttest for each row execute procedure autoinc (price_on, ttdummy_seq); -+ERROR: at or near "ttserial": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger ttserial -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev insert into tttest values (1, 1, null); insert into tttest values (2, 2, null); insert into tttest values (3, 3, 0); @@ -77161,7 +76872,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la (2 rows) -- change price for price_id == 3 -@@ -346,11 +421,9 @@ +@@ -346,11 +387,9 @@ select * from tttest; price_id | price_val | price_on | price_off ----------+-----------+----------+----------- @@ -77176,7 +76887,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la -- now we want to change pric_id in ALL tuples -- this gets us not what we need -@@ -358,77 +431,51 @@ +@@ -358,77 +397,51 @@ select * from tttest; price_id | price_val | price_on | price_off ----------+-----------+----------+----------- @@ -77266,39 +76977,32 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la drop table tttest; drop sequence ttdummy_seq; -@@ -443,149 +490,165 @@ +@@ -443,149 +456,137 @@ RAISE NOTICE ''trigger_func(%) called: action = %, when = %, level = %'', TG_ARGV[0], TG_OP, TG_WHEN, TG_LEVEL; RETURN NULL; END;'; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev CREATE TRIGGER before_ins_stmt_trig BEFORE INSERT ON main_table FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func('before_ins_stmt'); -+ERROR: at or near "before_ins_stmt_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER before_ins_stmt_trig BEFORE INSERT ON main_table -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE TRIGGER after_ins_stmt_trig AFTER INSERT ON main_table FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func('after_ins_stmt'); -+ERROR: at or near "after_ins_stmt_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER after_ins_stmt_trig AFTER INSERT ON main_table -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev -- -- if neither 'FOR EACH ROW' nor 'FOR EACH STATEMENT' was specified, -- CREATE TRIGGER should default to 'FOR EACH STATEMENT' -- CREATE TRIGGER after_upd_stmt_trig AFTER UPDATE ON main_table EXECUTE PROCEDURE trigger_func('after_upd_stmt'); -+ERROR: at or near "after_upd_stmt_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER after_upd_stmt_trig AFTER UPDATE ON main_table -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev -- Both insert and update statement level triggers (before and after) should -- fire. Doesn't fire UPDATE before trigger, but only because one isn't -- defined. @@ -77309,12 +77013,9 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la -NOTICE: trigger_func(after_ins_stmt) called: action = INSERT, when = AFTER, level = STATEMENT CREATE TRIGGER after_upd_row_trig AFTER UPDATE ON main_table FOR EACH ROW EXECUTE PROCEDURE trigger_func('after_upd_row'); -+ERROR: at or near "after_upd_row_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER after_upd_row_trig AFTER UPDATE ON main_table -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev INSERT INTO main_table DEFAULT VALUES; -NOTICE: trigger_func(before_ins_stmt) called: action = INSERT, when = BEFORE, level = STATEMENT -NOTICE: trigger_func(after_ins_stmt) called: action = INSERT, when = AFTER, level = STATEMENT @@ -77331,7 +77032,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la ALTER TABLE main_table DROP CONSTRAINT main_table_a_key; +ERROR: unimplemented: cannot drop UNIQUE constraint "main_table_a_key" using ALTER TABLE DROP CONSTRAINT, use DROP INDEX CASCADE instead +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/42840/v24.2 ++See: https://go.crdb.dev/issue-v/42840/dev -- COPY should fire per-row and per-statement INSERT triggers COPY main_table (a, b) FROM stdin; -NOTICE: trigger_func(before_ins_stmt) called: action = INSERT, when = BEFORE, level = STATEMENT @@ -77358,52 +77059,34 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la -- CREATE TRIGGER modified_a BEFORE UPDATE OF a ON main_table FOR EACH ROW WHEN (OLD.a <> NEW.a) EXECUTE PROCEDURE trigger_func('modified_a'); -+ERROR: at or near "modified_a": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER modified_a BEFORE UPDATE OF a ON main_table -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE TRIGGER modified_any BEFORE UPDATE OF a ON main_table FOR EACH ROW WHEN (OLD.* IS DISTINCT FROM NEW.*) EXECUTE PROCEDURE trigger_func('modified_any'); -+ERROR: at or near "modified_any": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER modified_any BEFORE UPDATE OF a ON main_table -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE TRIGGER insert_a AFTER INSERT ON main_table FOR EACH ROW WHEN (NEW.a = 123) EXECUTE PROCEDURE trigger_func('insert_a'); -+ERROR: at or near "insert_a": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER insert_a AFTER INSERT ON main_table -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE TRIGGER delete_a AFTER DELETE ON main_table FOR EACH ROW WHEN (OLD.a = 123) EXECUTE PROCEDURE trigger_func('delete_a'); -+ERROR: at or near "delete_a": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER delete_a AFTER DELETE ON main_table -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE TRIGGER insert_when BEFORE INSERT ON main_table FOR EACH STATEMENT WHEN (true) EXECUTE PROCEDURE trigger_func('insert_when'); -+ERROR: at or near "insert_when": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER insert_when BEFORE INSERT ON main_table -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE TRIGGER delete_when AFTER DELETE ON main_table FOR EACH STATEMENT WHEN (true) EXECUTE PROCEDURE trigger_func('delete_when'); -+ERROR: at or near "delete_when": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER delete_when AFTER DELETE ON main_table -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev SELECT trigger_name, event_manipulation, event_object_schema, event_object_table, action_order, action_condition, action_orientation, action_timing, action_reference_old_table, action_reference_new_table @@ -77501,7 +77184,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la SELECT count(*) FROM pg_trigger WHERE tgrelid = 'main_table'::regclass AND tgname = 'modified_a'; count ------- -@@ -595,15 +658,51 @@ +@@ -595,15 +596,33 @@ SELECT count(*) FROM pg_trigger WHERE tgrelid = 'main_table'::regclass AND tgname = 'modified_modified_a'; count ------- @@ -77510,111 +77193,72 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la (1 row) DROP TRIGGER modified_modified_a ON main_table; -+ERROR: at or near "modified_modified_a": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+DROP TRIGGER modified_modified_a ON main_table -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev DROP TRIGGER modified_any ON main_table; -+ERROR: at or near "modified_any": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+DROP TRIGGER modified_any ON main_table -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev DROP TRIGGER insert_a ON main_table; -+ERROR: at or near "insert_a": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+DROP TRIGGER insert_a ON main_table -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev DROP TRIGGER delete_a ON main_table; -+ERROR: at or near "delete_a": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+DROP TRIGGER delete_a ON main_table -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev DROP TRIGGER insert_when ON main_table; -+ERROR: at or near "insert_when": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+DROP TRIGGER insert_when ON main_table -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev DROP TRIGGER delete_when ON main_table; -+ERROR: at or near "delete_when": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+DROP TRIGGER delete_when ON main_table -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev -- Test WHEN condition accessing system columns. 
create table table_with_oids(a int); insert into table_with_oids values (1); -@@ -611,57 +710,68 @@ +@@ -611,57 +630,47 @@ for each row when (new.tableoid = old.tableoid AND new.tableoid <> 0) execute procedure trigger_func('after_upd_oid_unchanged'); -+ERROR: at or near "oid_unchanged_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger oid_unchanged_trig after update on table_with_oids -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev update table_with_oids set a = a + 1; -NOTICE: trigger_func(after_upd_oid_unchanged) called: action = UPDATE, when = AFTER, level = ROW drop table table_with_oids; -- Test column-level triggers DROP TRIGGER after_upd_row_trig ON main_table; -+ERROR: at or near "after_upd_row_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+DROP TRIGGER after_upd_row_trig ON main_table -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE TRIGGER before_upd_a_row_trig BEFORE UPDATE OF a ON main_table FOR EACH ROW EXECUTE PROCEDURE trigger_func('before_upd_a_row'); -+ERROR: at or near "before_upd_a_row_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER before_upd_a_row_trig BEFORE UPDATE OF a ON main_table -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE TRIGGER after_upd_b_row_trig AFTER UPDATE OF b ON main_table FOR EACH ROW EXECUTE PROCEDURE trigger_func('after_upd_b_row'); -+ERROR: at or near "after_upd_b_row_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER after_upd_b_row_trig AFTER UPDATE OF b ON main_table -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE TRIGGER after_upd_a_b_row_trig AFTER UPDATE OF a, b ON main_table FOR EACH ROW EXECUTE PROCEDURE trigger_func('after_upd_a_b_row'); -+ERROR: at or near "after_upd_a_b_row_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER after_upd_a_b_row_trig AFTER UPDATE OF a, b ON main_table -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE TRIGGER before_upd_a_stmt_trig BEFORE UPDATE OF a ON main_table FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func('before_upd_a_stmt'); -+ERROR: at or near "before_upd_a_stmt_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER before_upd_a_stmt_trig BEFORE UPDATE OF a ON main_table -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE TRIGGER after_upd_b_stmt_trig AFTER UPDATE OF b ON main_table FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func('after_upd_b_stmt'); -+ERROR: at or near "after_upd_b_stmt_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER after_upd_b_stmt_trig AFTER UPDATE OF b ON main_table -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev SELECT pg_get_triggerdef(oid) FROM pg_trigger WHERE tgrelid = 'main_table'::regclass AND tgname = 'after_upd_a_b_row_trig'; - pg_get_triggerdef -------------------------------------------------------------------------------------------------------------------------------------------------- @@ -77657,37 +77301,30 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la -- -- Test case for bug with BEFORE trigger followed by AFTER trigger with WHEN -- -@@ -673,78 +783,131 @@ +@@ -673,78 +682,93 @@ RETURN NEW; END; $$ LANGUAGE plpgsql; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev CREATE TRIGGER some_trig_before BEFORE UPDATE ON some_t FOR EACH ROW EXECUTE PROCEDURE dummy_update_func('before'); -+ERROR: at or near "some_trig_before": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER some_trig_before BEFORE UPDATE ON some_t FOR EACH ROW -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE TRIGGER some_trig_aftera AFTER UPDATE ON some_t FOR EACH ROW WHEN (NOT OLD.some_col AND NEW.some_col) EXECUTE PROCEDURE dummy_update_func('aftera'); -+ERROR: at or near "some_trig_aftera": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER some_trig_aftera AFTER UPDATE ON some_t FOR EACH ROW -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE TRIGGER some_trig_afterb AFTER UPDATE ON some_t FOR EACH ROW WHEN (NOT NEW.some_col) EXECUTE PROCEDURE dummy_update_func('afterb'); -+ERROR: at or near "some_trig_afterb": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER some_trig_afterb AFTER UPDATE ON some_t FOR EACH ROW -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev INSERT INTO some_t VALUES (TRUE); UPDATE some_t SET some_col = TRUE; -NOTICE: dummy_update_func(before) called: action = UPDATE, old = (t), new = (t) @@ -77704,80 +77341,62 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la -ERROR: duplicate trigger events specified at or near "ON" -LINE 1: ...ER error_upd_and_col BEFORE UPDATE OR UPDATE OF a ON main_ta... 
- ^ -+ERROR: at or near "error_upd_and_col": syntax error: unimplemented: this syntax ++ERROR: at or near "on": syntax error: duplicate trigger events specified +DETAIL: source SQL: +CREATE TRIGGER error_upd_and_col BEFORE UPDATE OR UPDATE OF a ON main_table -+ ^ -+HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++ ^ CREATE TRIGGER error_upd_a_a BEFORE UPDATE OF a, a ON main_table FOR EACH ROW EXECUTE PROCEDURE trigger_func('error_upd_a_a'); -ERROR: column "a" specified more than once -+ERROR: at or near "error_upd_a_a": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER error_upd_a_a BEFORE UPDATE OF a, a ON main_table -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE TRIGGER error_ins_a BEFORE INSERT OF a ON main_table FOR EACH ROW EXECUTE PROCEDURE trigger_func('error_ins_a'); -ERROR: syntax error at or near "OF" -LINE 1: CREATE TRIGGER error_ins_a BEFORE INSERT OF a ON main_table - ^ -+ERROR: at or near "error_ins_a": syntax error: unimplemented: this syntax ++ERROR: at or near "of": syntax error +DETAIL: source SQL: +CREATE TRIGGER error_ins_a BEFORE INSERT OF a ON main_table -+ ^ -+HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++ ^ ++HINT: try \h CREATE TRIGGER CREATE TRIGGER error_ins_when BEFORE INSERT OR UPDATE ON main_table FOR EACH ROW WHEN (OLD.a <> NEW.a) EXECUTE PROCEDURE trigger_func('error_ins_old'); -ERROR: INSERT trigger's WHEN condition cannot reference OLD values -LINE 2: FOR EACH ROW WHEN (OLD.a <> NEW.a) - ^ -+ERROR: at or near "error_ins_when": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER error_ins_when BEFORE INSERT OR UPDATE ON main_table -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE TRIGGER error_del_when BEFORE DELETE OR UPDATE ON main_table FOR EACH ROW WHEN (OLD.a <> NEW.a) EXECUTE PROCEDURE trigger_func('error_del_new'); -ERROR: DELETE trigger's WHEN condition cannot reference NEW values -LINE 2: FOR EACH ROW WHEN (OLD.a <> NEW.a) - ^ -+ERROR: at or near "error_del_when": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER error_del_when BEFORE DELETE OR UPDATE ON main_table -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE TRIGGER error_del_when BEFORE INSERT OR UPDATE ON main_table FOR EACH ROW WHEN (NEW.tableoid <> 0) EXECUTE PROCEDURE trigger_func('error_when_sys_column'); -ERROR: BEFORE trigger's WHEN condition cannot reference NEW system columns -LINE 2: FOR EACH ROW WHEN (NEW.tableoid <> 0) - ^ -+ERROR: at or near "error_del_when": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER error_del_when BEFORE INSERT OR UPDATE ON main_table -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE TRIGGER error_stmt_when BEFORE UPDATE OF a ON main_table FOR EACH STATEMENT WHEN (OLD.* IS DISTINCT FROM NEW.*) EXECUTE PROCEDURE trigger_func('error_stmt_when'); -ERROR: statement trigger's WHEN condition cannot reference column values -LINE 2: FOR EACH STATEMENT WHEN (OLD.* IS DISTINCT FROM NEW.*) - ^ -+ERROR: at or near "error_stmt_when": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER error_stmt_when BEFORE UPDATE OF a ON main_table -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev -- check dependency restrictions ALTER TABLE main_table DROP COLUMN b; -ERROR: cannot drop column b of table main_table because other objects depend on it @@ -77788,26 +77407,13 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la -- this should succeed, but we'll roll it back to keep the triggers around begin; DROP TRIGGER after_upd_a_b_row_trig ON main_table; -+ERROR: at or near "after_upd_a_b_row_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+DROP TRIGGER after_upd_a_b_row_trig ON main_table -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev DROP TRIGGER after_upd_b_row_trig ON main_table; -+ERROR: at or near "after_upd_b_row_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+DROP TRIGGER after_upd_b_row_trig ON main_table -+ ^ -+HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++ERROR: current transaction is aborted, commands ignored until end of transaction block DROP TRIGGER after_upd_b_stmt_trig ON main_table; -+ERROR: at or near "after_upd_b_stmt_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+DROP TRIGGER after_upd_b_stmt_trig ON main_table -+ ^ -+HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++ERROR: current transaction is aborted, commands ignored until end of transaction block ALTER TABLE main_table DROP COLUMN b; +ERROR: current transaction is aborted, commands ignored until end of transaction block rollback; @@ -77815,47 +77421,37 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la -create table trigtest (i serial primary key); +create sequence trigtestseq; +create table trigtest (i int primary key default nextval('trigtestseq')); -+NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. See https://www.cockroachlabs.com/docs/v24.2/serial.html ++NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. 
See https://www.cockroachlabs.com/docs/dev/serial.html -- test that disabling RI triggers works create table trigtest2 (i int references trigtest(i) on delete cascade); create function trigtest() returns trigger as $$ -@@ -752,39 +915,98 @@ +@@ -752,39 +776,88 @@ raise notice '% % % %', TG_TABLE_NAME, TG_OP, TG_WHEN, TG_LEVEL; return new; end;$$ language plpgsql; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev create trigger trigtest_b_row_tg before insert or update or delete on trigtest for each row execute procedure trigtest(); -+ERROR: at or near "trigtest_b_row_tg": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger trigtest_b_row_tg before insert or update or delete on trigtest -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger trigtest_a_row_tg after insert or update or delete on trigtest for each row execute procedure trigtest(); -+ERROR: at or near "trigtest_a_row_tg": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger trigtest_a_row_tg after insert or update or delete on trigtest -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger trigtest_b_stmt_tg before insert or update or delete on trigtest for each statement execute procedure trigtest(); -+ERROR: at or near "trigtest_b_stmt_tg": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger trigtest_b_stmt_tg before insert or update or delete on trigtest -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger trigtest_a_stmt_tg after insert or update or delete on trigtest for each statement execute procedure trigtest(); -+ERROR: at or near "trigtest_a_stmt_tg": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger trigtest_a_stmt_tg after insert or update or delete on trigtest -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev insert into trigtest default values; -NOTICE: trigtest INSERT BEFORE STATEMENT -NOTICE: trigtest INSERT BEFORE ROW @@ -77928,7 +77524,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la select * from trigtest2; i --- -@@ -792,12 +1014,16 @@ +@@ -792,12 +865,16 @@ (1 row) alter table trigtest disable trigger all; @@ -77947,20 +77543,19 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la -- ensure we still insert, even when all triggers are disabled insert into trigtest default values; -@@ -869,48 +1095,28 @@ +@@ -869,48 +946,24 @@ end; $$; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. 
++See: https://go.crdb.dev/issue-v/126356/dev CREATE TRIGGER show_trigger_data_trig BEFORE INSERT OR UPDATE OR DELETE ON trigger_test FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo'); -+ERROR: at or near "show_trigger_data_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER show_trigger_data_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev insert into trigger_test values(1,'insert'); -NOTICE: TG_NAME: show_trigger_data_trig -NOTICE: TG_WHEN: BEFORE @@ -77999,31 +77594,27 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la -NOTICE: TG_ARGV: [23, skidoo] -NOTICE: OLD: (1,update) DROP TRIGGER show_trigger_data_trig on trigger_test; -+ERROR: at or near "show_trigger_data_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+DROP TRIGGER show_trigger_data_trig on trigger_test -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev DROP FUNCTION trigger_data(); +ERROR: unknown function: trigger_data() DROP TABLE trigger_test; -- -- Test use of row comparisons on OLD/NEW -@@ -926,21 +1132,22 @@ +@@ -926,21 +979,21 @@ end if; return new; end$$; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev CREATE TRIGGER t BEFORE UPDATE ON trigger_test FOR EACH ROW EXECUTE PROCEDURE mytrigger(); -+ERROR: at or near "t": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER t -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev INSERT INTO trigger_test VALUES(1, 'foo', 'bar'); INSERT INTO trigger_test VALUES(2, 'baz', 'quux'); UPDATE trigger_test SET f3 = 'bar'; @@ -78039,11 +77630,13 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la -- the right way when considering nulls is CREATE OR REPLACE FUNCTION mytrigger() RETURNS trigger LANGUAGE plpgsql as $$ begin -@@ -951,17 +1158,13 @@ +@@ -951,17 +1004,15 @@ end if; return new; end$$; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev UPDATE trigger_test SET f3 = 'bar'; -NOTICE: row 1 changed -NOTICE: row 2 changed @@ -78059,28 +77652,27 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la -- Test snapshot management in serializable transactions involving triggers -- per bug report in 6bc73d4c0910042358k3d1adff3qa36f8df75198ecea@mail.gmail.com CREATE FUNCTION serializable_update_trig() RETURNS trigger LANGUAGE plpgsql AS -@@ -973,6 +1176,7 @@ +@@ -973,6 +1024,9 @@ return new; end; $$; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. 
++See: https://go.crdb.dev/issue-v/126356/dev CREATE TABLE serializable_update_tab ( id int, filler text, -@@ -980,6 +1184,12 @@ +@@ -980,6 +1034,9 @@ ); CREATE TRIGGER serializable_update_trig BEFORE UPDATE ON serializable_update_tab FOR EACH ROW EXECUTE PROCEDURE serializable_update_trig(); -+ERROR: at or near "serializable_update_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER serializable_update_trig BEFORE UPDATE ON serializable_update_tab -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev INSERT INTO serializable_update_tab SELECT a, repeat('xyzxz', 100), 'new' FROM generate_series(1, 50) a; BEGIN; -@@ -987,9 +1197,9 @@ +@@ -987,9 +1044,9 @@ UPDATE serializable_update_tab SET description = 'no no', id = 1 WHERE id = 1; COMMIT; SELECT description FROM serializable_update_tab WHERE id = 1; @@ -78093,16 +77685,13 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la (1 row) DROP TABLE serializable_update_tab; -@@ -1002,9 +1212,15 @@ +@@ -1002,9 +1059,12 @@ CREATE TRIGGER z_min_update BEFORE UPDATE ON min_updates_test FOR EACH ROW EXECUTE PROCEDURE suppress_redundant_updates_trigger(); -+ERROR: at or near "z_min_update": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER z_min_update -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev \set QUIET false UPDATE min_updates_test SET f1 = f1; -UPDATE 0 @@ -78110,7 +77699,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la UPDATE min_updates_test SET f2 = f2 + 1; UPDATE 2 UPDATE min_updates_test SET f3 = 2 WHERE f3 is null; -@@ -1022,6 +1238,7 @@ +@@ -1022,6 +1082,7 @@ -- Test triggers on views -- CREATE VIEW main_view AS SELECT a, b FROM main_table; @@ -78118,230 +77707,163 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la -- VIEW trigger function CREATE OR REPLACE FUNCTION view_trigger() RETURNS trigger LANGUAGE plpgsql AS $$ -@@ -1062,242 +1279,281 @@ +@@ -1062,242 +1123,205 @@ RETURN NULL; end; $$; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev -- Before row triggers aren't allowed on views CREATE TRIGGER invalid_trig BEFORE INSERT ON main_view FOR EACH ROW EXECUTE PROCEDURE trigger_func('before_ins_row'); -ERROR: "main_view" is a view -DETAIL: Views cannot have row-level BEFORE or AFTER triggers. -+ERROR: at or near "invalid_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER invalid_trig BEFORE INSERT ON main_view -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE TRIGGER invalid_trig BEFORE UPDATE ON main_view FOR EACH ROW EXECUTE PROCEDURE trigger_func('before_upd_row'); -ERROR: "main_view" is a view -DETAIL: Views cannot have row-level BEFORE or AFTER triggers. 
-+ERROR: at or near "invalid_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER invalid_trig BEFORE UPDATE ON main_view -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE TRIGGER invalid_trig BEFORE DELETE ON main_view FOR EACH ROW EXECUTE PROCEDURE trigger_func('before_del_row'); -ERROR: "main_view" is a view -DETAIL: Views cannot have row-level BEFORE or AFTER triggers. -+ERROR: at or near "invalid_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER invalid_trig BEFORE DELETE ON main_view -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev -- After row triggers aren't allowed on views CREATE TRIGGER invalid_trig AFTER INSERT ON main_view FOR EACH ROW EXECUTE PROCEDURE trigger_func('before_ins_row'); -ERROR: "main_view" is a view -DETAIL: Views cannot have row-level BEFORE or AFTER triggers. -+ERROR: at or near "invalid_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER invalid_trig AFTER INSERT ON main_view -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE TRIGGER invalid_trig AFTER UPDATE ON main_view FOR EACH ROW EXECUTE PROCEDURE trigger_func('before_upd_row'); -ERROR: "main_view" is a view -DETAIL: Views cannot have row-level BEFORE or AFTER triggers. -+ERROR: at or near "invalid_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER invalid_trig AFTER UPDATE ON main_view -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE TRIGGER invalid_trig AFTER DELETE ON main_view FOR EACH ROW EXECUTE PROCEDURE trigger_func('before_del_row'); -ERROR: "main_view" is a view -DETAIL: Views cannot have row-level BEFORE or AFTER triggers. -+ERROR: at or near "invalid_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER invalid_trig AFTER DELETE ON main_view -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev -- Truncate triggers aren't allowed on views CREATE TRIGGER invalid_trig BEFORE TRUNCATE ON main_view EXECUTE PROCEDURE trigger_func('before_tru_row'); -ERROR: "main_view" is a view -DETAIL: Views cannot have TRUNCATE triggers. -+ERROR: at or near "invalid_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER invalid_trig BEFORE TRUNCATE ON main_view -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE TRIGGER invalid_trig AFTER TRUNCATE ON main_view EXECUTE PROCEDURE trigger_func('before_tru_row'); -ERROR: "main_view" is a view -DETAIL: Views cannot have TRUNCATE triggers. 
-+ERROR: at or near "invalid_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER invalid_trig AFTER TRUNCATE ON main_view -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev -- INSTEAD OF triggers aren't allowed on tables CREATE TRIGGER invalid_trig INSTEAD OF INSERT ON main_table FOR EACH ROW EXECUTE PROCEDURE view_trigger('instead_of_ins'); -ERROR: "main_table" is a table -DETAIL: Tables cannot have INSTEAD OF triggers. -+ERROR: at or near "invalid_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER invalid_trig INSTEAD OF INSERT ON main_table -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE TRIGGER invalid_trig INSTEAD OF UPDATE ON main_table FOR EACH ROW EXECUTE PROCEDURE view_trigger('instead_of_upd'); -ERROR: "main_table" is a table -DETAIL: Tables cannot have INSTEAD OF triggers. -+ERROR: at or near "invalid_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER invalid_trig INSTEAD OF UPDATE ON main_table -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE TRIGGER invalid_trig INSTEAD OF DELETE ON main_table FOR EACH ROW EXECUTE PROCEDURE view_trigger('instead_of_del'); -ERROR: "main_table" is a table -DETAIL: Tables cannot have INSTEAD OF triggers. -+ERROR: at or near "invalid_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER invalid_trig INSTEAD OF DELETE ON main_table -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev -- Don't support WHEN clauses with INSTEAD OF triggers CREATE TRIGGER invalid_trig INSTEAD OF UPDATE ON main_view FOR EACH ROW WHEN (OLD.a <> NEW.a) EXECUTE PROCEDURE view_trigger('instead_of_upd'); -ERROR: INSTEAD OF triggers cannot have WHEN conditions -+ERROR: at or near "invalid_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER invalid_trig INSTEAD OF UPDATE ON main_view -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev -- Don't support column-level INSTEAD OF triggers CREATE TRIGGER invalid_trig INSTEAD OF UPDATE OF a ON main_view FOR EACH ROW EXECUTE PROCEDURE view_trigger('instead_of_upd'); -ERROR: INSTEAD OF triggers cannot have column lists -+ERROR: at or near "invalid_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER invalid_trig INSTEAD OF UPDATE OF a ON main_view -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev -- Don't support statement-level INSTEAD OF triggers CREATE TRIGGER invalid_trig INSTEAD OF UPDATE ON main_view EXECUTE PROCEDURE view_trigger('instead_of_upd'); -ERROR: INSTEAD OF triggers must be FOR EACH ROW -+ERROR: at or near "invalid_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER invalid_trig INSTEAD OF UPDATE ON main_view -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev -- Valid INSTEAD OF triggers CREATE TRIGGER instead_of_insert_trig INSTEAD OF INSERT ON main_view FOR EACH ROW EXECUTE PROCEDURE view_trigger('instead_of_ins'); -+ERROR: at or near "instead_of_insert_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER instead_of_insert_trig INSTEAD OF INSERT ON main_view -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE TRIGGER instead_of_update_trig INSTEAD OF UPDATE ON main_view FOR EACH ROW EXECUTE PROCEDURE view_trigger('instead_of_upd'); -+ERROR: at or near "instead_of_update_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER instead_of_update_trig INSTEAD OF UPDATE ON main_view -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE TRIGGER instead_of_delete_trig INSTEAD OF DELETE ON main_view FOR EACH ROW EXECUTE PROCEDURE view_trigger('instead_of_del'); -+ERROR: at or near "instead_of_delete_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER instead_of_delete_trig INSTEAD OF DELETE ON main_view -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev -- Valid BEFORE statement VIEW triggers CREATE TRIGGER before_ins_stmt_trig BEFORE INSERT ON main_view FOR EACH STATEMENT EXECUTE PROCEDURE view_trigger('before_view_ins_stmt'); -+ERROR: at or near "before_ins_stmt_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER before_ins_stmt_trig BEFORE INSERT ON main_view -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE TRIGGER before_upd_stmt_trig BEFORE UPDATE ON main_view FOR EACH STATEMENT EXECUTE PROCEDURE view_trigger('before_view_upd_stmt'); -+ERROR: at or near "before_upd_stmt_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER before_upd_stmt_trig BEFORE UPDATE ON main_view -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE TRIGGER before_del_stmt_trig BEFORE DELETE ON main_view FOR EACH STATEMENT EXECUTE PROCEDURE view_trigger('before_view_del_stmt'); -+ERROR: at or near "before_del_stmt_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER before_del_stmt_trig BEFORE DELETE ON main_view -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev -- Valid AFTER statement VIEW triggers CREATE TRIGGER after_ins_stmt_trig AFTER INSERT ON main_view FOR EACH STATEMENT EXECUTE PROCEDURE view_trigger('after_view_ins_stmt'); -+ERROR: at or near "after_ins_stmt_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER after_ins_stmt_trig AFTER INSERT ON main_view -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE TRIGGER after_upd_stmt_trig AFTER UPDATE ON main_view FOR EACH STATEMENT EXECUTE PROCEDURE view_trigger('after_view_upd_stmt'); -+ERROR: at or near "after_upd_stmt_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER after_upd_stmt_trig AFTER UPDATE ON main_view -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE TRIGGER after_del_stmt_trig AFTER DELETE ON main_view FOR EACH STATEMENT EXECUTE PROCEDURE view_trigger('after_view_del_stmt'); -+ERROR: at or near "after_del_stmt_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER after_del_stmt_trig AFTER DELETE ON main_view -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev \set QUIET false -- Insert into view using trigger INSERT INTO main_view VALUES (20, 30); @@ -78397,12 +77919,9 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la -- Remove table trigger to allow updates DROP TRIGGER before_upd_a_row_trig ON main_table; -DROP TRIGGER -+ERROR: at or near "before_upd_a_row_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+DROP TRIGGER before_upd_a_row_trig ON main_table -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev UPDATE main_view SET b = 31 WHERE a = 20; -NOTICE: main_view BEFORE UPDATE STATEMENT (before_view_upd_stmt) -NOTICE: main_view INSTEAD OF UPDATE ROW (instead_of_upd) @@ -78492,19 +78011,13 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la + ^ -- Test dropping view triggers DROP TRIGGER instead_of_insert_trig ON main_view; -+ERROR: at or near "instead_of_insert_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+DROP TRIGGER instead_of_insert_trig ON main_view -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev DROP TRIGGER instead_of_delete_trig ON main_view; -+ERROR: at or near "instead_of_delete_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+DROP TRIGGER instead_of_delete_trig ON main_view -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev \d+ main_view - View "public.main_view" - Column | Type | Collation | Nullable | Default | Storage | Description @@ -78545,63 +78058,60 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la country_name text unique not null, continent text not null ); -+NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. See https://www.cockroachlabs.com/docs/v24.2/serial.html ++NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. See https://www.cockroachlabs.com/docs/dev/serial.html INSERT INTO country_table (country_name, continent) VALUES ('Japan', 'Asia'), ('UK', 'Europe'), -@@ -1316,6 +1572,7 @@ +@@ -1316,6 +1340,7 @@ population bigint, country_id int references country_table ); -+NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. See https://www.cockroachlabs.com/docs/v24.2/serial.html ++NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. See https://www.cockroachlabs.com/docs/dev/serial.html CREATE VIEW city_view AS SELECT city_id, city_name, population, country_name, continent FROM city_table ci -@@ -1346,8 +1603,15 @@ +@@ -1346,8 +1371,14 @@ RETURN NEW; end; $$; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev CREATE TRIGGER city_insert_trig INSTEAD OF INSERT ON city_view FOR EACH ROW EXECUTE PROCEDURE city_insert(); -+ERROR: at or near "city_insert_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER city_insert_trig INSTEAD OF INSERT ON city_view -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE FUNCTION city_delete() RETURNS trigger LANGUAGE plpgsql AS $$ begin DELETE FROM city_table WHERE city_id = OLD.city_id; -@@ -1355,8 +1619,15 @@ +@@ -1355,8 +1386,14 @@ RETURN OLD; end; $$; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev CREATE TRIGGER city_delete_trig INSTEAD OF DELETE ON city_view FOR EACH ROW EXECUTE PROCEDURE city_delete(); -+ERROR: at or near "city_delete_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER city_delete_trig INSTEAD OF DELETE ON city_view -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE FUNCTION city_update() RETURNS trigger LANGUAGE plpgsql AS $$ declare ctry_id int; -@@ -1383,102 +1654,48 @@ +@@ -1383,102 +1420,47 @@ RETURN NEW; end; $$; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev CREATE TRIGGER city_update_trig INSTEAD OF UPDATE ON city_view FOR EACH ROW EXECUTE PROCEDURE city_update(); -+ERROR: at or near "city_update_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER city_update_trig INSTEAD OF UPDATE ON city_view -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev \set QUIET false -- INSERT .. RETURNING INSERT INTO city_view(city_name) VALUES('Tokyo') RETURNING *; @@ -78713,7 +78223,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la \set QUIET true -- read-only view with WHERE clause CREATE VIEW european_city_view AS -@@ -1486,26 +1703,48 @@ +@@ -1486,26 +1468,47 @@ SELECT count(*) FROM european_city_view; count ------- @@ -78723,15 +78233,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la CREATE FUNCTION no_op_trig_fn() RETURNS trigger LANGUAGE plpgsql AS 'begin RETURN NULL; end'; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev CREATE TRIGGER no_op_trig INSTEAD OF INSERT OR UPDATE OR DELETE ON european_city_view FOR EACH ROW EXECUTE PROCEDURE no_op_trig_fn(); -+ERROR: at or near "no_op_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER no_op_trig INSTEAD OF INSERT OR UPDATE OR DELETE -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev \set QUIET false INSERT INTO european_city_view VALUES (0, 'x', 10000, 'y', 'z'); -INSERT 0 0 @@ -78766,7 +78275,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la CREATE RULE european_city_update_rule AS ON UPDATE TO european_city_view DO INSTEAD UPDATE city_view SET city_name = NEW.city_name, -@@ -1513,47 +1752,57 @@ +@@ -1513,47 +1516,57 @@ country_name = NEW.country_name WHERE city_id = OLD.city_id RETURNING NEW.*; @@ -78850,7 +78359,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la -- join UPDATE test UPDATE city_view v SET population = 599657 FROM city_table ci, country_table co -@@ -1561,26 +1810,14 @@ +@@ -1561,26 +1574,14 @@ AND v.city_id = ci.city_id AND v.country_name = co.country_name RETURNING co.country_id, v.country_name, v.city_id, v.city_name, v.population; @@ -78881,51 +78390,48 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la DROP TABLE country_table; -- Test pg_trigger_depth() create table depth_a (id int not null primary key); -@@ -1595,8 +1832,15 @@ +@@ -1595,8 +1596,14 @@ return new; end; $$; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev create trigger depth_a_tr before insert on depth_a for each row execute procedure depth_a_tf(); -+ERROR: at or near "depth_a_tr": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger depth_a_tr before insert on depth_a -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create function depth_b_tf() returns trigger language plpgsql as $$ begin -@@ -1614,8 +1858,15 @@ +@@ -1614,8 +1621,14 @@ return new; end; $$; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev create trigger depth_b_tr before insert on depth_b for each row execute procedure depth_b_tf(); -+ERROR: at or near "depth_b_tr": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger depth_b_tr before insert on depth_b -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create function depth_c_tf() returns trigger language plpgsql as $$ begin -@@ -1627,50 +1878,30 @@ +@@ -1627,50 +1640,29 @@ return new; end; $$; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev create trigger depth_c_tr before insert on depth_c for each row execute procedure depth_c_tf(); -+ERROR: at or near "depth_c_tr": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger depth_c_tr before insert on depth_c -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev select pg_trigger_depth(); - pg_trigger_depth ------------------- @@ -78977,67 +78483,63 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la -- -- Test updates to rows during firing of BEFORE ROW triggers. -- As of 9.2, such cases should be rejected (see bug #6123). -@@ -1697,8 +1928,15 @@ +@@ -1697,8 +1689,14 @@ return new; end; $$; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev create trigger parent_upd_trig before update on parent for each row execute procedure parent_upd_func(); -+ERROR: at or near "parent_upd_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger parent_upd_trig before update on parent -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create function parent_del_func() returns trigger language plpgsql as $$ -@@ -1707,8 +1945,15 @@ +@@ -1707,8 +1705,14 @@ return old; end; $$; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev create trigger parent_del_trig before delete on parent for each row execute procedure parent_del_func(); -+ERROR: at or near "parent_del_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger parent_del_trig before delete on parent -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create function child_ins_func() returns trigger language plpgsql as $$ -@@ -1717,8 +1962,15 @@ +@@ -1717,8 +1721,14 @@ return new; end; $$; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev create trigger child_ins_trig after insert on child for each row execute procedure child_ins_func(); -+ERROR: at or near "child_ins_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger child_ins_trig after insert on child -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create function child_del_func() returns trigger language plpgsql as $$ -@@ -1727,14 +1979,21 @@ +@@ -1727,14 +1737,20 @@ return old; end; $$; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. 
++See: https://go.crdb.dev/issue-v/126356/dev create trigger child_del_trig after delete on child for each row execute procedure child_del_func(); -+ERROR: at or near "child_del_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger child_del_trig after delete on child -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev insert into parent values (1, 'a', 'a', 'a', 'a', 0); insert into child values (10, 1, 'b'); select * from parent; select * from child; @@ -79048,7 +78550,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la (1 row) bid | aid | val1 -@@ -1743,12 +2002,10 @@ +@@ -1743,12 +1759,10 @@ (1 row) update parent set val1 = 'b' where aid = 1; -- should fail @@ -79062,7 +78564,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la (1 row) bid | aid | val1 -@@ -1757,13 +2014,10 @@ +@@ -1757,13 +1771,10 @@ (1 row) delete from parent where aid = 1; -- should fail @@ -79077,15 +78579,17 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la bid | aid | val1 -----+-----+------ -@@ -1784,6 +2038,7 @@ +@@ -1784,6 +1795,9 @@ return old; end; $$; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev delete from parent where aid = 1; select * from parent; select * from child; aid | val1 | val2 | val3 | val4 | bcnt -@@ -1792,13 +2047,18 @@ +@@ -1792,13 +1806,18 @@ bid | aid | val1 -----+-----+------ @@ -79105,39 +78609,37 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la -- similar case, but with a self-referencing FK so that parent and child -- rows can be affected by a single operation create temp table self_ref_trigger ( -@@ -1818,8 +2078,15 @@ +@@ -1818,8 +1837,14 @@ return new; end; $$; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev create trigger self_ref_trigger_ins_trig before insert on self_ref_trigger for each row execute procedure self_ref_trigger_ins_func(); -+ERROR: at or near "self_ref_trigger_ins_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger self_ref_trigger_ins_trig before insert on self_ref_trigger -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create function self_ref_trigger_del_func() returns trigger language plpgsql as $$ -@@ -1831,8 +2098,15 @@ +@@ -1831,8 +1856,14 @@ return old; end; $$; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. 
++See: https://go.crdb.dev/issue-v/126356/dev create trigger self_ref_trigger_del_trig before delete on self_ref_trigger for each row execute procedure self_ref_trigger_del_func(); -+ERROR: at or near "self_ref_trigger_del_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger self_ref_trigger_del_trig before delete on self_ref_trigger -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev insert into self_ref_trigger values (1, null, 'root'); insert into self_ref_trigger values (2, 1, 'root child A'); insert into self_ref_trigger values (3, 1, 'root child B'); -@@ -1842,63 +2116,72 @@ +@@ -1842,63 +1873,68 @@ select * from self_ref_trigger; id | parent | data | nchildren ----+--------+--------------+----------- @@ -79182,32 +78684,28 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la +create table stmt_trig_on_empty_upd1 () inherits (stmt_trig_on_empty_upd) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev create function update_stmt_notice() returns trigger as $$ begin raise notice 'updating %', TG_TABLE_NAME; return null; end; $$ language plpgsql; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev create trigger before_stmt_trigger before update on stmt_trig_on_empty_upd execute procedure update_stmt_notice(); -+ERROR: at or near "before_stmt_trigger": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger before_stmt_trigger -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger before_stmt_trigger before update on stmt_trig_on_empty_upd1 execute procedure update_stmt_notice(); -+ERROR: at or near "before_stmt_trigger": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger before_stmt_trigger -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev -- inherited no-op update update stmt_trig_on_empty_upd set a = a where false returning a+1 as aa; -NOTICE: updating stmt_trig_on_empty_upd @@ -79230,19 +78728,18 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la -- -- Check that index creation (or DDL in general) is prohibited in a trigger -- -@@ -1911,23 +2194,26 @@ +@@ -1911,23 +1947,27 @@ alter table trigger_ddl_table add primary key (col1); return new; end$$ language plpgsql; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. 
++See: https://go.crdb.dev/issue-v/126356/dev create trigger trigger_ddl_func before insert on trigger_ddl_table for each row execute procedure trigger_ddl_func(); -+ERROR: at or near "trigger_ddl_func": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger trigger_ddl_func before insert on trigger_ddl_table for each row -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev insert into trigger_ddl_table values (1, 42); -- fail -ERROR: cannot ALTER TABLE "trigger_ddl_table" because it is being used by active queries in this session -CONTEXT: SQL statement "alter table trigger_ddl_table add primary key (col1)" @@ -79252,7 +78749,9 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la create index on trigger_ddl_table (col2); return new; end$$ language plpgsql; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev insert into trigger_ddl_table values (1, 42); -- fail -ERROR: cannot CREATE INDEX "trigger_ddl_table" because it is being used by active queries in this session -CONTEXT: SQL statement "create index on trigger_ddl_table (col2)" @@ -79263,35 +78762,33 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la -- -- Verify behavior of before and after triggers with INSERT...ON CONFLICT -- DO UPDATE -@@ -1951,8 +2237,15 @@ +@@ -1951,8 +1991,14 @@ return new; end; $$; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev create trigger upsert_before_trig before insert or update on upsert for each row execute procedure upsert_before_func(); -+ERROR: at or near "upsert_before_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger upsert_before_trig before insert or update on upsert -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create function upsert_after_func() returns trigger language plpgsql as $$ -@@ -1966,58 +2259,41 @@ +@@ -1966,58 +2012,40 @@ return null; end; $$; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev create trigger upsert_after_trig after insert or update on upsert for each row execute procedure upsert_after_func(); -+ERROR: at or near "upsert_after_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger upsert_after_trig after insert or update on upsert -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev insert into upsert values(1, 'black') on conflict (key) do update set color = 'updated ' || upsert.color; -WARNING: before insert (new): (1,black) -WARNING: after insert (new): (1,black) @@ -79357,21 +78854,20 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la -- -- Verify that triggers with transition tables are not allowed on -- views -@@ -2025,78 +2301,148 @@ +@@ -2025,78 +2053,128 @@ create table my_table (i int); create view my_view as select * from my_table; create function my_trigger_function() returns trigger as $$ begin end; $$ language plpgsql; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev create trigger my_trigger after update on my_view referencing old table as old_table for each statement execute procedure my_trigger_function(); -ERROR: "my_view" is a view -DETAIL: Triggers on views cannot have transition tables. -+ERROR: at or near "my_trigger": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger my_trigger after update on my_view referencing old table as old_table -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev drop function my_trigger_function(); +ERROR: unknown function: my_trigger_function() drop view my_view; @@ -79387,28 +78883,24 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la +HINT: try \h CREATE TABLE create function trigger_nothing() returns trigger language plpgsql as $$ begin end; $$; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev create trigger failed instead of update on parted_trig for each row execute procedure trigger_nothing(); -ERROR: "parted_trig" is a table -DETAIL: Tables cannot have INSTEAD OF triggers. -+ERROR: at or near "failed": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger failed instead of update on parted_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger failed after update on parted_trig referencing old table as old_table for each row execute procedure trigger_nothing(); -ERROR: "parted_trig" is a partitioned table -DETAIL: ROW triggers with transition tables are not supported on partitioned tables. -+ERROR: at or near "failed": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger failed after update on parted_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev drop table parted_trig; +ERROR: relation "parted_trig" does not exist -- @@ -79427,12 +78919,9 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la + ^ +HINT: try \h CREATE TABLE create trigger trg1 after insert on trigpart for each row execute procedure trigger_nothing(); -+ERROR: at or near "trg1": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger trg1 after insert on trigpart for each row execute procedure trigger_nothing() -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create table trigpart2 partition of trigpart for values from (1000) to (2000); +ERROR: at or near "partition": syntax error +DETAIL: source SQL: @@ -79486,30 +78975,21 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la drop trigger trg1 on trigpart1; -- fail -ERROR: cannot drop trigger trg1 on table trigpart1 because trigger trg1 on table trigpart requires it -HINT: You can drop trigger trg1 on table trigpart instead. -+ERROR: at or near "trg1": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+drop trigger trg1 on trigpart1 -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev drop trigger trg1 on trigpart2; -- fail -ERROR: cannot drop trigger trg1 on table trigpart2 because trigger trg1 on table trigpart requires it -HINT: You can drop trigger trg1 on table trigpart instead. -+ERROR: at or near "trg1": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+drop trigger trg1 on trigpart2 -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev drop trigger trg1 on trigpart3; -- fail -ERROR: cannot drop trigger trg1 on table trigpart3 because trigger trg1 on table trigpart requires it -HINT: You can drop trigger trg1 on table trigpart instead. -+ERROR: at or near "trg1": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+drop trigger trg1 on trigpart3 -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev drop table trigpart2; -- ok, trigger should be gone in that partition +ERROR: relation "trigpart2" does not exist select tgrelid::regclass, tgname, tgfoid::regproc from pg_trigger @@ -79528,25 +79008,19 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la +(0 rows) drop trigger trg1 on trigpart; -- ok, all gone -+ERROR: at or near "trg1": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+drop trigger trg1 on trigpart -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev select tgrelid::regclass, tgname, tgfoid::regproc from pg_trigger where tgrelid::regclass::text like 'trigpart%' order by tgrelid::regclass::text; tgrelid | tgname | tgfoid -@@ -2105,74 +2451,171 @@ +@@ -2105,74 +2183,153 @@ -- check detach behavior create trigger trg1 after insert on trigpart for each row execute procedure trigger_nothing(); -+ERROR: at or near "trg1": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger trg1 after insert on trigpart for each row execute procedure trigger_nothing() -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev \d trigpart3 - Table "public.trigpart3" - Column | Type | Collation | Nullable | Default @@ -79574,12 +79048,9 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la +HINT: try \h ALTER TABLE drop trigger trg1 on trigpart3; -- fail due to "does not exist" -ERROR: trigger "trg1" for table "trigpart3" does not exist -+ERROR: at or near "trg1": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+drop trigger trg1 on trigpart3 -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev alter table trigpart detach partition trigpart4; +ERROR: at or near "detach": syntax error +DETAIL: source SQL: @@ -79588,12 +79059,9 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la +HINT: try \h ALTER TABLE drop trigger trg1 on trigpart41; -- fail due to "does not exist" -ERROR: trigger "trg1" for table "trigpart41" does not exist -+ERROR: at or near "trg1": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+drop trigger trg1 on trigpart41 -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev drop table trigpart4; +ERROR: relation "trigpart4" does not exist alter table trigpart attach partition trigpart3 for values from (2000) to (3000); @@ -79630,12 +79098,9 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la create table trigpart3 (like trigpart); +ERROR: relation "trigpart" does not exist create trigger trg1 after insert on trigpart3 for each row execute procedure trigger_nothing(); -+ERROR: at or near "trg1": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger trg1 after insert on trigpart3 for each row execute procedure trigger_nothing() -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev \d trigpart3 - Table "public.trigpart3" - Column | Type | Collation | Nullable | Default @@ -79665,19 +79130,13 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la +ERROR: relation "trigpart3" does not exist -- check display of unrelated triggers create trigger samename after delete on trigpart execute function trigger_nothing(); -+ERROR: at or near "samename": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger samename after delete on trigpart execute function trigger_nothing() -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger samename after delete on trigpart1 execute function trigger_nothing(); -+ERROR: at or near "samename": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger samename after delete on trigpart1 execute function trigger_nothing() -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev \d trigpart1 - Table "public.trigpart1" - Column | Type | Collation | Nullable | Default @@ -79744,183 +79203,122 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la create or replace function trigger_notice() returns trigger as $$ begin raise notice 'trigger % on % % % for %', TG_NAME, TG_TABLE_NAME, TG_WHEN, TG_OP, TG_LEVEL; -@@ -2182,136 +2625,301 @@ +@@ -2182,136 +2339,225 @@ return null; end; $$ language plpgsql; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev -- insert/update/delete statement-level triggers on the parent create trigger trig_ins_before before insert on parted_stmt_trig for each statement execute procedure trigger_notice(); -+ERROR: at or near "trig_ins_before": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger trig_ins_before before insert on parted_stmt_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger trig_ins_after after insert on parted_stmt_trig for each statement execute procedure trigger_notice(); -+ERROR: at or near "trig_ins_after": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger trig_ins_after after insert on parted_stmt_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger trig_upd_before before update on parted_stmt_trig for each statement execute procedure trigger_notice(); -+ERROR: at or near "trig_upd_before": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger trig_upd_before before update on parted_stmt_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger trig_upd_after after update on parted_stmt_trig for each statement execute procedure trigger_notice(); -+ERROR: at or near "trig_upd_after": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger trig_upd_after after update on parted_stmt_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger trig_del_before before delete on parted_stmt_trig for each statement execute procedure trigger_notice(); -+ERROR: at or near "trig_del_before": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger trig_del_before before delete on parted_stmt_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger trig_del_after after delete on parted_stmt_trig for each statement execute procedure trigger_notice(); -+ERROR: at or near "trig_del_after": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger trig_del_after after delete on parted_stmt_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev -- insert/update/delete row-level triggers on the parent create trigger trig_ins_after_parent after insert on parted_stmt_trig for each row execute procedure trigger_notice(); -+ERROR: at or near "trig_ins_after_parent": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger trig_ins_after_parent after insert on parted_stmt_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger trig_upd_after_parent after update on parted_stmt_trig for each row execute procedure trigger_notice(); -+ERROR: at or near "trig_upd_after_parent": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger trig_upd_after_parent after update on parted_stmt_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger trig_del_after_parent after delete on parted_stmt_trig for each row execute procedure trigger_notice(); -+ERROR: at or near "trig_del_after_parent": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger trig_del_after_parent after delete on parted_stmt_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev -- insert/update/delete row-level triggers on the first partition create trigger trig_ins_before_child before insert on parted_stmt_trig1 for each row execute procedure trigger_notice(); -+ERROR: at or near "trig_ins_before_child": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger trig_ins_before_child before insert on parted_stmt_trig1 -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger trig_ins_after_child after insert on parted_stmt_trig1 for each row execute procedure trigger_notice(); -+ERROR: at or near "trig_ins_after_child": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger trig_ins_after_child after insert on parted_stmt_trig1 -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger trig_upd_before_child before update on parted_stmt_trig1 for each row execute procedure trigger_notice(); -+ERROR: at or near "trig_upd_before_child": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger trig_upd_before_child before update on parted_stmt_trig1 -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger trig_upd_after_child after update on parted_stmt_trig1 for each row execute procedure trigger_notice(); -+ERROR: at or near "trig_upd_after_child": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger trig_upd_after_child after update on parted_stmt_trig1 -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger trig_del_before_child before delete on parted_stmt_trig1 for each row execute procedure trigger_notice(); -+ERROR: at or near "trig_del_before_child": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger trig_del_before_child before delete on parted_stmt_trig1 -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger trig_del_after_child after delete on parted_stmt_trig1 for each row execute procedure trigger_notice(); -+ERROR: at or near "trig_del_after_child": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger trig_del_after_child after delete on parted_stmt_trig1 -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev -- insert/update/delete statement-level triggers on the parent create trigger trig_ins_before_3 before insert on parted2_stmt_trig for each statement execute procedure trigger_notice(); -+ERROR: at or near "trig_ins_before_3": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger trig_ins_before_3 before insert on parted2_stmt_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger trig_ins_after_3 after insert on parted2_stmt_trig for each statement execute procedure trigger_notice(); -+ERROR: at or near "trig_ins_after_3": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger trig_ins_after_3 after insert on parted2_stmt_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger trig_upd_before_3 before update on parted2_stmt_trig for each statement execute procedure trigger_notice(); -+ERROR: at or near "trig_upd_before_3": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger trig_upd_before_3 before update on parted2_stmt_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger trig_upd_after_3 after update on parted2_stmt_trig for each statement execute procedure trigger_notice(); -+ERROR: at or near "trig_upd_after_3": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger trig_upd_after_3 after update on parted2_stmt_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger trig_del_before_3 before delete on parted2_stmt_trig for each statement execute procedure trigger_notice(); -+ERROR: at or near "trig_del_before_3": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger trig_del_before_3 before delete on parted2_stmt_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger trig_del_after_3 after delete on parted2_stmt_trig for each statement execute procedure trigger_notice(); -+ERROR: at or near "trig_del_after_3": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger trig_del_after_3 after delete on parted2_stmt_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev with ins (a) as ( insert into parted2_stmt_trig values (1), (2) returning a ) insert into parted_stmt_trig select a from ins returning tableoid::regclass, a; @@ -80036,40 +79434,25 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la + ^ +HINT: try \h CREATE TABLE create trigger zzz after insert on parted_trig for each row execute procedure trigger_notice(); -+ERROR: at or near "zzz": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger zzz after insert on parted_trig for each row execute procedure trigger_notice() -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger mmm after insert on parted_trig_1_1 for each row execute procedure trigger_notice(); -+ERROR: at or near "mmm": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger mmm after insert on parted_trig_1_1 for each row execute procedure trigger_notice() -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger aaa after insert on parted_trig_1 for each row execute procedure trigger_notice(); -+ERROR: at or near "aaa": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger aaa after insert on parted_trig_1 for each row execute procedure trigger_notice() -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger bbb after insert on parted_trig for each row execute procedure trigger_notice(); -+ERROR: at or near "bbb": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger bbb after insert on parted_trig for each row execute procedure trigger_notice() -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger qqq after insert on parted_trig_1_1 for each row execute procedure trigger_notice(); -+ERROR: at or near "qqq": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger qqq after insert on parted_trig_1_1 for each row execute procedure trigger_notice() -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev insert into parted_trig values (50), (1500); -NOTICE: trigger aaa on parted_trig_1_1 AFTER INSERT for ROW -NOTICE: trigger bbb on parted_trig_1_1 AFTER INSERT for ROW @@ -80097,19 +79480,18 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la create or replace function trigger_notice() returns trigger as $$ declare arg1 text = TG_ARGV[0]; -@@ -2322,21 +2930,39 @@ +@@ -2322,21 +2568,38 @@ return null; end; $$ language plpgsql; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. 
++See: https://go.crdb.dev/issue-v/126356/dev create trigger aaa after insert on parted_trig for each row execute procedure trigger_notice('quirky', 1); -+ERROR: at or near "aaa": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger aaa after insert on parted_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev -- Verify propagation of trigger arguments to partitions attached after creating trigger create table parted_trig2 partition of parted_trig for values in (2); +ERROR: at or near "partition": syntax error @@ -80140,11 +79522,13 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la create or replace function trigger_notice_ab() returns trigger as $$ begin raise notice 'trigger % on % % % for %: (a,b)=(%,%)', -@@ -2348,97 +2974,149 @@ +@@ -2348,97 +2611,148 @@ return null; end; $$ language plpgsql; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev create table parted_irreg_ancestor (fd text, b text, fd2 int, fd3 int, a int) partition by range (b); +ERROR: at or near "EOF": syntax error @@ -80184,20 +79568,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la +HINT: try \h ALTER TABLE create trigger parted_trig after insert on parted_irreg for each row execute procedure trigger_notice_ab(); -+ERROR: at or near "parted_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger parted_trig after insert on parted_irreg -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger parted_trig_odd after insert on parted_irreg for each row when (bark(new.b) AND new.a % 2 = 1) execute procedure trigger_notice_ab(); -+ERROR: at or near "parted_trig_odd": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger parted_trig_odd after insert on parted_irreg for each row -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev -- we should hear barking for every insert, but parted_trig_odd only emits -- noise for odd values of a. parted_trig does it for all inserts. insert into parted_irreg values (1, 'aardvark'), (2, 'aanimals'); @@ -80243,17 +79621,16 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la return new; end; $$; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. 
++See: https://go.crdb.dev/issue-v/126356/dev insert into parted values (1, 1, 'uno uno v1'); -- works +ERROR: relation "parted" does not exist create trigger t before insert or update or delete on parted for each row execute function parted_trigfunc(); -+ERROR: at or near "t": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger t before insert or update or delete on parted -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev insert into parted values (1, 1, 'uno uno v2'); -- fail -ERROR: moving row to another partition during a BEFORE FOR EACH ROW trigger is not supported -DETAIL: Before executing trigger "t", the row was to be in partition "public.parted_1_1". @@ -80268,7 +79645,9 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la return new; end; $$; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev insert into parted values (1, 1, 'uno uno v4'); -- fail -ERROR: moving row to another partition during a BEFORE FOR EACH ROW trigger is not supported -DETAIL: Before executing trigger "t", the row was to be in partition "public.parted_1_1". @@ -80283,7 +79662,9 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la return new; end; $$; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev insert into parted values (1, 1, 'uno uno'); -- works +ERROR: relation "parted" does not exist update parted set c = c || ' v6'; -- works @@ -80319,19 +79700,18 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la -- both trigger and update change the partition create or replace function parted_trigfunc2() returns trigger language plpgsql as $$ begin -@@ -2446,381 +3124,719 @@ +@@ -2446,381 +2760,700 @@ return new; end; $$; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev create trigger t2 before update on parted for each row execute function parted_trigfunc2(); -+ERROR: at or near "t2": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger t2 before update on parted -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev truncate table parted; +ERROR: relation "parted" does not exist insert into parted values (1, 1, 'uno uno v6'); @@ -80376,7 +79756,9 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la return new; end; $$; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. 
++See: https://go.crdb.dev/issue-v/126356/dev create table parted_1 partition of parted for values in (1, 2); +ERROR: at or near "partition": syntax error +DETAIL: source SQL: @@ -80391,12 +79773,9 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la +HINT: try \h CREATE TABLE create trigger t before insert or update on parted for each row execute function parted_trigfunc(); -+ERROR: at or near "t": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger t before insert or update on parted -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev insert into parted values (0, 1, 'zero win'); +ERROR: relation "parted" does not exist insert into parted values (1, 1, 'one fail'); @@ -80459,7 +79838,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la +create constraint trigger parted_trig after insert on parted_constr_ancestor + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/28296/dev create constraint trigger parted_trig_two after insert on parted_constr deferrable initially deferred for each row when (bark(new.b) AND new.a % 2 = 1) @@ -80469,7 +79848,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la +create constraint trigger parted_trig_two after insert on parted_constr + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/28296/dev -- The immediate constraint is fired immediately; the WHEN clause of the -- deferred constraint is also called immediately. The deferred constraint -- is fired at commit time. @@ -80548,12 +79927,9 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la +HINT: try \h ALTER TABLE create trigger parted_trigger after update on parted_trigger for each row when (new.a % 2 = 1 and length(old.b) >= 2) execute procedure trigger_notice_ab(); -+ERROR: at or near "parted_trigger": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger parted_trigger after update on parted_trigger -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create table parted_trigger_3 (b text, a int) partition by range (length(b)); +ERROR: at or near "(": syntax error +DETAIL: source SQL: @@ -80623,7 +79999,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la +create constraint trigger parted_trigger after update on parted_trigger + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/28296/dev create constraint trigger parted_trigger after update on unparted_trigger from parted_referenced for each row execute procedure trigger_notice_ab(); @@ -80632,7 +80008,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la +create constraint trigger parted_trigger after update on unparted_trigger + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/28296/dev create table parted_trigger_3 (b text, a int) partition by range (length(b)); +ERROR: at or near "(": syntax error +DETAIL: source SQL: @@ -80703,12 +80079,9 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la +HINT: try \h ALTER TABLE create trigger parted_trigger after update of b on parted_trigger for each row execute procedure trigger_notice_ab(); -+ERROR: at or near "parted_trigger": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger parted_trigger after update of b on parted_trigger -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create table parted_trigger_3 (b text, a int) partition by range (length(b)); +ERROR: at or near "(": syntax error +DETAIL: source SQL: @@ -80767,7 +80140,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la +alter table trg_clone add constraint uniq unique (a) deferrable + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/31632/v24.2 ++See: https://go.crdb.dev/issue-v/31632/dev create table trg_clone2 partition of trg_clone for values from (1000) to (2000); +ERROR: at or near "partition": syntax error +DETAIL: source SQL: @@ -80813,26 +80186,22 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la +create table child1 () inherits (parent) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev create function trig_nothing() returns trigger language plpgsql as $$ begin return null; end $$; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev create trigger tg after insert on parent for each row execute function trig_nothing(); -+ERROR: at or near "tg": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger tg after insert on parent -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger tg after insert on child1 for each row execute function trig_nothing(); -+ERROR: at or near "tg": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger tg after insert on child1 -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev alter table parent disable trigger tg; +ERROR: at or near "disable": syntax error +DETAIL: source SQL: @@ -80881,20 +80250,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la +HINT: try \h CREATE TABLE create trigger tg after insert on parent for each row execute procedure trig_nothing(); -+ERROR: at or near "tg": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger tg after insert on parent -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger tg_stmt after insert on parent for statement execute procedure trig_nothing(); -+ERROR: at or near "tg_stmt": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger tg_stmt after insert on parent -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev select tgrelid::regclass, tgname, tgenabled from pg_trigger where tgrelid in ('parent'::regclass, 'child1'::regclass) order by tgrelid::regclass::text, tgname; @@ -81048,14 +80411,13 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la +HINT: try \h CREATE TABLE CREATE OR REPLACE FUNCTION tgf() RETURNS trigger LANGUAGE plpgsql AS $$ begin raise exception 'except'; end $$; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev CREATE TRIGGER tg AFTER INSERT ON trgfire FOR EACH ROW EXECUTE FUNCTION tgf(); -+ERROR: at or near "tg": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER tg AFTER INSERT ON trgfire FOR EACH ROW EXECUTE FUNCTION tgf() -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev INSERT INTO trgfire VALUES (1); -ERROR: except -CONTEXT: PL/pgSQL function tgf() line 1 at RAISE @@ -81171,31 +80533,37 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la -- -- Test the interaction between transition tables and both kinds of -- inheritance. We'll dump the contents of the transition tables in a -@@ -2836,6 +3852,7 @@ +@@ -2836,6 +3469,9 @@ return null; end; $$; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev create or replace function dump_update() returns trigger language plpgsql as $$ begin -@@ -2846,6 +3863,7 @@ +@@ -2846,6 +3482,9 @@ return null; end; $$; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev create or replace function dump_delete() returns trigger language plpgsql as $$ begin -@@ -2855,6 +3873,7 @@ +@@ -2855,6 +3494,9 @@ return null; end; $$; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev -- -- Verify behavior of statement triggers on partition hierarchy with -- transition tables. 
Tuples should appear to each trigger in the -@@ -2862,120 +3881,256 @@ +@@ -2862,120 +3504,196 @@ -- -- set up a partition hierarchy with some different TupleDescriptors create table parent (a text, b int) partition by list (a); @@ -81231,111 +80599,75 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la create trigger parent_insert_trig after insert on parent referencing new table as new_table for each statement execute procedure dump_insert(); -+ERROR: at or near "parent_insert_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger parent_insert_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger parent_update_trig after update on parent referencing old table as old_table new table as new_table for each statement execute procedure dump_update(); -+ERROR: at or near "parent_update_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger parent_update_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger parent_delete_trig after delete on parent referencing old table as old_table for each statement execute procedure dump_delete(); -+ERROR: at or near "parent_delete_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger parent_delete_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger child1_insert_trig after insert on child1 referencing new table as new_table for each statement execute procedure dump_insert(); -+ERROR: at or near "child1_insert_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger child1_insert_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger child1_update_trig after update on child1 referencing old table as old_table new table as new_table for each statement execute procedure dump_update(); -+ERROR: at or near "child1_update_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger child1_update_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger child1_delete_trig after delete on child1 referencing old table as old_table for each statement execute procedure dump_delete(); -+ERROR: at or near "child1_delete_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger child1_delete_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger child2_insert_trig after insert on child2 referencing new table as new_table for each statement execute procedure dump_insert(); -+ERROR: at or near "child2_insert_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger child2_insert_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger child2_update_trig after update on child2 referencing old table as old_table new table as new_table for each statement execute procedure dump_update(); -+ERROR: at or near "child2_update_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger child2_update_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger child2_delete_trig after delete on child2 referencing old table as old_table for each statement execute procedure dump_delete(); -+ERROR: at or near "child2_delete_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger child2_delete_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger child3_insert_trig after insert on child3 referencing new table as new_table for each statement execute procedure dump_insert(); -+ERROR: at or near "child3_insert_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger child3_insert_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger child3_update_trig after update on child3 referencing old table as old_table new table as new_table for each statement execute procedure dump_update(); -+ERROR: at or near "child3_update_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger child3_update_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger child3_delete_trig after delete on child3 referencing old table as old_table for each statement execute procedure dump_delete(); -+ERROR: at or near "child3_delete_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger child3_delete_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev SELECT trigger_name, event_manipulation, event_object_schema, event_object_table, action_order, action_condition, action_orientation, action_timing, action_reference_old_table, action_reference_new_table @@ -81411,61 +80743,37 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la +AAA 42 +^ drop trigger child1_update_trig on child1; -+ERROR: at or near "child1_update_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+drop trigger child1_update_trig on child1 -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev drop trigger child1_delete_trig on child1; -+ERROR: at or near "child1_delete_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+drop trigger child1_delete_trig on child1 -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev drop trigger child2_insert_trig on child2; -+ERROR: at or near "child2_insert_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+drop trigger child2_insert_trig on child2 -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev drop trigger child2_update_trig on child2; -+ERROR: at or near "child2_update_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+drop trigger child2_update_trig on child2 -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev drop trigger child2_delete_trig on child2; -+ERROR: at or near "child2_delete_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+drop trigger child2_delete_trig on child2 -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev drop trigger child3_insert_trig on child3; -+ERROR: at or near "child3_insert_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+drop trigger child3_insert_trig on child3 -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev drop trigger child3_update_trig on child3; -+ERROR: at or near "child3_update_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+drop trigger child3_update_trig on child3 -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev drop trigger child3_delete_trig on child3; -+ERROR: at or near "child3_delete_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+drop trigger child3_delete_trig on child3 -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev delete from parent; -NOTICE: trigger = parent_delete_trig, old table = (AAA,42), (BBB,42), (CCC,42) -- copy into parent sees tuples collected from children even if there @@ -81481,7 +80789,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la -- insert into parent with a before trigger on a child tuple before -- insertion, and we capture the newly modified row in parent format create or replace function intercept_insert() returns trigger language plpgsql as -@@ -2985,41 +4140,102 @@ +@@ -2985,41 +3703,90 @@ return new; end; $$; @@ -81492,12 +80800,9 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la create trigger intercept_insert_child3 before insert on child3 for each row execute procedure intercept_insert(); -+ERROR: at or near "intercept_insert_child3": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger intercept_insert_child3 -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev -- insert, parent trigger sees post-modification parent-format tuple insert into parent values ('AAA', 42), ('BBB', 42), ('CCC', 66); -NOTICE: trigger = parent_insert_trig, new table = (AAA,42), (BBB,42), (CCC,1066) @@ -81539,12 +80844,9 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la after insert on child referencing new table as new_table for each row execute procedure dump_insert(); -ERROR: ROW triggers with transition tables are not supported on partitions -+ERROR: at or near "child_row_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger child_row_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev -- detaching it first works alter table parent detach partition child; +ERROR: at or near "detach": syntax error @@ -81555,12 +80857,9 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la create trigger child_row_trig after insert on child referencing new table as new_table for each row execute procedure dump_insert(); -+ERROR: at or near "child_row_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger child_row_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev -- but now we're not allowed to reattach it alter table parent attach partition child for values in ('AAA'); -ERROR: trigger "child_row_trig" prevents table "child" from becoming a partition @@ -81572,12 +80871,9 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la +HINT: try \h ALTER TABLE -- drop the trigger, and now we're allowed to attach it again drop trigger child_row_trig on child; -+ERROR: at or near "child_row_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+drop trigger child_row_trig on child -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev alter table parent attach partition child for values in ('AAA'); +ERROR: at or near "attach": syntax error +DETAIL: source SQL: @@ -81589,7 +80885,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la -- -- Verify behavior of statement triggers on (non-partition) -- inheritance hierarchy with transition tables; similar to the -@@ -3028,123 +4244,313 @@ +@@ -3028,123 +3795,244 @@ -- -- set up inheritance hierarchy with different TupleDescriptors create table parent (a text, b int); @@ -81601,7 +80897,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la +create table child1 () inherits (parent) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev -- a child with a different column order create table child2 (b int, a text); +ERROR: relation "root.public.child2" already exists @@ -81618,115 +80914,79 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la +create table child3 (c text) inherits (parent) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev create trigger parent_insert_trig after insert on parent referencing new table as new_table for each statement execute procedure dump_insert(); -+ERROR: at or near "parent_insert_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger parent_insert_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger parent_update_trig after update on parent referencing old table as old_table new table as new_table for each statement execute procedure dump_update(); -+ERROR: at or near "parent_update_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger parent_update_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger parent_delete_trig after delete on parent referencing old table as old_table for each statement execute procedure dump_delete(); -+ERROR: at or near "parent_delete_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger parent_delete_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger child1_insert_trig after insert on child1 referencing new table as new_table for each statement execute procedure dump_insert(); -+ERROR: at or near "child1_insert_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger child1_insert_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger child1_update_trig after update on child1 referencing old table as old_table new table as new_table for each statement execute procedure dump_update(); -+ERROR: at or near "child1_update_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger child1_update_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger child1_delete_trig after delete on child1 referencing old table as old_table for each statement execute procedure dump_delete(); -+ERROR: at or near "child1_delete_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger child1_delete_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger child2_insert_trig after insert on child2 referencing new table as new_table for each statement execute procedure dump_insert(); -+ERROR: at or near "child2_insert_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger child2_insert_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger child2_update_trig after update on child2 referencing old table as old_table new table as new_table for each statement execute procedure dump_update(); -+ERROR: at or near "child2_update_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger child2_update_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger child2_delete_trig after delete on child2 referencing old table as old_table for each statement execute procedure dump_delete(); -+ERROR: at or near "child2_delete_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger child2_delete_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger child3_insert_trig after insert on child3 referencing new table as new_table for each statement execute procedure dump_insert(); -+ERROR: at or near "child3_insert_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger child3_insert_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger child3_update_trig after update on child3 referencing old table as old_table new table as new_table for each statement execute procedure dump_update(); -+ERROR: at or near "child3_update_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger child3_update_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger child3_delete_trig after delete on child3 referencing old table as old_table for each statement execute procedure dump_delete(); -+ERROR: at or near "child3_delete_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger child3_delete_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev -- insert directly into children sees respective child-format tuples insert into child1 values ('AAA', 42); -NOTICE: trigger = child1_insert_trig, new table = (AAA,42) @@ -81793,61 +81053,37 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la +DDD 42 +^ drop trigger child1_update_trig on child1; -+ERROR: at or near "child1_update_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+drop trigger child1_update_trig on child1 -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev drop trigger child1_delete_trig on child1; -+ERROR: at or near "child1_delete_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+drop trigger child1_delete_trig on child1 -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev drop trigger child2_insert_trig on child2; -+ERROR: at or near "child2_insert_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+drop trigger child2_insert_trig on child2 -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev drop trigger child2_update_trig on child2; -+ERROR: at or near "child2_update_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+drop trigger child2_update_trig on child2 -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev drop trigger child2_delete_trig on child2; -+ERROR: at or near "child2_delete_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+drop trigger child2_delete_trig on child2 -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev drop trigger child3_insert_trig on child3; -+ERROR: at or near "child3_insert_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+drop trigger child3_insert_trig on child3 -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev drop trigger child3_update_trig on child3; -+ERROR: at or near "child3_update_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+drop trigger child3_update_trig on child3 -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev drop trigger child3_delete_trig on child3; -+ERROR: at or near "child3_delete_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+drop trigger child3_delete_trig on child3 -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev delete from parent; -NOTICE: trigger = parent_delete_trig, old table = (AAA,42), (BBB,42), (CCC,42), (DDD,42) drop table child1, child2, child3, parent; @@ -81864,18 +81100,15 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la +create table child () inherits (parent) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev -- adding row trigger with transition table fails create trigger child_row_trig after insert on child referencing new table as new_table for each row execute procedure dump_insert(); -ERROR: ROW triggers with transition tables are not supported on inheritance children -+ERROR: at or near "child_row_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger child_row_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev -- disinheriting it first works alter table child no inherit parent; +ERROR: at or near "inherit": syntax error @@ -81886,12 +81119,9 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la create trigger child_row_trig after insert on child referencing new table as new_table for each row execute procedure dump_insert(); -+ERROR: at or near "child_row_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger child_row_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev -- but now we're not allowed to make it inherit anymore alter table child inherit parent; -ERROR: trigger "child_row_trig" prevents table "child" from becoming an inheritance child @@ -81903,12 +81133,9 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la +HINT: try \h ALTER TABLE -- drop the trigger, and now we're allowed to make it inherit again drop trigger child_row_trig on child; -+ERROR: at or near "child_row_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+drop trigger child_row_trig on child -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev alter table child inherit parent; +ERROR: at or near "inherit": syntax error +DETAIL: source SQL: @@ -81920,25 +81147,19 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la -- -- Verify behavior of queries with wCTEs, where multiple transition -- tuplestores can be active at the same time because there are -@@ -3156,29 +4562,38 @@ +@@ -3156,29 +4044,32 @@ create trigger table1_trig after insert on table1 referencing new table as new_table for each statement execute procedure dump_insert(); -+ERROR: at or near "table1_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger table1_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger table2_trig after insert on table2 referencing new table as new_table for each statement execute procedure dump_insert(); -+ERROR: at or near "table2_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger table2_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev with wcte as (insert into table1 values (42)) insert into table2 values ('hello world'); -NOTICE: trigger = table2_trig, new table = ("hello world") @@ -81972,25 +81193,19 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la drop table table1; drop table table2; -@@ -3190,60 +4605,101 @@ +@@ -3190,60 +4081,89 @@ create trigger my_table_insert_trig after insert on my_table referencing new table as new_table for each statement execute procedure dump_insert(); -+ERROR: at or near "my_table_insert_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger my_table_insert_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger my_table_update_trig after update on my_table referencing old table as old_table new table as new_table for each statement execute procedure dump_update(); -+ERROR: at or near "my_table_update_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger my_table_update_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev -- inserts only insert into my_table values (1, 'AAA'), (2, 'BBB') on conflict (a) do @@ -82045,21 +81260,15 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la create trigger iocdu_tt_parted_insert_trig after insert on iocdu_tt_parted referencing new table as new_table for each statement execute procedure dump_insert(); -+ERROR: at or near "iocdu_tt_parted_insert_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger iocdu_tt_parted_insert_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger iocdu_tt_parted_update_trig after update on iocdu_tt_parted referencing old table as old_table new table as new_table for each statement execute procedure dump_update(); -+ERROR: at or near "iocdu_tt_parted_update_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger iocdu_tt_parted_update_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev -- inserts only insert into iocdu_tt_parted values (1, 'AAA'), (2, 'BBB') on conflict (a) do @@ -82086,75 +81295,57 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la -- -- Verify that you can't create a trigger with transition tables for -- more than one event. -@@ -3251,7 +4707,12 @@ +@@ -3251,7 +4171,9 @@ create trigger my_table_multievent_trig after insert or update on my_table referencing new table as new_table for each statement execute procedure dump_insert(); -ERROR: transition tables cannot be specified for triggers with more than one event -+ERROR: at or near "my_table_multievent_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger my_table_multievent_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev -- -- Verify that you can't create a trigger with transition tables with -- a column list. -@@ -3259,7 +4720,12 @@ +@@ -3259,7 +4181,9 @@ create trigger my_table_col_update_trig after update of b on my_table referencing new table as new_table for each statement execute procedure dump_insert(); -ERROR: transition tables cannot be specified for triggers with column lists -+ERROR: at or near "my_table_col_update_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger my_table_col_update_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev drop table my_table; -- -- Test firing of triggers with transition tables by foreign key cascades -@@ -3271,15 +4737,39 @@ +@@ -3271,15 +4195,27 @@ create trigger trig_table_before_trig before insert or update or delete on trig_table for each statement execute procedure trigger_func('trig_table'); -+ERROR: at or near "trig_table_before_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger trig_table_before_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger trig_table_insert_trig after insert on trig_table referencing new table as new_table for each statement execute procedure dump_insert(); -+ERROR: at or near "trig_table_insert_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger trig_table_insert_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger trig_table_update_trig after update on trig_table referencing old table as old_table new table as new_table for each statement execute procedure dump_update(); -+ERROR: at or near "trig_table_update_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger trig_table_update_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger trig_table_delete_trig after delete on trig_table referencing old table as old_table for each statement execute procedure dump_delete(); -+ERROR: at or near "trig_table_delete_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger trig_table_delete_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev insert into refd_table values (1, 'one'), (2, 'two'), -@@ -3291,25 +4781,19 @@ +@@ -3291,25 +4227,19 @@ (2, 'two b'), (3, 'three a'), (3, 'three b'); @@ -82182,34 +81373,25 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la select * from trig_table; a | b ---+--------- -@@ -3326,27 +4810,42 @@ +@@ -3326,27 +4256,30 @@ create trigger self_ref_before_trig before delete on self_ref for each statement execute procedure trigger_func('self_ref'); -+ERROR: at or near "self_ref_before_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger self_ref_before_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger self_ref_r_trig after delete on self_ref referencing old table as old_table for each row execute procedure dump_delete(); -+ERROR: at or near "self_ref_r_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger self_ref_r_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger self_ref_s_trig after delete on self_ref referencing old table as old_table for each statement execute procedure dump_delete(); -+ERROR: at or near "self_ref_s_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger self_ref_s_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev insert into self_ref values (1, null), (2, 1), (3, 2); delete from self_ref where a = 1; -NOTICE: trigger_func(self_ref) called: action = DELETE, when = BEFORE, level = STATEMENT @@ -82221,12 +81403,9 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la -NOTICE: trigger = self_ref_s_trig, old table = (3,2) -- without AR trigger, cascaded deletes all end up in one transition table drop trigger self_ref_r_trig on self_ref; -+ERROR: at or near "self_ref_r_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+drop trigger self_ref_r_trig on self_ref -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev insert into self_ref values (1, null), (2, 1), (3, 2), (4, 3); delete from self_ref where a = 1; -NOTICE: trigger_func(self_ref) called: action = DELETE, when = BEFORE, level = STATEMENT @@ -82234,38 +81413,29 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la drop table self_ref; -- -- test transition tables with MERGE -@@ -3355,12 +4854,30 @@ +@@ -3355,12 +4288,21 @@ create trigger merge_target_table_insert_trig after insert on merge_target_table referencing new table as new_table for each statement execute procedure dump_insert(); -+ERROR: at or near "merge_target_table_insert_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger merge_target_table_insert_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger merge_target_table_update_trig after update on merge_target_table referencing old table as old_table new table as new_table for each statement execute procedure dump_update(); -+ERROR: at or near "merge_target_table_update_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger merge_target_table_update_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger merge_target_table_delete_trig after delete on merge_target_table referencing old table as old_table for each statement execute procedure dump_delete(); -+ERROR: at or near "merge_target_table_delete_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger merge_target_table_delete_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create table merge_source_table (a int, b text); insert into merge_source_table values (1, 'initial1'), (2, 'initial2'), -@@ -3370,7 +4887,10 @@ +@@ -3370,7 +4312,10 @@ on t.a = s.a when not matched then insert values (a, b); @@ -82277,7 +81447,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la merge into merge_target_table t using merge_source_table s on t.a = s.a -@@ -3380,9 +4900,10 @@ +@@ -3380,9 +4325,10 @@ delete when not matched then insert values (a, b); @@ -82291,7 +81461,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la merge into merge_target_table t using merge_source_table s on t.a = s.a -@@ -3392,14 +4913,18 @@ +@@ -3392,14 +4338,18 @@ delete when not matched then insert values (a, b); @@ -82313,46 +81483,42 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la -- -- Tests for CREATE OR REPLACE TRIGGER -- -@@ -3409,100 +4934,229 @@ +@@ -3409,100 +4359,205 @@ raise notice 'hello from funcA'; return null; end; $$ language plpgsql; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev create function funcB() returns trigger as $$ begin raise notice 'hello from funcB'; return null; end; $$ language plpgsql; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev create trigger my_trig after insert on my_table for each row execute procedure funcA(); -+ERROR: at or near "my_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger my_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger my_trig before insert on my_table for each row execute procedure funcB(); -- should fail -ERROR: trigger "my_trig" for relation "my_table" already exists -+ERROR: at or near "my_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger my_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev insert into my_table values (1); -NOTICE: hello from funcA create or replace trigger my_trig before insert on my_table for each row execute procedure funcB(); -- OK -+ERROR: at or near "trigger": syntax error -+DETAIL: source SQL: -+create or replace trigger my_trig -+ ^ -+HINT: try \h CREATE ++ERROR: unimplemented: CREATE TRIGGER ++HINT: You have attempted to use a feature that is not yet implemented. 
++See: https://go.crdb.dev/issue-v/126359/dev insert into my_table values (2); -- this insert should become a no-op -NOTICE: hello from funcB table my_table; @@ -82401,22 +81567,18 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la create or replace trigger my_trig after insert on parted_trig for each row execute procedure funcA(); -+ERROR: at or near "trigger": syntax error -+DETAIL: source SQL: -+create or replace trigger my_trig -+ ^ -+HINT: try \h CREATE ++ERROR: unimplemented: CREATE TRIGGER ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126359/dev insert into parted_trig (a) values (50); -NOTICE: hello from funcA +ERROR: relation "parted_trig" does not exist create or replace trigger my_trig after insert on parted_trig for each row execute procedure funcB(); -+ERROR: at or near "trigger": syntax error -+DETAIL: source SQL: -+create or replace trigger my_trig -+ ^ -+HINT: try \h CREATE ++ERROR: unimplemented: CREATE TRIGGER ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126359/dev insert into parted_trig (a) values (50); -NOTICE: hello from funcB +ERROR: relation "parted_trig" does not exist @@ -82424,11 +81586,9 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la create or replace trigger my_trig after insert on parted_trig for each row execute procedure funcA(); -+ERROR: at or near "trigger": syntax error -+DETAIL: source SQL: -+create or replace trigger my_trig -+ ^ -+HINT: try \h CREATE ++ERROR: unimplemented: CREATE TRIGGER ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126359/dev insert into parted_trig (a) values (50); -NOTICE: hello from funcA +ERROR: relation "parted_trig" does not exist @@ -82436,33 +81596,25 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la after insert on parted_trig_1 for each row execute procedure funcB(); -- should fail -ERROR: trigger "my_trig" for relation "parted_trig_1" is an internal or a child trigger -+ERROR: at or near "trigger": syntax error -+DETAIL: source SQL: -+create or replace trigger my_trig -+ ^ -+HINT: try \h CREATE ++ERROR: unimplemented: CREATE TRIGGER ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126359/dev insert into parted_trig (a) values (50); -NOTICE: hello from funcA +ERROR: relation "parted_trig" does not exist drop trigger my_trig on parted_trig; -+ERROR: at or near "my_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+drop trigger my_trig on parted_trig -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev insert into parted_trig (a) values (50); +ERROR: relation "parted_trig" does not exist -- test that user trigger can be overwritten by one defined at upper level create trigger my_trig after insert on parted_trig_1 for each row execute procedure funcA(); -+ERROR: at or near "my_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger my_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev insert into parted_trig (a) values (50); -NOTICE: hello from funcA +ERROR: relation "parted_trig" does not exist @@ -82470,23 +81622,18 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la after insert on parted_trig for each row execute procedure funcB(); -- should fail -ERROR: trigger "my_trig" for relation "parted_trig_1" already exists -+ERROR: at or near "my_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger my_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev insert into parted_trig (a) values (50); -NOTICE: hello from funcA +ERROR: relation "parted_trig" does not exist create or replace trigger my_trig after insert on parted_trig for each row execute procedure funcB(); -+ERROR: at or near "trigger": syntax error -+DETAIL: source SQL: -+create or replace trigger my_trig -+ ^ -+HINT: try \h CREATE ++ERROR: unimplemented: CREATE TRIGGER ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126359/dev insert into parted_trig (a) values (50); -NOTICE: hello from funcB +ERROR: relation "parted_trig" does not exist @@ -82506,15 +81653,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la +HINT: try \h CREATE TABLE create function trigger_parted_trigfunc() returns trigger language plpgsql as $$ begin end; $$; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev create trigger aft_row after insert or update on trigger_parted for each row execute function trigger_parted_trigfunc(); -+ERROR: at or near "aft_row": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger aft_row after insert or update on trigger_parted -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create table trigger_parted_p1 partition of trigger_parted for values in (1) partition by list (a); +ERROR: at or near "partition": syntax error @@ -82556,59 +81702,56 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la -- verify transition table conversion slot's lifetime -- https://postgr.es/m/39a71864-b120-5a5c-8cc5-c632b6f16761@amazon.com create table convslot_test_parent (col1 text primary key); -@@ -3524,6 +5178,7 @@ +@@ -3524,6 +4579,9 @@ (select string_agg(old_table::text, ', ' order by col1) from old_table); return null; end; $$; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev create function convslot_trig2() returns trigger language plpgsql -@@ -3534,11 +5189,17 @@ +@@ -3534,11 +4592,16 @@ (select string_agg(new_table::text, ', ' order by col1) from new_table); return null; end; $$; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. 
++See: https://go.crdb.dev/issue-v/126356/dev create trigger but_trigger after update on convslot_test_child referencing new table as new_table for each statement execute function convslot_trig2(); -+ERROR: at or near "but_trigger": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger but_trigger after update on convslot_test_child -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev update convslot_test_parent set col1 = col1 || '1'; -NOTICE: trigger = but_trigger, new table = (11,tutu), (31,tutu) create function convslot_trig3() returns trigger language plpgsql -@@ -3550,117 +5211,224 @@ +@@ -3550,117 +4613,203 @@ (select string_agg(new_table::text, ', ' order by col1) from new_table); return null; end; $$; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev create trigger but_trigger2 after update on convslot_test_child referencing old table as old_table new table as new_table for each statement execute function convslot_trig3(); -+ERROR: at or near "but_trigger2": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger but_trigger2 after update on convslot_test_child -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev update convslot_test_parent set col1 = col1 || '1'; -NOTICE: trigger = but_trigger, new table = (111,tutu), (311,tutu) -NOTICE: trigger = but_trigger2, old_table = (11,tutu), (31,tutu), new table = (111,tutu), (311,tutu) create trigger bdt_trigger after delete on convslot_test_child referencing old table as old_table for each statement execute function convslot_trig1(); -+ERROR: at or near "bdt_trigger": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger bdt_trigger after delete on convslot_test_child -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev delete from convslot_test_parent; -NOTICE: trigger = bdt_trigger, old_table = (111,tutu), (311,tutu) drop table convslot_test_child, convslot_test_parent; @@ -82639,17 +81782,16 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la +HINT: try \h ALTER TABLE create function convslot_trig4() returns trigger as $$begin raise exception 'BOOM!'; end$$ language plpgsql; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev create trigger convslot_test_parent_update after update on convslot_test_parent referencing old table as old_rows new table as new_rows for each statement execute procedure convslot_trig4(); -+ERROR: at or near "convslot_test_parent_update": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger convslot_test_parent_update -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev insert into convslot_test_parent (id, val) values (1, 2); +ERROR: relation "convslot_test_parent" does not exist begin; @@ -82693,15 +81835,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la create function f () returns trigger as $$ begin return new; end; $$ language plpgsql; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev create trigger a after insert on grandparent for each row execute procedure f(); -+ERROR: at or near "a": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger a after insert on grandparent -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev alter trigger a on grandparent rename to b; +ERROR: at or near "trigger": syntax error +DETAIL: source SQL: @@ -82740,12 +81881,9 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la +HINT: try \h ALTER create trigger c after insert on middle for each row execute procedure f(); -+ERROR: at or near "c": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger c after insert on middle -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev alter trigger b on grandparent rename to c; -ERROR: trigger "c" for relation "middle" already exists +ERROR: at or near "trigger": syntax error @@ -82755,19 +81893,13 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la +HINT: try \h ALTER -- Rename cascading does not affect statement triggers create trigger p after insert on grandparent for each statement execute function f(); -+ERROR: at or near "p": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger p after insert on grandparent for each statement execute function f() -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger p after insert on middle for each statement execute function f(); -+ERROR: at or near "p": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger p after insert on middle for each statement execute function f() -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev alter trigger p on grandparent rename to q; +ERROR: at or near "trigger": syntax error +DETAIL: source SQL: @@ -82803,23 +81935,17 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/triggers.out --la +create table child () inherits (parent) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev create trigger parenttrig after insert on parent for each row execute procedure f(); -+ERROR: at or near "parenttrig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger parenttrig after insert on parent -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger parenttrig after insert on child for each row execute procedure f(); -+ERROR: at or near "parenttrig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger parenttrig after insert on child -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev alter trigger parenttrig on parent rename to anothertrig; +ERROR: at or near "trigger": syntax error +DETAIL: source SQL: @@ -83023,7 +82149,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/delete.out --labe a INT, b text ); -+NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. See https://www.cockroachlabs.com/docs/v24.2/serial.html ++NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. See https://www.cockroachlabs.com/docs/dev/serial.html INSERT INTO delete_test (a) VALUES (10); INSERT INTO delete_test (a, b) VALUES (50, repeat('x', 10000)); INSERT INTO delete_test (a) VALUES (100); @@ -83067,14 +82193,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inherit.out --lab +CREATE TABLE b (bb TEXT) INHERITS (a) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev CREATE TABLE c (cc TEXT) INHERITS (a); +ERROR: at or near "(": syntax error: unimplemented: this syntax +DETAIL: source SQL: +CREATE TABLE c (cc TEXT) INHERITS (a) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev CREATE TABLE d (dd TEXT) INHERITS (b,c,a); -NOTICE: merging multiple inherited definitions of column "aa" -NOTICE: merging multiple inherited definitions of column "aa" @@ -83083,7 +82209,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inherit.out --lab +CREATE TABLE d (dd TEXT) INHERITS (b,c,a) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev INSERT INTO a(aa) VALUES('aaa'); INSERT INTO a(aa) VALUES('aaaa'); INSERT INTO a(aa) VALUES('aaaaa'); @@ -83668,7 +82794,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inherit.out --lab +CREATE TEMP TABLE z (b TEXT, PRIMARY KEY(aa, b)) inherits (a) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev INSERT INTO z VALUES (NULL, 'text'); -- should fail -ERROR: null value in column "aa" of relation "z" violates not-null constraint -DETAIL: Failing row contains (null, text). 
@@ -83681,7 +82807,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inherit.out --lab +create table some_tab_child () inherits (some_tab) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev insert into some_tab_child values(1,2); +ERROR: relation "some_tab_child" does not exist explain (verbose, costs off) @@ -83737,7 +82863,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inherit.out --lab +create temp table foo2(f3 int) inherits (foo) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev create temp table bar(f1 int, f2 int); create temp table bar2(f3 int) inherits (bar); +ERROR: at or near "(": syntax error: unimplemented: this syntax @@ -83745,7 +82871,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inherit.out --lab +create temp table bar2(f3 int) inherits (bar) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev insert into foo values(1,1); insert into foo values(3,3); insert into foo2 values(2,2,2); @@ -83806,7 +82932,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inherit.out --lab +create table some_tab_child () inherits (some_tab) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev insert into some_tab_child values (1); +ERROR: relation "some_tab_child" does not exist create table parted_tab (a int, b char) partition by list (a); @@ -83947,7 +83073,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inherit.out --lab +CREATE TABLE jointchild () INHERITS (firstparent, secondparent) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev CREATE TABLE thirdparent (tomorrow date default now()::date - 1); CREATE TABLE otherchild () INHERITS (firstparent, thirdparent); -- not ok -NOTICE: merging multiple inherited definitions of column "tomorrow" @@ -83958,7 +83084,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inherit.out --lab +CREATE TABLE otherchild () INHERITS (firstparent, thirdparent) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev CREATE TABLE otherchild (tomorrow date default now()) INHERITS (firstparent, thirdparent); -- ok, child resolves ambiguous default -NOTICE: merging multiple inherited definitions of column "tomorrow" @@ -83969,7 +83095,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inherit.out --lab + INHERITS (firstparent, thirdparent) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev DROP TABLE firstparent, secondparent, jointchild, thirdparent, otherchild; +ERROR: relation "jointchild" does not exist -- Test changing the type of inherited columns @@ -83977,7 +83103,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inherit.out --lab +ERROR: relation "d" does not exist alter table a alter column aa type integer using bit_length(aa); +ERROR: ALTER COLUMN TYPE from string to int is only supported experimentally -+HINT: See: https://go.crdb.dev/issue-v/49329/v24.2 ++HINT: See: https://go.crdb.dev/issue-v/49329/dev +-- +you can enable alter column type general support by running `SET enable_experimental_alter_column_type_general = true` select * from d; @@ -83999,7 +83125,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inherit.out --lab +create temp table childtab(f4 int) inherits(parent1, parent2) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev alter table parent1 alter column f1 type bigint; -- fail, conflict w/parent2 -ERROR: cannot alter inherited column "f1" of relation "childtab" alter table parent1 alter column f2 type bigint; -- ok @@ -84028,7 +83154,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inherit.out --lab +create table c1 () inherits (p1) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev \d p1 - Table "public.p1" - Column | Type | Collation | Nullable | Default @@ -84085,7 +83211,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inherit.out --lab +create table derived () inherits (base) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev create table more_derived (like derived, b int) inherits (derived); -NOTICE: merging column "i" with inherited definition +ERROR: at or near "(": syntax error: unimplemented: this syntax @@ -84093,7 +83219,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inherit.out --lab +create table more_derived (like derived, b int) inherits (derived) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev insert into derived (i) values (0); +ERROR: relation "derived" does not exist select derived::base from derived; @@ -84153,7 +83279,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inherit.out --lab +create table c1(f3 int) inherits(p1,p2) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev insert into c1 values(123456789, 'hi', 42); +ERROR: relation "c1" does not exist select p2text(c1.*) from c1; @@ -84177,7 +83303,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inherit.out --lab +CREATE TABLE bc (bb TEXT) INHERITS (ac) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pg_get_expr(pgc.conbin, pc.oid) as consrc from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname in ('ac', 'bc') order by 1,2; relname | conname | contype | conislocal | coninhcount | consrc ---------+----------+---------+------------+-------------+------------------ @@ -84292,7 +83418,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inherit.out --lab +create table bc (a int constraint check_a check (a <> 0), b int constraint check_b check (b <> 0)) inherits (ac) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pg_get_expr(pgc.conbin, pc.oid) as consrc from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname in ('ac', 'bc') order by 1,2; relname | conname | contype | conislocal | coninhcount | consrc ---------+---------+---------+------------+-------------+---------- @@ -84315,7 +83441,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inherit.out --lab +create table cc (c int constraint check_c check (c <> 0)) inherits (ac, bc) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pg_get_expr(pgc.conbin, pc.oid) as consrc from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname in ('ac', 'bc', 'cc') order by 1,2; relname | conname | contype | conislocal | coninhcount | consrc ---------+---------+---------+------------+-------------+---------- @@ -84364,7 +83490,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inherit.out --lab +create table c1(f3 int) inherits(p1,p2) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev insert into c1 values(1,-1,2); +ERROR: relation "c1" does not exist alter table p2 add constraint cc check (f2>0); -- fail @@ -84386,7 +83512,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inherit.out --lab +create table c2(f3 int) inherits(p1,p2) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev \d c2 - Table "public.c2" - Column | Type | Collation | Nullable | Default @@ -84417,7 +83543,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inherit.out --lab +create table c3 (f4 int) inherits(c1,c2) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev \d c3 - Table "public.c3" - Column | Type | Collation | Nullable | Default @@ -84453,7 +83579,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inherit.out --lab +create table cc1 (f2 text, f3 int) inherits (pp1) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev alter table pp1 add column a1 int check (a1 > 0); \d cc1 - Table "public.cc1" @@ -84484,7 +83610,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inherit.out --lab +create table cc2(f4 float) inherits(pp1,cc1) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev \d cc2 - Table "public.cc2" - Column | Type | Collation | Nullable | Default @@ -84550,7 +83676,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inherit.out --lab +CREATE TABLE inhts (d int) INHERITS (inht1, inhs1) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev ALTER TABLE inht1 RENAME a TO aa; ALTER TABLE inht1 RENAME b TO bb; -- to be failed -ERROR: cannot rename inherited column "b" @@ -84588,14 +83714,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inherit.out --lab +CREATE TABLE inht2 (x int) INHERITS (inht1) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev CREATE TABLE inht3 (y int) INHERITS (inht1); +ERROR: at or near "(": syntax error: unimplemented: this syntax +DETAIL: source SQL: +CREATE TABLE inht3 (y int) INHERITS (inht1) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev CREATE TABLE inht4 (z int) INHERITS (inht2, inht3); -NOTICE: merging multiple inherited definitions of column "aa" -NOTICE: merging multiple inherited definitions of column "b" @@ -84604,7 +83730,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inherit.out --lab +CREATE TABLE inht4 (z int) INHERITS (inht2, inht3) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev ALTER TABLE inht1 RENAME aa TO aaa; \d+ inht4 - Table "public.inht4" @@ -84634,7 +83760,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inherit.out --lab +CREATE TABLE inhts (d int) INHERITS (inht2, inhs1) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev ALTER TABLE inht1 RENAME aaa TO aaaa; ALTER TABLE inht1 RENAME b TO bb; -- to be failed -ERROR: cannot rename inherited column "b" @@ -84696,7 +83822,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inherit.out --lab +CREATE TABLE test_constraints_inh () INHERITS (test_constraints) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev \d+ test_constraints - Table "public.test_constraints" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description @@ -84720,7 +83846,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inherit.out --lab ALTER TABLE ONLY test_constraints DROP CONSTRAINT test_constraints_val1_val2_key; +ERROR: unimplemented: cannot drop UNIQUE constraint "test_constraints_val1_val2_key" using ALTER TABLE DROP CONSTRAINT, use DROP INDEX CASCADE instead +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/42840/v24.2 ++See: https://go.crdb.dev/issue-v/42840/dev \d+ test_constraints - Table "public.test_constraints" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description @@ -84770,14 +83896,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inherit.out --lab + c circle, + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev CREATE TABLE test_ex_constraints_inh () INHERITS (test_ex_constraints); +ERROR: at or near "(": syntax error: unimplemented: this syntax +DETAIL: source SQL: +CREATE TABLE test_ex_constraints_inh () INHERITS (test_ex_constraints) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev \d+ test_ex_constraints - Table "public.test_ex_constraints" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description @@ -84843,7 +83969,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inherit.out --lab +CREATE TABLE test_foreign_constraints_inh () INHERITS (test_foreign_constraints) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev \d+ test_primary_constraints - Table "public.test_primary_constraints" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description @@ -84928,7 +84054,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inherit.out --lab +create table inh_fk_2_child () inherits (inh_fk_2) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev insert into inh_fk_2_child values (111, 1), (222, 2); +ERROR: relation "inh_fk_2_child" does not exist delete from inh_fk_1 where a = 1; @@ -84961,7 +84087,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inherit.out --lab +create table p1_c1() inherits(p1) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev alter table p1 add constraint inh_check_constraint1 check (f1 > 0); alter table p1_c1 add constraint inh_check_constraint1 check (f1 > 0); -NOTICE: merging constraint "inh_check_constraint1" with inherited definition @@ -84992,7 +84118,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inherit.out --lab +create table invalid_check_con_child() inherits(invalid_check_con) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev alter table invalid_check_con_child add constraint inh_check_constraint check(f1 > 0) not valid; +ERROR: relation "invalid_check_con_child" does not exist alter table invalid_check_con add constraint inh_check_constraint check(f1 > 0); -- fail @@ -85039,7 +84165,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inherit.out --lab +create temp table patest1() inherits (patest0) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev insert into patest1 select x, x from generate_series(0,1000) x; +ERROR: relation "patest1" does not exist @@ -85049,7 +84175,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inherit.out --lab +create temp table patest2() inherits (patest0) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev insert into patest2 select x, x from generate_series(0,1000) x; +ERROR: relation "patest2" does not exist @@ -85133,7 +84259,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inherit.out --lab -- Test merge-append plans for inheritance trees -- create table matest0 (id serial primary key, name text); -+NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. See https://www.cockroachlabs.com/docs/v24.2/serial.html ++NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. See https://www.cockroachlabs.com/docs/dev/serial.html create table matest1 (id integer primary key) inherits (matest0); -NOTICE: merging column "id" with inherited definition +ERROR: at or near "(": syntax error: unimplemented: this syntax @@ -85141,7 +84267,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inherit.out --lab +create table matest1 (id integer primary key) inherits (matest0) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev create table matest2 (id integer primary key) inherits (matest0); -NOTICE: merging column "id" with inherited definition +ERROR: at or near "(": syntax error: unimplemented: this syntax @@ -85149,7 +84275,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inherit.out --lab +create table matest2 (id integer primary key) inherits (matest0) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev create table matest3 (id integer primary key) inherits (matest0); -NOTICE: merging column "id" with inherited definition +ERROR: at or near "(": syntax error: unimplemented: this syntax @@ -85157,7 +84283,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inherit.out --lab +create table matest3 (id integer primary key) inherits (matest0) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev create index matest0i on matest0 ((1-id)); create index matest1i on matest1 ((1-id)); +ERROR: relation "matest1" does not exist @@ -85371,7 +84497,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inherit.out --lab +create table matest1 () inherits(matest0) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev create index matest0i on matest0 (b, c); create index matest1i on matest1 (b, c); +ERROR: relation "matest1" does not exist @@ -85811,7 +84937,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inherit.out --lab +create table cnullchild (check (f1 = 1 or f1 = null)) inherits(cnullparent) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev insert into cnullchild values(1); +ERROR: relation "cnullchild" does not exist insert into cnullchild values(2); @@ -85847,7 +84973,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inherit.out --lab +create temp table inh_temp_child () inherits (inh_perm_parent) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev create table inh_perm_child () inherits (inh_temp_parent); -- error -ERROR: cannot inherit from temporary relation "inh_temp_parent" +ERROR: at or near "(": syntax error: unimplemented: this syntax @@ -85855,14 +84981,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inherit.out --lab +create table inh_perm_child () inherits (inh_temp_parent) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev create temp table inh_temp_child_2 () inherits (inh_temp_parent); -- ok +ERROR: at or near "(": syntax error: unimplemented: this syntax +DETAIL: source SQL: +create temp table inh_temp_child_2 () inherits (inh_temp_parent) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev insert into inh_perm_parent values (1); insert into inh_temp_parent values (2); insert into inh_temp_child values (3); @@ -87192,7 +86318,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inherit.out --lab +set session authorization regress_no_child_access + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev -- without stats access, these queries would produce hash join plans: explain (costs off) select * from permtest_parent p1 inner join permtest_parent p2 @@ -87248,7 +86374,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/inherit.out --lab +set session authorization regress_no_child_access + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev explain (costs off) select p2.a, p1.c from permtest_parent p1 inner join permtest_parent p2 on p1.a = p2.a and p1.c ~ 'a1$'; @@ -88137,7 +87263,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/case.out --label= +CREATE DOMAIN foodomain AS text + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev CREATE FUNCTION volfoo(text) returns foodomain as 'begin return $1::foodomain; end' language plpgsql volatile; +ERROR: current transaction is aborted, commands ignored until end of transaction block @@ -88151,7 +87277,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/case.out --label= +CREATE OPERATOR = (procedure = inline_eq, + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev SELECT CASE volfoo('bar') WHEN 'foo'::foodomain THEN 'is foo' ELSE 'is not foo' END; - case ------------- @@ -88170,7 +87296,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/case.out --label= +CREATE DOMAIN arrdomain AS int[] + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev CREATE FUNCTION make_ad(int,int) returns arrdomain as 'declare x arrdomain; begin @@ -88188,7 +87314,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/case.out --label= +CREATE OPERATOR = (procedure = ad_eq, + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev SELECT CASE make_ad(1,2) WHEN array[2,4]::arrdomain THEN 'wrong' WHEN array[2,5]::arrdomain THEN 'still wrong' @@ -88243,7 +87369,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/select_into.out - +SET SESSION AUTHORIZATION regress_selinto_user + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev -- WITH DATA, passes. CREATE TABLE selinto_schema.tbl_withdata1 (a) AS SELECT generate_series(1,3) WITH DATA; @@ -88358,7 +87484,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/select_into.out - +SET SESSION AUTHORIZATION regress_selinto_user + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev RESET SESSION AUTHORIZATION; +ERROR: at or near "authorization": syntax error +DETAIL: source SQL: @@ -89618,7 +88744,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/portals.out --lab DECLARE foo1 SCROLL CURSOR FOR SELECT * FROM tenk1 ORDER BY unique2; +ERROR: unimplemented: DECLARE SCROLL CURSOR +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/77102/v24.2 ++See: https://go.crdb.dev/issue-v/77102/dev DECLARE foo2 SCROLL CURSOR FOR SELECT * FROM tenk2; +ERROR: current transaction is aborted, commands ignored until end of transaction block DECLARE foo3 SCROLL CURSOR FOR SELECT * FROM tenk1 ORDER BY unique2; @@ -90416,7 +89542,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/portals.out --lab DECLARE foo25 SCROLL CURSOR WITH HOLD FOR SELECT * FROM tenk2; +ERROR: unimplemented: DECLARE SCROLL CURSOR +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/77102/v24.2 ++See: https://go.crdb.dev/issue-v/77102/dev FETCH FROM foo25; - unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 ----------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- @@ -90483,7 +89609,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/portals.out --lab COMMIT; +ERROR: unimplemented: cursor foo25ns WITH HOLD must be closed before committing +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/77101/v24.2 ++See: https://go.crdb.dev/issue-v/77101/dev FETCH FROM foo25ns; - unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 ----------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- @@ -90578,7 +89704,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/portals.out --lab commit; +ERROR: unimplemented: cursor c2 WITH HOLD must be closed before committing +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/77101/v24.2 ++See: https://go.crdb.dev/issue-v/77101/dev delete from tt1; fetch all from c2; - count_tt1_v | count_tt1_s @@ -90603,7 +89729,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/portals.out --lab DECLARE bc BINARY CURSOR FOR SELECT * FROM tenk1; +ERROR: unimplemented: DECLARE BINARY CURSOR +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/77099/v24.2 ++See: https://go.crdb.dev/issue-v/77099/dev SELECT name, statement, is_holdable, is_binary, is_scrollable FROM pg_cursors ORDER BY 1; - name | statement | is_holdable | is_binary | is_scrollable -------+----------------------------------------------------------------------+-------------+-----------+--------------- @@ -90952,7 +90078,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/portals.out --lab +CREATE TEMP TABLE ucchild () inherits (uctest) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev INSERT INTO ucchild values(100, 'hundred'); +ERROR: relation "ucchild" does not exist SELECT * FROM uctest; @@ -91101,7 +90227,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/portals.out --lab DECLARE cx CURSOR WITH HOLD FOR SELECT * FROM uctest; +ERROR: unimplemented: DECLARE CURSOR WITH HOLD can only be used in transaction blocks +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/77101/v24.2 ++See: https://go.crdb.dev/issue-v/77101/dev DELETE FROM uctest WHERE CURRENT OF cx; -- fail, can't use held cursor -ERROR: cursor "cx" is held from a previous transaction +ERROR: at or near "of": syntax error @@ -91238,14 +90364,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/portals.out --lab +CREATE TABLE current_check_1 () INHERITS (current_check) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev CREATE TABLE current_check_2 () INHERITS (current_check); +ERROR: at or near "(": syntax error: unimplemented: this syntax +DETAIL: source SQL: +CREATE TABLE current_check_2 () INHERITS (current_check) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev INSERT INTO current_check_1 SELECT i, 'p' || i FROM generate_series(1,9) i; +ERROR: current transaction is aborted, commands ignored until end of transaction block INSERT INTO current_check_2 SELECT i, 'P' || i FROM generate_series(10,19) i; @@ -91904,7 +91030,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/union.out --label +select x from (values (array[100::money]), (array[200::money])) _(x) union select x from (values (array[100::money]), (array[300::money])) _(x) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/41578/v24.2 ++See: https://go.crdb.dev/issue-v/41578/dev set enable_hashagg to off; +ERROR: unimplemented: the configuration setting "enable_hashagg" is not supported +HINT: You have attempted to use a feature that is not yet implemented. @@ -92116,7 +91242,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/union.out --label +select x from (values (row(100::money)), (row(200::money))) _(x) union select x from (values (row(100::money)), (row(300::money))) _(x) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/41578/v24.2 ++See: https://go.crdb.dev/issue-v/41578/dev -- With a defined row type, the typcache can inspect the type's fields -- for hashability. create type ct1 as (f1 money); @@ -92125,7 +91251,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/union.out --label +create type ct1 as (f1 money) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/41578/v24.2 ++See: https://go.crdb.dev/issue-v/41578/dev explain (costs off) select x from (values (row(100::money)::ct1), (row(200::money)::ct1)) _(x) union select x from (values (row(100::money)::ct1), (row(300::money)::ct1)) _(x); - QUERY PLAN @@ -92156,7 +91282,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/union.out --label +select x from (values (row(100::money)::ct1), (row(200::money)::ct1)) _(x) union select x from (values (row(100::money)::ct1), (row(300::money)::ct1)) _(x) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/41578/v24.2 ++See: https://go.crdb.dev/issue-v/41578/dev drop type ct1; +ERROR: type "ct1" does not exist set enable_hashagg to off; @@ -92595,7 +91721,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/union.out --label +CREATE TEMP TABLE t2c (primary key (ab)) INHERITS (t2) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev INSERT INTO t1c VALUES ('v', 'w'), ('c', 'd'), ('m', 'n'), ('e', 'f'); INSERT INTO t2c VALUES ('vw'), ('cd'), ('mn'), ('ef'); +ERROR: relation "t2c" does not exist @@ -92692,7 +91818,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/union.out --label +create table events_child () inherits (events) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev explain (costs off) select event_id from (select event_id from events @@ -93394,7 +92520,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/subselect.out --l +create operator = (procedure=bogus_int8_text_eq, leftarg=int8, rightarg=text) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev explain (costs off) select * from int8_tbl where q1 in (select c1 from inner_text); - QUERY PLAN @@ -94016,7 +93142,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/subselect.out --l $$; +ERROR: unimplemented: set-returning PL/pgSQL functions are not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/105240/v24.2 ++See: https://go.crdb.dev/issue-v/105240/dev select * from explain_sq_limit(); - explain_sq_limit ----------------------------------------------------------------- @@ -94049,7 +93175,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/subselect.out --l where i <> all (values (2),(3)); +ERROR: unimplemented: DECLARE SCROLL CURSOR +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/77102/v24.2 ++See: https://go.crdb.dev/issue-v/77102/dev move forward all in c1; +ERROR: current transaction is aborted, commands ignored until end of transaction block fetch backward all in c1; @@ -94353,7 +93479,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/arrays.out --labe + b int4[][][], + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/32552/v24.2 ++See: https://go.crdb.dev/issue-v/32552/dev CREATE TABLE array_op_test ( seqno int4, i int4[], @@ -94651,7 +93777,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/arrays.out --labe - +ERROR: could not parse "{{1,2,3},{4,5,6},{7,8,9}}" as type int[]: unimplemented: nested arrays not supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/32552/v24.2 ++See: https://go.crdb.dev/issue-v/32552/dev select ('{{1,2,3},{4,5,6},{7,8,9}}'::int[])[1:2][2]; - int4 ---------------- @@ -94660,7 +93786,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/arrays.out --labe - +ERROR: could not parse "{{1,2,3},{4,5,6},{7,8,9}}" as type int[]: unimplemented: nested arrays not supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/32552/v24.2 ++See: https://go.crdb.dev/issue-v/32552/dev select '[0:2][0:2]={{1,2,3},{4,5,6},{7,8,9}}'::int[]; - int4 --------------------------------------- @@ -94683,7 +93809,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/arrays.out --labe -ERROR: number of array dimensions (7) exceeds the maximum allowed (6) +ERROR: unimplemented: multidimensional indexing: ARRAY[][1][2][3][4][5][6][7] +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/32552/v24.2 ++See: https://go.crdb.dev/issue-v/32552/dev -- NULL index yields NULL when selecting SELECT ('{{{1},{2},{3}},{{4},{5},{6}}}'::int[])[1][NULL][1]; - int4 @@ -94693,7 +93819,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/arrays.out --labe - +ERROR: could not parse "{{{1},{2},{3}},{{4},{5},{6}}}" as type int[]: unimplemented: nested arrays not supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/32552/v24.2 ++See: https://go.crdb.dev/issue-v/32552/dev SELECT ('{{{1},{2},{3}},{{4},{5},{6}}}'::int[])[1][NULL:1][1]; - int4 ------- @@ -94702,7 +93828,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/arrays.out --labe - +ERROR: could not parse "{{{1},{2},{3}},{{4},{5},{6}}}" as type int[]: unimplemented: nested arrays not supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/32552/v24.2 ++See: https://go.crdb.dev/issue-v/32552/dev SELECT ('{{{1},{2},{3}},{{4},{5},{6}}}'::int[])[1][1:NULL][1]; - int4 ------- @@ -94711,7 +93837,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/arrays.out --labe - +ERROR: could not parse "{{{1},{2},{3}},{{4},{5},{6}}}" as type int[]: unimplemented: nested arrays not supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/32552/v24.2 ++See: https://go.crdb.dev/issue-v/32552/dev -- NULL index in assignment is an error UPDATE arrtest SET c[NULL] = '{"can''t assign"}' @@ -94762,7 +93888,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/arrays.out --labe + b int2[][] + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/32552/v24.2 ++See: https://go.crdb.dev/issue-v/32552/dev INSERT INTO arrtest_s VALUES ('{1,2,3,4,5}', '{{1,2,3}, {4,5,6}, {7,8,9}}'); +ERROR: relation "arrtest_s" does not exist INSERT INTO arrtest_s VALUES ('[0:4]={1,2,3,4,5}', '[0:2][0:2]={{1,2,3}, {4,5,6}, {7,8,9}}'); @@ -95159,7 +94285,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/arrays.out --labe - +ERROR: unimplemented: multidimensional indexing: f[1][3][1] +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/32552/v24.2 ++See: https://go.crdb.dev/issue-v/32552/dev SELECT ARRAY[[[[[['hello'],['world']]]]]]; - array ---------------------------- @@ -95923,7 +95049,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/arrays.out --labe -DETAIL: Unexpected "{" character. +ERROR: could not parse "{{1,{2}},{2,3}}" as type string[]: unimplemented: nested arrays not supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/32552/v24.2 ++See: https://go.crdb.dev/issue-v/32552/dev select '{{},{}}'::text[]; -ERROR: malformed array literal: "{{},{}}" -LINE 1: select '{{},{}}'::text[]; @@ -95931,7 +95057,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/arrays.out --labe -DETAIL: Unexpected "}" character. +ERROR: could not parse "{{},{}}" as type string[]: unimplemented: nested arrays not supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/32552/v24.2 ++See: https://go.crdb.dev/issue-v/32552/dev select E'{{1,2},\\{2,3}}'::text[]; -ERROR: malformed array literal: "{{1,2},\{2,3}}" -LINE 1: select E'{{1,2},\\{2,3}}'::text[]; @@ -95939,7 +95065,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/arrays.out --labe -DETAIL: Unexpected "\" character. +ERROR: could not parse "{{1,2},\\{2,3}}" as type string[]: unimplemented: nested arrays not supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/32552/v24.2 ++See: https://go.crdb.dev/issue-v/32552/dev select '{{"1 2" x},{3}}'::text[]; -ERROR: malformed array literal: "{{"1 2" x},{3}}" -LINE 1: select '{{"1 2" x},{3}}'::text[]; @@ -95947,7 +95073,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/arrays.out --labe -DETAIL: Unexpected array element. +ERROR: could not parse "{{\"1 2\" x},{3}}" as type string[]: unimplemented: nested arrays not supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/32552/v24.2 ++See: https://go.crdb.dev/issue-v/32552/dev select '{}}'::text[]; -ERROR: malformed array literal: "{}}" -LINE 1: select '{}}'::text[]; @@ -95980,7 +95106,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/arrays.out --labe - +ERROR: could not parse "{{{1,2,3,4},{2,3,4,5}},{{3,4,5,6},{4,5,6,7}}}" as type string[]: unimplemented: nested arrays not supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/32552/v24.2 ++See: https://go.crdb.dev/issue-v/32552/dev select '{0 second ,0 second}'::interval[]; - interval ---------------- @@ -95998,7 +95124,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/arrays.out --labe - +ERROR: could not parse "{ { \",\" } , { 3 } }" as type string[]: unimplemented: nested arrays not supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/32552/v24.2 ++See: https://go.crdb.dev/issue-v/32552/dev select ' { { " 0 second " , 0 second } }'::text[]; - text -------------------------------- @@ -96007,7 +95133,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/arrays.out --labe - +ERROR: could not parse " { { \" 0 second \" , 0 second } }" as type string[]: unimplemented: nested arrays not supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/32552/v24.2 ++See: https://go.crdb.dev/issue-v/32552/dev select '{ 0 second, @ 1 hour @ 42 minutes @ 20 seconds @@ -96039,7 +95165,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/arrays.out --labe +CREATE TEMP TABLE arraggtest ( f1 INT[], f2 TEXT[][], f3 FLOAT[]) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/32552/v24.2 ++See: https://go.crdb.dev/issue-v/32552/dev INSERT INTO arraggtest (f1, f2, f3) VALUES ('{1,2,3,4}','{{grey,red},{blue,blue}}','{1.6, 0.0}'); +ERROR: relation "arraggtest" does not exist @@ -96128,7 +95254,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/arrays.out --labe $$ language sql immutable; +ERROR: unimplemented: multidimensional indexing: ""[s1][s2] +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/32552/v24.2 ++See: https://go.crdb.dev/issue-v/32552/dev select * from unnest1(array[1,2,3]); unnest1 --------- @@ -96401,7 +95527,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/arrays.out --labe - +ERROR: cardinality(): could not parse "{{1,2}}" as type int[]: unimplemented: nested arrays not supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/32552/v24.2 ++See: https://go.crdb.dev/issue-v/32552/dev select cardinality('{{1,2},{3,4},{5,6}}'::int[]); - cardinality -------------- @@ -96410,7 +95536,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/arrays.out --labe - +ERROR: cardinality(): could not parse "{{1,2},{3,4},{5,6}}" as type int[]: unimplemented: nested arrays not supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/32552/v24.2 ++See: https://go.crdb.dev/issue-v/32552/dev select cardinality('{{{1,9},{5,6}},{{2,3},{3,4}}}'::int[]); - cardinality -------------- @@ -96419,7 +95545,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/arrays.out --labe - +ERROR: cardinality(): could not parse "{{{1,9},{5,6}},{{2,3},{3,4}}}" as type int[]: unimplemented: nested arrays not supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/32552/v24.2 ++See: https://go.crdb.dev/issue-v/32552/dev -- array_agg(anynonarray) select array_agg(unique1) from (select unique1 from tenk1 where unique1 < 15 order by unique1) ss; array_agg @@ -96535,7 +95661,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/arrays.out --labe create temp table t1 (f1 int8_tbl[]); +ERROR: unimplemented: cannot use table record type as table column +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/70099/v24.2 ++See: https://go.crdb.dev/issue-v/70099/dev insert into t1 (f1[5].q1) values(42); +ERROR: at or near "[": syntax error +DETAIL: source SQL: @@ -97720,20 +96846,19 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/update.out --labe CREATE FUNCTION trans_updatetrigfunc() RETURNS trigger LANGUAGE plpgsql AS $$ begin -@@ -489,47 +476,62 @@ +@@ -489,47 +476,49 @@ return null; end; $$; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev CREATE TRIGGER trans_updatetrig AFTER UPDATE ON range_parted REFERENCING OLD TABLE AS old_table NEW TABLE AS new_table FOR EACH STATEMENT EXECUTE PROCEDURE trans_updatetrigfunc(); -+ERROR: at or near "trans_updatetrig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER trans_updatetrig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev UPDATE range_parted set c = (case when c = 96 then 110 else c + 1 end ) WHERE a = 'b' and b > 10 and c >= 96; -NOTICE: trigger = trans_updatetrig, old table = (b,12,96,1,), (b,13,97,2,), (b,15,105,16,), (b,17,105,19,), new table = (b,12,110,1,), (b,13,98,2,), (b,15,106,16,), (b,17,106,19,) +ERROR: relation "range_parted" does not exist @@ -97758,21 +96883,15 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/update.out --labe CREATE TRIGGER trans_deletetrig AFTER DELETE ON range_parted REFERENCING OLD TABLE AS old_table FOR EACH STATEMENT EXECUTE PROCEDURE trans_updatetrigfunc(); -+ERROR: at or near "trans_deletetrig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER trans_deletetrig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE TRIGGER trans_inserttrig AFTER INSERT ON range_parted REFERENCING NEW TABLE AS new_table FOR EACH STATEMENT EXECUTE PROCEDURE trans_updatetrigfunc(); -+ERROR: at or near "trans_inserttrig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER trans_inserttrig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev UPDATE range_parted set c = c + 50 WHERE a = 'b' and b > 10 and c >= 96; -NOTICE: trigger = trans_updatetrig, old table = (b,12,96,1,), (b,13,97,2,), (b,15,105,16,), (b,17,105,19,), new table = (b,12,146,1,), (b,13,147,2,), (b,15,155,16,), (b,17,155,19,) +ERROR: relation "range_parted" does not exist @@ -97789,51 +96908,38 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/update.out --labe - +ERROR: relation "range_parted" does not exist DROP TRIGGER trans_deletetrig ON range_parted; -+ERROR: at or near "trans_deletetrig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+DROP TRIGGER trans_deletetrig ON range_parted -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev DROP TRIGGER trans_inserttrig ON range_parted; -+ERROR: at or near "trans_inserttrig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+DROP TRIGGER trans_inserttrig ON range_parted -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev -- Don't drop trans_updatetrig yet. It is required below. -- Test with transition tuple conversion happening for rows moved into the -- new partition. This requires a trigger that references transition table -@@ -543,139 +545,306 @@ +@@ -543,139 +532,283 @@ NEW.b = NEW.b + 1; return NEW; END $$ language plpgsql; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. 
++See: https://go.crdb.dev/issue-v/126356/dev CREATE TRIGGER trig_c1_100 BEFORE UPDATE OR INSERT ON part_c_1_100 FOR EACH ROW EXECUTE PROCEDURE func_parted_mod_b(); -+ERROR: at or near "trig_c1_100": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER trig_c1_100 BEFORE UPDATE OR INSERT ON part_c_1_100 -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE TRIGGER trig_d1_15 BEFORE UPDATE OR INSERT ON part_d_1_15 FOR EACH ROW EXECUTE PROCEDURE func_parted_mod_b(); -+ERROR: at or near "trig_d1_15": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER trig_d1_15 BEFORE UPDATE OR INSERT ON part_d_1_15 -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE TRIGGER trig_d15_20 BEFORE UPDATE OR INSERT ON part_d_15_20 FOR EACH ROW EXECUTE PROCEDURE func_parted_mod_b(); -+ERROR: at or near "trig_d15_20": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER trig_d15_20 BEFORE UPDATE OR INSERT ON part_d_15_20 -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev :init_range_parted; +ERROR: relation "range_parted" does not exist +ERROR: relation "range_parted" does not exist @@ -97892,33 +96998,21 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/update.out --labe - +ERROR: relation "range_parted" does not exist DROP TRIGGER trans_updatetrig ON range_parted; -+ERROR: at or near "trans_updatetrig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+DROP TRIGGER trans_updatetrig ON range_parted -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev DROP TRIGGER trig_c1_100 ON part_c_1_100; -+ERROR: at or near "trig_c1_100": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+DROP TRIGGER trig_c1_100 ON part_c_1_100 -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev DROP TRIGGER trig_d1_15 ON part_d_1_15; -+ERROR: at or near "trig_d1_15": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+DROP TRIGGER trig_d1_15 ON part_d_1_15 -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev DROP TRIGGER trig_d15_20 ON part_d_15_20; -+ERROR: at or near "trig_d15_20": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+DROP TRIGGER trig_d15_20 ON part_d_15_20 -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev DROP FUNCTION func_parted_mod_b(); +ERROR: unknown function: func_parted_mod_b() -- RLS policies with update-row-movement @@ -97953,7 +97047,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/update.out --labe +SET SESSION AUTHORIZATION regress_range_parted_user + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev -- This should fail with RLS violation error while moving row from -- part_a_10_a_20 to part_d_1_15, because we are setting 'c' to an odd number. UPDATE range_parted set a = 'b', c = 151 WHERE a = 'a' and c = 200; @@ -97971,15 +97065,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/update.out --labe NEW.c = NEW.c + 1; -- Make even numbers odd, or vice versa return NEW; END $$ LANGUAGE plpgsql; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev CREATE TRIGGER trig_d_1_15 BEFORE INSERT ON part_d_1_15 FOR EACH ROW EXECUTE PROCEDURE func_d_1_15(); -+ERROR: at or near "trig_d_1_15": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER trig_d_1_15 BEFORE INSERT ON part_d_1_15 -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev :init_range_parted; +ERROR: relation "range_parted" does not exist +ERROR: relation "range_parted" does not exist @@ -97989,7 +97082,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/update.out --labe +SET SESSION AUTHORIZATION regress_range_parted_user + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev -- Here, RLS checks should succeed while moving row from part_a_10_a_20 to -- part_d_1_15. Even though the UPDATE is setting 'c' to an odd number, the -- trigger at the destination partition again makes it an even number. @@ -98010,7 +97103,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/update.out --labe +SET SESSION AUTHORIZATION regress_range_parted_user + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev -- This should fail with RLS violation error. Even though the UPDATE is setting -- 'c' to an even number, the trigger at the destination partition again makes -- it an odd number. @@ -98025,12 +97118,9 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/update.out --labe + ^ +HINT: try \h RESET DROP TRIGGER trig_d_1_15 ON part_d_1_15; -+ERROR: at or near "trig_d_1_15": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+DROP TRIGGER trig_d_1_15 ON part_d_1_15 -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev DROP FUNCTION func_d_1_15(); +ERROR: unknown function: func_d_1_15() -- Policy expression contains SubPlan @@ -98057,7 +97147,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/update.out --labe +SET SESSION AUTHORIZATION regress_range_parted_user + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev -- fail, mintab has row with c1 = 120 UPDATE range_parted set a = 'b', c = 122 WHERE a = 'a' and c = 200; -ERROR: new row violates row-level security policy "policy_range_parted_subplan" for table "range_parted" @@ -98088,7 +97178,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/update.out --labe +SET SESSION AUTHORIZATION regress_range_parted_user + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev -- ok, should pass the RLS check UPDATE range_parted set a = 'b', c = 112 WHERE a = 'a' and c = 200; +ERROR: relation "range_parted" does not exist @@ -98107,7 +97197,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/update.out --labe +SET SESSION AUTHORIZATION regress_range_parted_user + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev -- fail, the whole row RLS check should fail UPDATE range_parted set a = 'b', c = 116 WHERE a = 'a' and c = 200; -ERROR: new row violates row-level security policy "policy_range_parted_wholerow" for table "range_parted" @@ -98149,111 +97239,77 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/update.out --labe CREATE FUNCTION trigfunc() returns trigger language plpgsql as $$ begin -@@ -684,268 +853,419 @@ +@@ -684,268 +817,345 @@ return null; end; $$; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev -- Triggers on root partition CREATE TRIGGER parent_delete_trig AFTER DELETE ON range_parted for each statement execute procedure trigfunc(); -+ERROR: at or near "parent_delete_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER parent_delete_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE TRIGGER parent_update_trig AFTER UPDATE ON range_parted for each statement execute procedure trigfunc(); -+ERROR: at or near "parent_update_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER parent_update_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE TRIGGER parent_insert_trig AFTER INSERT ON range_parted for each statement execute procedure trigfunc(); -+ERROR: at or near "parent_insert_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER parent_insert_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev -- Triggers on leaf partition part_c_1_100 CREATE TRIGGER c1_delete_trig AFTER DELETE ON part_c_1_100 for each statement execute procedure trigfunc(); -+ERROR: at or near "c1_delete_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER c1_delete_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE TRIGGER c1_update_trig AFTER UPDATE ON part_c_1_100 for each statement execute procedure trigfunc(); -+ERROR: at or near "c1_update_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER c1_update_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE TRIGGER c1_insert_trig AFTER INSERT ON part_c_1_100 for each statement execute procedure trigfunc(); -+ERROR: at or near "c1_insert_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER c1_insert_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev -- Triggers on leaf partition part_d_1_15 CREATE TRIGGER d1_delete_trig AFTER DELETE ON part_d_1_15 for each statement execute procedure trigfunc(); -+ERROR: at or near "d1_delete_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER d1_delete_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE TRIGGER d1_update_trig AFTER UPDATE ON part_d_1_15 for each statement execute procedure trigfunc(); -+ERROR: at or near "d1_update_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER d1_update_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE TRIGGER d1_insert_trig AFTER INSERT ON part_d_1_15 for each statement execute procedure trigfunc(); -+ERROR: at or near "d1_insert_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER d1_insert_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev -- Triggers on leaf partition part_d_15_20 CREATE TRIGGER d15_delete_trig AFTER DELETE ON part_d_15_20 for each statement execute procedure trigfunc(); -+ERROR: at or near "d15_delete_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER d15_delete_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE TRIGGER d15_update_trig AFTER UPDATE ON part_d_15_20 for each statement execute procedure trigfunc(); -+ERROR: at or near "d15_update_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER d15_update_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE TRIGGER d15_insert_trig AFTER INSERT ON part_d_15_20 for each statement execute procedure trigfunc(); -+ERROR: at or near "d15_insert_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER d15_insert_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev -- Move all rows from part_c_100_200 to part_c_1_100. None of the delete or -- insert statement triggers should be fired. UPDATE range_parted set c = c - 50 WHERE c > 97; @@ -98272,89 +97328,53 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/update.out --labe - +ERROR: relation "range_parted" does not exist DROP TRIGGER parent_delete_trig ON range_parted; -+ERROR: at or near "parent_delete_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+DROP TRIGGER parent_delete_trig ON range_parted -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev DROP TRIGGER parent_update_trig ON range_parted; -+ERROR: at or near "parent_update_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+DROP TRIGGER parent_update_trig ON range_parted -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev DROP TRIGGER parent_insert_trig ON range_parted; -+ERROR: at or near "parent_insert_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+DROP TRIGGER parent_insert_trig ON range_parted -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev DROP TRIGGER c1_delete_trig ON part_c_1_100; -+ERROR: at or near "c1_delete_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+DROP TRIGGER c1_delete_trig ON part_c_1_100 -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev DROP TRIGGER c1_update_trig ON part_c_1_100; -+ERROR: at or near "c1_update_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+DROP TRIGGER c1_update_trig ON part_c_1_100 -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev DROP TRIGGER c1_insert_trig ON part_c_1_100; -+ERROR: at or near "c1_insert_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+DROP TRIGGER c1_insert_trig ON part_c_1_100 -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev DROP TRIGGER d1_delete_trig ON part_d_1_15; -+ERROR: at or near "d1_delete_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+DROP TRIGGER d1_delete_trig ON part_d_1_15 -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev DROP TRIGGER d1_update_trig ON part_d_1_15; -+ERROR: at or near "d1_update_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+DROP TRIGGER d1_update_trig ON part_d_1_15 -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev DROP TRIGGER d1_insert_trig ON part_d_1_15; -+ERROR: at or near "d1_insert_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+DROP TRIGGER d1_insert_trig ON part_d_1_15 -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev DROP TRIGGER d15_delete_trig ON part_d_15_20; -+ERROR: at or near "d15_delete_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+DROP TRIGGER d15_delete_trig ON part_d_15_20 -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev DROP TRIGGER d15_update_trig ON part_d_15_20; -+ERROR: at or near "d15_update_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+DROP TRIGGER d15_update_trig ON part_d_15_20 -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev DROP TRIGGER d15_insert_trig ON part_d_15_20; -+ERROR: at or near "d15_insert_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+DROP TRIGGER d15_insert_trig ON part_d_15_20 -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev -- Creating default partition for range :init_range_parted; +ERROR: relation "range_parted" does not exist @@ -98645,15 +97665,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/update.out --labe NEW.b = 2; -- This is changing partition key column. return NEW; END $$ LANGUAGE plpgsql; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. 
++See: https://go.crdb.dev/issue-v/126356/dev CREATE TRIGGER parted_mod_b before update on sub_part1 for each row execute procedure func_parted_mod_b(); -+ERROR: at or near "parted_mod_b": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER parted_mod_b before update on sub_part1 -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev SELECT tableoid::regclass::text, * FROM list_parted ORDER BY 1, 2, 3, 4; - tableoid | a | b | c -------------+---+----+---- @@ -98679,28 +97698,24 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/update.out --labe - +ERROR: relation "list_parted" does not exist DROP TRIGGER parted_mod_b ON sub_part1; -+ERROR: at or near "parted_mod_b": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+DROP TRIGGER parted_mod_b ON sub_part1 -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev -- If BR DELETE trigger prevented DELETE from happening, we should also skip -- the INSERT if that delete is part of UPDATE=>DELETE+INSERT. CREATE OR REPLACE FUNCTION func_parted_mod_b() returns trigger as $$ -@@ -953,76 +1273,126 @@ +@@ -953,76 +1163,122 @@ raise notice 'Trigger: Got OLD row %, but returning NULL', OLD; return NULL; END $$ LANGUAGE plpgsql; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev CREATE TRIGGER trig_skip_delete before delete on sub_part2 for each row execute procedure func_parted_mod_b(); -+ERROR: at or near "trig_skip_delete": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER trig_skip_delete before delete on sub_part2 -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev UPDATE list_parted set b = 1 WHERE c = 70; -NOTICE: Trigger: Got OLD row (2,70,1), but returning NULL +ERROR: relation "list_parted" does not exist @@ -98716,12 +97731,9 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/update.out --labe +ERROR: relation "list_parted" does not exist -- Drop the trigger. Now the row should be moved. DROP TRIGGER trig_skip_delete ON sub_part2; -+ERROR: at or near "trig_skip_delete": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+DROP TRIGGER trig_skip_delete ON sub_part2 -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev UPDATE list_parted set b = 1 WHERE c = 70; +ERROR: relation "list_parted" does not exist SELECT tableoid::regclass::text, * FROM list_parted ORDER BY 1, 2, 3, 4; @@ -98768,7 +97780,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/update.out --labe +create operator class custom_opclass for type int4 using hash as + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev create table hash_parted ( a int, b int @@ -99755,21 +98767,21 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/aggregates.out -- +create table minmaxtest1() inherits (minmaxtest) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev create table minmaxtest2() inherits (minmaxtest); +ERROR: at or near "(": syntax error: unimplemented: this syntax +DETAIL: source SQL: +create table minmaxtest2() inherits (minmaxtest) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev create table minmaxtest3() inherits (minmaxtest); +ERROR: at or near "(": syntax error: unimplemented: this syntax +DETAIL: source SQL: +create table minmaxtest3() inherits (minmaxtest) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev create index minmaxtesti on minmaxtest(f1); create index minmaxtest1i on minmaxtest1(f1); +ERROR: relation "minmaxtest1" does not exist @@ -99991,7 +99003,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/aggregates.out -- +create temp table t1c () inherits (t1) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev -- Ensure we don't remove any columns when t1 has a child table explain (costs off) select * from t1 group by a,b,c,d; - QUERY PLAN @@ -101428,7 +100440,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/aggregates.out -- +create aggregate my_avg(int4) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev create aggregate my_sum(int4) ( stype = avg_state, @@ -101440,7 +100452,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/aggregates.out -- +create aggregate my_sum(int4) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- aggregate state should be shared as aggs are the same. select my_avg(one),my_avg(one) from (values(1),(3)) t(one); -NOTICE: avg_transfn called with 1 @@ -101551,7 +100563,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/aggregates.out -- +create aggregate my_sum_init(int4) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev create aggregate my_avg_init(int4) ( stype = avg_state, @@ -101564,7 +100576,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/aggregates.out -- +create aggregate my_avg_init(int4) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev create aggregate my_avg_init2(int4) ( stype = avg_state, @@ -101577,7 +100589,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/aggregates.out -- +create aggregate my_avg_init2(int4) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- state should be shared if INITCONDs are matching select my_sum_init(one),my_avg_init(one) from (values(1),(3)) t(one); -NOTICE: avg_transfn called with 1 @@ -101628,7 +100640,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/aggregates.out -- +create aggregate my_sum(int4) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev create aggregate my_half_sum(int4) ( stype = int4, @@ -101640,7 +100652,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/aggregates.out -- +create aggregate my_half_sum(int4) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- Agg state should be shared even though my_sum has no finalfn select my_sum(one),my_half_sum(one) from (values(1),(2),(3),(4)) t(one); -NOTICE: sum_transfn called with 1 @@ -101667,7 +100679,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/aggregates.out -- +CREATE AGGREGATE balk(int4) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev SELECT balk(hundred) FROM tenk1; - balk ------- @@ -101711,7 +100723,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/aggregates.out -- +CREATE AGGREGATE balk(int4) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- force use of parallelism ALTER TABLE tenk1 set (parallel_workers = 4); +ERROR: current transaction is aborted, commands ignored until end of transaction block @@ -101744,7 +100756,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/aggregates.out -- ROLLBACK; -- test multiple usage of an aggregate whose finalfn returns a R/W datum BEGIN; -@@ -2767,6 +2523,8 @@ +@@ -2767,6 +2523,7 @@ RETURN array_fill(y[1], ARRAY[4]); END; $$; @@ -101752,7 +100764,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/aggregates.out -- CREATE FUNCTION rwagg_finalfunc(x anyarray) RETURNS anyarray LANGUAGE plpgsql STRICT IMMUTABLE AS $$ DECLARE -@@ -2777,11 +2535,18 @@ +@@ -2777,11 +2534,18 @@ RETURN res; END; $$; @@ -101767,11 +100779,11 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/aggregates.out -- +CREATE AGGREGATE rwagg(anyarray) ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev CREATE FUNCTION eatarray(x real[]) RETURNS real[] LANGUAGE plpgsql STRICT IMMUTABLE AS $$ BEGIN -@@ -2789,21 +2554,35 @@ +@@ -2789,21 +2553,35 @@ RETURN x; END; $$; @@ -101812,7 +100824,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/aggregates.out -- -- variance(int4) covers numeric_poly_combine -- sum(int8) covers int8_avg_combine -- regr_count(float8, float8) covers int8inc_float8_float8 and aggregates with > 1 arg -@@ -2813,36 +2592,17 @@ +@@ -2813,36 +2591,17 @@ UNION ALL SELECT * FROM tenk1 UNION ALL SELECT * FROM tenk1 UNION ALL SELECT * FROM tenk1) u; @@ -101855,7 +100867,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/aggregates.out -- -- variance(int8) covers numeric_combine -- avg(numeric) covers numeric_avg_combine EXPLAIN (COSTS OFF, VERBOSE) -@@ -2851,46 +2611,22 @@ +@@ -2851,46 +2610,22 @@ UNION ALL SELECT * FROM tenk1 UNION ALL SELECT * FROM tenk1 UNION ALL SELECT * FROM tenk1) u; @@ -101910,7 +100922,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/aggregates.out -- -- Ensure that the STRICT checks for aggregates does not take NULLness -- of ORDER BY columns into account. See bug report around -- 2a505161-2727-2473-7c46-591ed108ac52@email.cz -@@ -2929,27 +2665,46 @@ +@@ -2929,27 +2664,46 @@ -- does not lead to array overflow due to unexpected duplicate hash keys -- see CAFeeJoKKu0u+A_A9R9316djW-YW3-+Gtgvy3ju655qRHR3jtdA@mail.gmail.com set enable_memoize to off; @@ -101969,7 +100981,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/aggregates.out -- select unique1, count(*), sum(twothousand) from tenk1 group by unique1 having sum(fivethous) > 4975 -@@ -3007,12 +2762,48 @@ +@@ -3007,12 +2761,48 @@ (48 rows) set work_mem to default; @@ -102018,7 +101030,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/aggregates.out -- create table agg_data_2k as select g from generate_series(0, 1999) g; analyze agg_data_2k; -@@ -3021,19 +2812,28 @@ +@@ -3021,19 +2811,28 @@ analyze agg_data_20k; -- Produce results with sorting. 
set enable_hashagg = false; @@ -102056,7 +101068,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/aggregates.out -- create table agg_group_1 as select g%10000 as c1, sum(g::numeric) as c2, count(*) as c3 from agg_data_20k group by g%10000; -@@ -3048,6 +2848,7 @@ +@@ -3048,6 +2847,7 @@ where g < r.a group by g/2) as s; set jit_above_cost to default; @@ -102064,7 +101076,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/aggregates.out -- create table agg_group_3 as select (g/2)::numeric as c1, sum(7::int4) as c2, count(*) as c3 from agg_data_2k group by g/2; -@@ -3056,18 +2857,41 @@ +@@ -3056,18 +2856,41 @@ from agg_data_2k group by g/2; -- Produce results with hash aggregation set enable_hashagg = true; @@ -102113,7 +101125,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/aggregates.out -- create table agg_hash_1 as select g%10000 as c1, sum(g::numeric) as c2, count(*) as c3 from agg_data_20k group by g%10000; -@@ -3082,6 +2906,7 @@ +@@ -3082,6 +2905,7 @@ where g < r.a group by g/2) as s; set jit_above_cost to default; @@ -102121,7 +101133,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/aggregates.out -- create table agg_hash_3 as select (g/2)::numeric as c1, sum(7::int4) as c2, count(*) as c3 from agg_data_2k group by g/2; -@@ -3089,7 +2914,31 @@ +@@ -3089,7 +2913,31 @@ select (g/2)::numeric as c1, array_agg(g::numeric) as c2, count(*) as c3 from agg_data_2k group by g/2; set enable_sort = true; @@ -105804,7 +104816,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/join.out --label= +create temp table t2a () inherits (t2) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev insert into t2a values (200, 2001); +ERROR: relation "t2a" does not exist select * from t1 left join t2 on (t1.a = t2.a); @@ -106259,7 +105271,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/join.out --label= create index on tidv (idv); +ERROR: unimplemented: column idv is of type mycomptype and thus is not indexable +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/35730/v24.2 ++See: https://go.crdb.dev/issue-v/35730/dev explain (costs off) select a.idv, b.idv from tidv a, tidv b where a.idv = b.idv; - QUERY PLAN @@ -111538,7 +110550,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/security_label.ou +CREATE DOMAIN seclabel_domain AS text + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev ALTER TABLE seclabel_tbl1 OWNER TO regress_seclabel_user1; ALTER TABLE seclabel_tbl2 OWNER TO regress_seclabel_user2; -- @@ -111600,7 +110612,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/security_label.ou +DROP DOMAIN seclabel_domain + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev DROP VIEW seclabel_view1; DROP TABLE seclabel_tbl1; DROP TABLE seclabel_tbl2; @@ -111616,7 +110628,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/drop_operator.out +CREATE OPERATOR === ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev CREATE OPERATOR !== ( PROCEDURE = int8ne, LEFTARG = bigint, @@ -111629,7 +110641,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/drop_operator.out +CREATE OPERATOR !== ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev DROP OPERATOR !==(bigint, bigint); +ERROR: at or near "!": syntax error: unimplemented: this syntax +DETAIL: source SQL: @@ -111690,7 +110702,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/drop_operator.out +CREATE OPERATOR <| ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev CREATE OPERATOR |> ( PROCEDURE = int8gt, LEFTARG = bigint, @@ -111703,7 +110715,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/drop_operator.out +CREATE OPERATOR |> ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev DROP OPERATOR |>(bigint, bigint); +ERROR: at or near "|": syntax error: unimplemented: this syntax +DETAIL: source SQL: @@ -112531,14 +111543,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/lock.out --label= +CREATE TABLE lock_tbl2 (b BIGINT) INHERITS (lock_tbl1) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev CREATE TABLE lock_tbl3 () INHERITS (lock_tbl2); +ERROR: at or near "(": syntax error: unimplemented: this syntax +DETAIL: source SQL: +CREATE TABLE lock_tbl3 () INHERITS (lock_tbl2) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev BEGIN TRANSACTION; LOCK TABLE lock_tbl1 * IN ACCESS EXCLUSIVE MODE; +ERROR: at or near "lock": syntax error @@ -112696,7 +111708,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/lock.out --label= LANGUAGE C; +ERROR: unimplemented: C is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/102201/v24.2 ++See: https://go.crdb.dev/issue-v/102201/dev SELECT test_atomic_ops(); - test_atomic_ops ------------------ @@ -112816,7 +111828,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/collate.out --lab +CREATE DOMAIN testdomain_p AS text COLLATE "POSIX" + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev CREATE DOMAIN testdomain_i AS int COLLATE "POSIX"; -- fail -ERROR: collations are not supported by type integer +ERROR: at or near "as": syntax error: unimplemented: this syntax @@ -112824,7 +111836,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/collate.out --lab +CREATE DOMAIN testdomain_i AS int COLLATE "POSIX" + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev CREATE TABLE collate_test4 ( a int, b testdomain_p @@ -113093,7 +112105,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/collate.out --lab +CREATE DOMAIN testdomain AS text + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev SELECT a, b::testdomain FROM collate_test1 ORDER BY 2; - a | b ----+----- @@ -113853,9 +112865,9 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/replica_identity. + CONSTRAINT test_replica_identity_unique_defer UNIQUE (keya, keyb) DEFERRABLE, + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/31632/v24.2 ++See: https://go.crdb.dev/issue-v/31632/dev CREATE TABLE test_replica_identity_othertable (id serial primary key); -+NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. See https://www.cockroachlabs.com/docs/v24.2/serial.html ++NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. See https://www.cockroachlabs.com/docs/dev/serial.html CREATE INDEX test_replica_identity_keyab ON test_replica_identity (keya, keyb); +ERROR: relation "test_replica_identity" does not exist CREATE UNIQUE INDEX test_replica_identity_keyab_key ON test_replica_identity (keya, keyb); @@ -114338,7 +113350,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/replica_identity. diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/object_address.out --label=/mnt/data1/postgres/src/test/regress/results/object_address.out /mnt/data1/postgres/src/test/regress/expected/object_address.out /mnt/data1/postgres/src/test/regress/results/object_address.out --- /mnt/data1/postgres/src/test/regress/expected/object_address.out +++ /mnt/data1/postgres/src/test/regress/results/object_address.out -@@ -10,55 +10,231 @@ +@@ -10,55 +10,230 @@ CREATE SCHEMA addr_nsp; SET search_path TO 'addr_nsp'; CREATE FOREIGN DATA WRAPPER addr_fdw; @@ -114379,21 +113391,21 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/object_address.ou +CREATE TEXT SEARCH DICTIONARY addr_ts_dict (template=simple) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev CREATE TEXT SEARCH CONFIGURATION addr_ts_conf (copy=english); +ERROR: at or near "search": syntax error: unimplemented: this syntax +DETAIL: source SQL: +CREATE TEXT SEARCH CONFIGURATION addr_ts_conf (copy=english) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev CREATE TEXT SEARCH TEMPLATE addr_ts_temp (lexize=dsimple_lexize); +ERROR: at or near "search": syntax error: unimplemented: this syntax +DETAIL: source SQL: +CREATE TEXT SEARCH TEMPLATE addr_ts_temp (lexize=dsimple_lexize) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev CREATE TEXT SEARCH PARSER addr_ts_prs (start = prsd_start, gettoken = prsd_nexttoken, end = prsd_end, lextypes = prsd_lextype); +ERROR: at or near "search": syntax error: unimplemented: this syntax @@ -114401,12 +113413,12 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/object_address.ou +CREATE TEXT SEARCH PARSER addr_ts_prs + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev CREATE TABLE addr_nsp.gentable ( a serial primary key CONSTRAINT a_chk CHECK (a > 0), b text DEFAULT 'hello' ); -+NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. See https://www.cockroachlabs.com/docs/v24.2/serial.html ++NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. See https://www.cockroachlabs.com/docs/dev/serial.html CREATE TABLE addr_nsp.parttable ( a int PRIMARY KEY ) PARTITION BY RANGE (a); @@ -114443,23 +113455,22 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/object_address.ou +CREATE AGGREGATE addr_nsp.genaggr(int4) (sfunc = int4pl, stype = int4) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev CREATE DOMAIN addr_nsp.gendomain AS int4 CONSTRAINT domconstr CHECK (value > 0); +ERROR: at or near "as": syntax error: unimplemented: this syntax +DETAIL: source SQL: +CREATE DOMAIN addr_nsp.gendomain AS int4 CONSTRAINT domconstr CHECK (value > 0) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev CREATE FUNCTION addr_nsp.trig() RETURNS TRIGGER LANGUAGE plpgsql AS $$ BEGIN END; $$; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev CREATE TRIGGER t BEFORE INSERT ON addr_nsp.gentable FOR EACH ROW EXECUTE PROCEDURE addr_nsp.trig(); -+ERROR: at or near "t": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER t BEFORE INSERT ON addr_nsp.gentable FOR EACH ROW EXECUTE PROCEDURE addr_nsp.trig() -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE POLICY genpol ON addr_nsp.gentable; +ERROR: at or near "policy": syntax error +DETAIL: source SQL: @@ -114575,7 +113586,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/object_address.ou -- unrecognized object types DO $$ DECLARE -@@ -75,21 +251,19 @@ +@@ -75,21 +250,19 @@ END LOOP; END; $$; @@ -114605,7 +113616,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/object_address.ou DO $$ DECLARE objtype text; -@@ -123,265 +297,65 @@ +@@ -123,265 +296,65 @@ END LOOP; END; $$; @@ -114902,7 +113913,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/object_address.ou -- Make sure that NULL handling is correct. 
\pset null 'NULL' -- Temporarily disable fancy output, so as future additions never create -@@ -455,84 +429,75 @@ +@@ -455,84 +428,75 @@ pg_identify_object_as_address(classid, objid, objsubid) AS ioa (typ, nms, args), pg_get_object_address(typ, nms, ioa.args) AS addr2 ORDER BY addr1.classid, addr1.objid, addr1.objsubid; @@ -115048,7 +114059,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/object_address.ou DROP OWNED BY regress_addr_user; DROP USER regress_addr_user; -- -@@ -592,47 +557,6 @@ +@@ -592,47 +556,6 @@ AS descr FROM objects ORDER BY objects.classid, objects.objid, objects.objsubid; @@ -115366,7 +114377,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/identity.out --la ALTER TABLE itest4 ALTER COLUMN a ADD GENERATED ALWAYS AS IDENTITY; -- ok ALTER TABLE itest4 ALTER COLUMN a DROP NOT NULL; -- error, disallowed -ERROR: column "a" of relation "itest4" is an identity column -+ERROR: relation "itest4" (980): conflicting NULL/NOT NULL declarations for column "a" ++ERROR: relation "itest4" (982): conflicting NULL/NOT NULL declarations for column "a" ALTER TABLE itest4 ALTER COLUMN a ADD GENERATED ALWAYS AS IDENTITY; -- error, already set ERROR: column "a" of relation "itest4" is already an identity column ALTER TABLE itest4 ALTER COLUMN b ADD GENERATED ALWAYS AS IDENTITY; -- error, wrong data type @@ -115745,7 +114756,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/identity.out --la ALTER TABLE itest13 ADD COLUMN c int GENERATED BY DEFAULT AS IDENTITY; +ERROR: nextval(): unimplemented: cannot evaluate scalar expressions containing sequence operations in this context +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/42508/v24.2 ++See: https://go.crdb.dev/issue-v/42508/dev SELECT * FROM itest13; - a | b | c ----+---+--- @@ -115800,7 +114811,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/identity.out --la ALTER COLUMN c SET GENERATED ALWAYS; +ERROR: nextval(): unimplemented: cannot evaluate scalar expressions containing sequence operations in this context +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/42508/v24.2 ++See: https://go.crdb.dev/issue-v/42508/dev \d itest3 - Table "public.itest3" - Column | Type | Collation | Nullable | Default @@ -115869,7 +114880,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/identity.out --la +CREATE TABLE itest7a (b text) INHERITS (itest7) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev -- make column identity in child table CREATE TABLE itest7b (a int); CREATE TABLE itest7c (a int GENERATED ALWAYS AS IDENTITY) INHERITS (itest7b); @@ -115879,7 +114890,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/identity.out --la +CREATE TABLE itest7c (a int GENERATED ALWAYS AS IDENTITY) INHERITS (itest7b) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev INSERT INTO itest7c DEFAULT VALUES; +ERROR: relation "itest7c" does not exist SELECT * FROM itest7c; @@ -115896,7 +114907,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/identity.out --la +CREATE TABLE itest7e () INHERITS (itest7d) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev ALTER TABLE itest7d ALTER COLUMN a ADD GENERATED ALWAYS AS IDENTITY; ALTER TABLE itest7d ADD COLUMN b int GENERATED ALWAYS AS IDENTITY; -- error -ERROR: cannot recursively add identity column to table that has child tables @@ -116059,7 +115070,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/identity.out --la DROP TYPE itest_type CASCADE; +ERROR: unimplemented: DROP TYPE CASCADE is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/51480/v24.2 ++See: https://go.crdb.dev/issue-v/51480/dev -- table partitions (currently not supported) CREATE TABLE itest_parent (f1 date NOT NULL, f2 text, f3 bigint) PARTITION BY RANGE (f1); +ERROR: at or near "EOF": syntax error @@ -116449,7 +115460,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/groupingsets.out + from gstest2 group by rollup (a,b) order by rsum, a, b + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev -- nesting with grouping sets select sum(c) from gstest2 group by grouping sets((), grouping sets((), grouping sets(()))) @@ -116467,7 +115478,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/groupingsets.out + group by grouping sets((), grouping sets((), grouping sets(()))) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev select sum(c) from gstest2 group by grouping sets((), grouping sets((), grouping sets(((a, b))))) order by 1 desc; @@ -116486,7 +115497,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/groupingsets.out + group by grouping sets((), grouping sets((), grouping sets(((a, b))))) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev select sum(c) from gstest2 group by grouping sets(grouping sets(rollup(c), grouping sets(cube(c)))) order by 1 desc; @@ -116506,7 +115517,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/groupingsets.out + group by grouping sets(grouping sets(rollup(c), grouping sets(cube(c)))) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev select sum(c) from gstest2 group by grouping sets(a, grouping sets(a, cube(b))) order by 1 desc; @@ -116527,7 +115538,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/groupingsets.out + group by grouping sets(a, grouping sets(a, cube(b))) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev select sum(c) from gstest2 group by grouping sets(grouping sets((a, (b)))) order by 1 desc; @@ -116544,7 +115555,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/groupingsets.out + group by grouping sets(grouping sets((a, (b)))) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev select sum(c) from gstest2 group by grouping sets(grouping sets((a, b))) order by 1 desc; @@ -116561,7 +115572,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/groupingsets.out + group by grouping sets(grouping sets((a, b))) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev select sum(c) from gstest2 group by grouping sets(grouping sets(a, grouping sets(a), a)) order by 1 desc; @@ -116581,7 +115592,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/groupingsets.out + group by grouping sets(grouping sets(a, grouping sets(a), a)) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev select sum(c) from gstest2 group by grouping sets(grouping sets(a, grouping sets(a, grouping sets(a), ((a)), a, grouping sets(a), (a)), a)) order by 1 desc; @@ -116611,7 +115622,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/groupingsets.out + group by grouping sets(grouping sets(a, grouping sets(a, grouping sets(a), ((a)), a, grouping sets(a), (a)), a)) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev select sum(c) from gstest2 group by grouping sets((a,(a,b)), grouping sets((a,(a,b)),a)) order by 1 desc; @@ -116633,7 +115644,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/groupingsets.out + group by grouping sets((a,(a,b)), grouping sets((a,(a,b)),a)) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev -- empty input: first is 0 rows, second 1, third 3 etc. select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),a); - a | b | sum | count @@ -116645,7 +115656,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/groupingsets.out +select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),a) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),()); - a | b | sum | count ----+---+-----+------- @@ -116657,7 +115668,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/groupingsets.out +select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),()) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),(),(),()); - a | b | sum | count ----+---+-----+------- @@ -116671,7 +115682,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/groupingsets.out +select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),(),(),()) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev select sum(v), count(*) from gstest_empty group by grouping sets ((),(),()); - sum | count ------+------- @@ -116685,7 +115696,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/groupingsets.out +select sum(v), count(*) from gstest_empty group by grouping sets ((),(),()) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev -- empty input with joins tests some important code paths select t1.a, t2.b, sum(t1.v), count(*) from gstest_empty t1, gstest_empty t2 group by grouping sets ((t1.a,t2.b),()); @@ -116700,7 +115711,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/groupingsets.out + group by grouping sets ((t1.a,t2.b),()) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev -- simple joins, var resolution, GROUPING on join vars select t1.a, t2.b, grouping(t1.a, t2.b), sum(t1.v), max(t2.a) from gstest1 t1, gstest2 t2 @@ -116848,7 +115859,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/groupingsets.out + group by alias1, rollup(alias2) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev -- check that pulled-up subquery outputs still go to null when appropriate select four, x from (select four, ten, 'foo'::text as x from tenk1) as t @@ -116866,7 +115877,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/groupingsets.out + group by grouping sets (four, x) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev select four, x || 'x' from (select four, ten, 'foo'::text as x from tenk1) as t group by grouping sets (four, x) @@ -116887,7 +115898,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/groupingsets.out + group by grouping sets (four, x) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev select (x+y)*1, sum(z) from (select 1 as x, 2 as y, 3 as z) s group by grouping sets (x+y, x); @@ -116904,7 +115915,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/groupingsets.out + group by grouping sets (x+y, x) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev select x, not x as not_x, q2 from (select *, q1 = 1 as x from int8_tbl i1) as t group by grouping sets(x, q2) @@ -116925,7 +115936,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/groupingsets.out + group by grouping sets(x, q2) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev -- check qual push-down rules for a subquery with grouping sets explain (verbose, costs off) select * from ( @@ -116970,7 +115981,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/groupingsets.out + group by grouping sets(1, 2) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev -- check handling of pulled-up SubPlan in GROUPING() argument (bug #17479) explain (verbose, costs off) select grouping(ss.x) @@ -117097,7 +116108,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/groupingsets.out + group by rollup (a,b) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev select * from (values (1),(2)) v(x), lateral (select a, b, sum(v.x) from gstest_data(v.x) group by rollup (a,b)) s; @@ -117111,7 +116122,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/groupingsets.out + lateral (select a, b, sum(v.x) from gstest_data(v.x) group by rollup (a,b)) s + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev -- min max optimization should still work with GROUP BY () explain (costs off) select min(unique1) from tenk1 GROUP BY (); @@ -117258,7 +116269,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/groupingsets.out +select a, b, c, d from gstest2 group by rollup(a,b),grouping sets(c,d) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev select a, b from (values (1,2),(2,3)) v(a,b) group by a,b, grouping sets(a); - a | b ----+--- @@ -117271,7 +116282,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/groupingsets.out +select a, b from (values (1,2),(2,3)) v(a,b) group by a,b, grouping sets(a) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev -- Tests for chained aggregates select a, b, grouping(a,b), sum(v), count(*), max(v) from gstest1 group by grouping sets ((a,b),(a+1,b+1),(a+2,b+2)) order by 3,6; @@ -117379,7 +116390,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/groupingsets.out + from gstest2 group by cube (a,b) order by rsum, a, b + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev select a, b, sum(c) from (values (1,1,10),(1,1,11),(1,2,12),(1,2,13),(1,3,14),(2,3,15),(3,3,16),(3,4,17),(4,1,18),(4,1,19)) v(a,b,c) group by rollup (a,b); - a | b | sum ----+---+----- @@ -117402,7 +116413,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/groupingsets.out +select a, b, sum(c) from (values (1,1,10),(1,1,11),(1,2,12),(1,2,13),(1,3,14),(2,3,15),(3,3,16),(3,4,17),(4,1,18),(4,1,19)) v(a,b,c) group by rollup (a,b) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev select a, b, sum(v.x) from (values (1),(2)) v(x), gstest_data(v.x) group by cube (a,b) order by a,b; @@ -117429,7 +116440,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/groupingsets.out + group by cube (a,b) order by a,b + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev -- Test reordering of grouping sets explain (costs off) select * from gstest1 group by grouping sets((a,b,v),(v)) order by v,b,a; @@ -117487,7 +116498,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/groupingsets.out +select a, b, sum(c), count(*) from gstest2 group by grouping sets (rollup(a,b),a) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev -- HAVING queries select ten, sum(distinct four) from onek a group by grouping sets((ten,four),(ten)) @@ -117527,7 +116538,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/groupingsets.out +group by grouping sets((ten,four),(ten)) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev -- Tests around pushdown of HAVING clauses, partially testing against previous bugs select a,count(*) from gstest2 group by rollup(a) order by a; - a | count @@ -117542,7 +116553,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/groupingsets.out +select a,count(*) from gstest2 group by rollup(a) order by a + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev select a,count(*) from gstest2 group by rollup(a) having a is distinct from 1 order by a; - a | count ----+------- @@ -117555,7 +116566,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/groupingsets.out +select a,count(*) from gstest2 group by rollup(a) having a is distinct from 1 order by a + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev explain (costs off) select a,count(*) from gstest2 group by rollup(a) having a is distinct from 1 order by a; - QUERY PLAN @@ -117756,7 +116767,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/groupingsets.out +group by rollup(ten) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev -- More rescan tests select * from (values (1),(2)) v(a) left join lateral (select v.a, four, ten, count(*) from onek group by cube(four,ten)) s on true order by v.a,four,ten; - a | a | four | ten | count @@ -117838,7 +116849,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/groupingsets.out +select * from (values (1),(2)) v(a) left join lateral (select v.a, four, ten, count(*) from onek group by cube(four,ten)) s on true order by v.a,four,ten + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev select array(select row(v.a,s1.*) from (select two,four, count(*) from onek group by cube(two,four) order by two,four) s1) from (values (1),(2)) v(a); - array ------------------------------------------------------------------------------------------------------------------------------------------------------- @@ -117851,7 +116862,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/groupingsets.out +select array(select row(v.a,s1.*) from (select two,four, count(*) from onek group by cube(two,four) order by two,four) s1) from (values (1),(2)) v(a) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev -- Grouping on text columns select sum(ten) from onek group by two, rollup(four::text) order by 1; - sum @@ -117869,7 +116880,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/groupingsets.out +select sum(ten) from onek group by two, rollup(four::text) order by 1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev select sum(ten) from onek group by rollup(four::text), two order by 1; - sum ------- @@ -117886,7 +116897,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/groupingsets.out +select sum(ten) from onek group by rollup(four::text), two order by 1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev -- hashing support set enable_hashagg = true; +ERROR: unimplemented: the configuration setting "enable_hashagg" is not supported @@ -117910,7 +116921,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/groupingsets.out +select count(*) from gstest4 group by rollup(unhashable_col,unsortable_col) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev select array_agg(v order by v) from gstest4 group by grouping sets ((id,unsortable_col),(id)); -ERROR: could not implement GROUP BY -DETAIL: Some of the datatypes only support hashing, while others only support sorting. @@ -117919,7 +116930,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/groupingsets.out +select array_agg(v order by v) from gstest4 group by grouping sets ((id,unsortable_col),(id)) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev -- simple cases select a, b, grouping(a,b), sum(v), count(*), max(v) from gstest1 group by grouping sets ((a),(b)) order by 3,1,2; @@ -118061,7 +117072,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/groupingsets.out + from gstest4 group by grouping sets ((unsortable_col),(unsortable_col)) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev -- mixed hashable/sortable cases select unhashable_col, unsortable_col, grouping(unhashable_col, unsortable_col), @@ -118190,7 +117201,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/groupingsets.out +select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),a) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev explain (costs off) select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),a); - QUERY PLAN @@ -118217,7 +117228,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/groupingsets.out +select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),()) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),(),(),()); - a | b | sum | count ----+---+-----+------- @@ -118231,7 +117242,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/groupingsets.out +select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),(),(),()) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev explain (costs off) select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),(),(),()); - QUERY PLAN @@ -118262,7 +117273,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/groupingsets.out +select sum(v), count(*) from gstest_empty group by grouping sets ((),(),()) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev explain (costs off) select sum(v), count(*) from gstest_empty group by grouping sets ((),(),()); - QUERY PLAN @@ -118343,7 +117354,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/groupingsets.out + group by grouping sets (a,b) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev explain (costs off) select a, b, sum(v.x) from (values (1),(2)) v(x), gstest_data(v.x) @@ -118379,7 +117390,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/groupingsets.out + lateral (select a, b, sum(v.x) from gstest_data(v.x) group by grouping sets (a,b)) s + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev explain (costs off) select * from (values (1),(2)) v(x), @@ -118473,7 +117484,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/groupingsets.out + from gstest2 group by cube (a,b) order by rsum, a, b + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev explain (costs off) select a, b, sum(c), sum(sum(c)) over (order by a,b) as rsum from gstest2 group by cube (a,b) order by rsum, a, b; @@ -118523,7 +117534,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/groupingsets.out + group by cube (a,b) order by a,b + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev explain (costs off) select a, b, sum(v.x) from (values (1),(2)) v(x), gstest_data(v.x) @@ -118599,7 +117610,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/groupingsets.out +SELECT a, b, count(*), max(a), max(b) FROM gstest3 GROUP BY GROUPING SETS(a, b,()) ORDER BY a, b + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev SET LOCAL enable_seqscan = false; +ERROR: current transaction is aborted, commands ignored until end of transaction block EXPLAIN (COSTS OFF) SELECT a, b, count(*), max(a), max(b) FROM gstest3 GROUP BY GROUPING SETS(a, b,()) ORDER BY a, b; @@ -118635,7 +117646,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/groupingsets.out +SELECT a, b, count(*), max(a), max(b) FROM gstest3 GROUP BY GROUPING SETS(a, b,()) ORDER BY a, b + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev COMMIT; -- More rescan tests select * from (values (1),(2)) v(a) left join lateral (select v.a, four, ten, count(*) from onek group by cube(four,ten)) s on true order by v.a,four,ten; @@ -118718,7 +117729,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/groupingsets.out +select * from (values (1),(2)) v(a) left join lateral (select v.a, four, ten, count(*) from onek group by cube(four,ten)) s on true order by v.a,four,ten + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev select array(select row(v.a,s1.*) from (select two,four, count(*) from onek group by cube(two,four) order by two,four) s1) from (values (1),(2)) v(a); - array ------------------------------------------------------------------------------------------------------------------------------------------------------- @@ -118731,7 +117742,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/groupingsets.out +select array(select row(v.a,s1.*) from (select two,four, count(*) from onek group by cube(two,four) order by two,four) s1) from (values (1),(2)) v(a) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev -- Rescan logic changes when there are no empty grouping sets, so test -- that too: select * from (values (1),(2)) v(a) left join lateral (select v.a, four, ten, count(*) from onek group by grouping sets(four,ten)) s on true order by v.a,four,ten; @@ -118772,7 +117783,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/groupingsets.out +select * from (values (1),(2)) v(a) left join lateral (select v.a, four, ten, count(*) from onek group by grouping sets(four,ten)) s on true order by v.a,four,ten + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev select array(select row(v.a,s1.*) from (select two,four, count(*) from onek group by grouping sets(two,four) order by two,four) s1) from (values (1),(2)) v(a); - array ---------------------------------------------------------------------------------- @@ -118785,7 +117796,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/groupingsets.out +select array(select row(v.a,s1.*) from (select two,four, count(*) from onek group by grouping sets(two,four) order by two,four) s1) from (values (1),(2)) v(a) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev -- test the knapsack set enable_indexscan = false; +ERROR: unimplemented: the configuration setting "enable_indexscan" is not supported @@ -119184,7 +118195,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/groupingsets.out + bug_16784 group by cube(i,j)) s + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev -- -- Compare results between plans using sorting and plans using hash -- aggregation. Force spilling in both cases by setting work_mem low @@ -119273,7 +118284,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/groupingsets.out +from gs_data_1 group by cube (g1000, g100,g10) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev -- Produce results with hash aggregation. set enable_hashagg = true; +ERROR: unimplemented: the configuration setting "enable_hashagg" is not supported @@ -119333,7 +118344,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/groupingsets.out +from gs_data_1 group by cube (g1000, g100,g10) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev set enable_sort = true; +ERROR: unimplemented: the configuration setting "enable_sort" is not supported +HINT: You have attempted to use a feature that is not yet implemented. @@ -119457,7 +118468,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/groupingsets.out +group by rollup(a, b), rollup(a, c) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev -- "group by distinct" behavior... select a, b, c from (values (1, 2, 3), (4, null, 6), (7, 8, 9)) as t (a, b, c) @@ -119513,7 +118524,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/groupingsets.out +group by rollup(a, b), rollup(a, c) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev -- test handling of outer GroupingFunc within subqueries explain (costs off) select (select grouping(v1)) from (values ((select 1))) v(v1) group by cube(v1); @@ -120078,7 +119089,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/matview.out --lab +CREATE TABLE mvtest_boxes (id serial primary key, b box) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev INSERT INTO mvtest_boxes (b) VALUES ('(32,32),(31,31)'), ('(2.0000004,2.0000004),(1,1)'), @@ -120296,7 +119307,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/matview.out --lab +SET SESSION AUTHORIZATION regress_matview_user + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE MATERIALIZED VIEW matview_schema.mv_withdata1 (a) AS SELECT generate_series(1, 10) WITH DATA; EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) @@ -120421,9 +119432,9 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/generated.out --l + attrelid | attname | attgenerated +------------+--------------------------+-------------- + 4191000834 | crdb_internal_idx_expr | v -+ 3786725937 | crdb_internal_idx_expr | v -+ 3786725943 | crdb_internal_idx_expr_1 | v -+ 3046676848 | crdb_internal_idx_expr | v ++ 57626847 | crdb_internal_idx_expr | v ++ 57626841 | crdb_internal_idx_expr_1 | v ++ 1441863570 | crdb_internal_idx_expr | v +(4 rows) CREATE TABLE gtest0 (a int PRIMARY KEY, b int GENERATED ALWAYS AS (55) STORED); @@ -120752,7 +119763,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/generated.out --l +CREATE TABLE gtest1_1 () INHERITS (gtest1) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev SELECT * FROM gtest1_1; - a | b ----+--- @@ -120805,7 +119816,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/generated.out --l +CREATE TABLE gtest_normal_child (a int, b int GENERATED ALWAYS AS (a * 2) STORED) INHERITS (gtest_normal) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev CREATE TABLE gtest_normal_child (a int, b int GENERATED ALWAYS AS (a * 2) STORED); ALTER TABLE gtest_normal_child INHERIT gtest_normal; -- error -ERROR: column "b" in child table must not be a generated column @@ -120824,7 +119835,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/generated.out --l +CREATE TABLE gtestx (x int, b int DEFAULT 10) INHERITS (gtest1) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev CREATE TABLE gtestx (x int, b int GENERATED ALWAYS AS IDENTITY) INHERITS (gtest1); -- error -NOTICE: merging column "b" with inherited definition -ERROR: column "b" inherits from generated column but specifies identity @@ -120833,7 +119844,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/generated.out --l +CREATE TABLE gtestx (x int, b int GENERATED ALWAYS AS IDENTITY) INHERITS (gtest1) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev CREATE TABLE gtestx (x int, b int GENERATED ALWAYS AS (a * 22) STORED) INHERITS (gtest1); -- ok, overrides parent -NOTICE: merging column "b" with inherited definition +ERROR: at or near "(": syntax error: unimplemented: this syntax @@ -120841,7 +119852,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/generated.out --l +CREATE TABLE gtestx (x int, b int GENERATED ALWAYS AS (a * 22) STORED) INHERITS (gtest1) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev \d+ gtestx - Table "public.gtestx" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description @@ -120892,7 +119903,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/generated.out --l +CREATE TABLE gtest1_y () INHERITS (gtest0, gtesty) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev DROP TABLE gtesty; CREATE TABLE gtesty (x int, b int); CREATE TABLE gtest1_y () INHERITS (gtest1, gtesty); -- error @@ -120903,7 +119914,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/generated.out --l +CREATE TABLE gtest1_y () INHERITS (gtest1, gtesty) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev DROP TABLE gtesty; CREATE TABLE gtesty (x int, b int GENERATED ALWAYS AS (x * 22) STORED); CREATE TABLE gtest1_y () INHERITS (gtest1, gtesty); -- error @@ -120915,7 +119926,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/generated.out --l +CREATE TABLE gtest1_y () INHERITS (gtest1, gtesty) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev CREATE TABLE gtest1_y (b int GENERATED ALWAYS AS (x + 1) STORED) INHERITS (gtest1, gtesty); -- ok -NOTICE: merging multiple inherited definitions of column "b" -NOTICE: moving and merging column "b" with inherited definition @@ -120925,7 +119936,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/generated.out --l +CREATE TABLE gtest1_y (b int GENERATED ALWAYS AS (x + 1) STORED) INHERITS (gtest1, gtesty) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev \d gtest1_y - Table "public.gtest1_y" - Column | Type | Collation | Nullable | Default @@ -120953,7 +119964,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/generated.out --l +CREATE TABLE gtestc (f2 int GENERATED ALWAYS AS (f1+1) STORED) INHERITS(gtestp) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev INSERT INTO gtestc values(42); +ERROR: relation "gtestc" does not exist TABLE gtestc; @@ -121800,7 +120811,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/spgist.out --labe +create table spgist_point_tbl(id int4, p point) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev create index spgist_point_idx on spgist_point_tbl using spgist(p) with (fillfactor = 75); +ERROR: at or near "spgist": syntax error: unimplemented: this syntax +DETAIL: source SQL: @@ -121870,7 +120881,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/spgist.out --labe +create table spgist_box_tbl(id serial, b box) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev insert into spgist_box_tbl(b) select box(point(i,j),point(i+s,j+s)) from generate_series(1,100,5) i, @@ -121993,7 +121004,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/spgist.out --labe +create domain spgist_text as varchar + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev create table spgist_domain_tbl (f1 spgist_text); +ERROR: type "spgist_text" does not exist create index spgist_domain_idx on spgist_domain_tbl using spgist(f1); @@ -122043,7 +121054,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/spgist.out --labe +create unlogged table spgist_unlogged_tbl(id serial, b box) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev create index spgist_unlogged_idx on spgist_unlogged_tbl using spgist (b); +ERROR: at or near "spgist": syntax error: unimplemented: this syntax +DETAIL: source SQL: @@ -122114,7 +121125,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_alice + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE TABLE uaccount ( pguser name primary key, seclv int @@ -122241,7 +121252,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_bob + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SET row_security TO ON; SELECT * FROM document WHERE f_leak(dtitle) ORDER BY did; -NOTICE: f_leak => my first novel @@ -122301,7 +121312,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_carol + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM document WHERE f_leak(dtitle) ORDER BY did; -NOTICE: f_leak => my first novel -NOTICE: f_leak => my second novel @@ -122416,7 +121427,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_dave + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM document WHERE f_leak(dtitle) ORDER BY did; -NOTICE: f_leak => my first novel -NOTICE: f_leak => my second novel @@ -122522,7 +121533,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_alice + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev ALTER POLICY p1 ON document USING (dauthor = current_user); +ERROR: at or near "policy": syntax error +DETAIL: source SQL: @@ -122536,7 +121547,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_bob + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM document WHERE f_leak(dtitle) ORDER BY did; -NOTICE: f_leak => my first novel -NOTICE: f_leak => my second novel @@ -122576,7 +121587,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_carol + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM document WHERE f_leak(dtitle) ORDER BY did; -NOTICE: f_leak => great science fiction -NOTICE: f_leak => great technology book @@ -122635,7 +121646,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_alice + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE POLICY p2 ON category USING (CASE WHEN current_user = 'regress_rls_bob' THEN cid IN (11, 33) WHEN current_user = 'regress_rls_carol' THEN cid IN (22, 44) @@ -122658,7 +121669,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_bob + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM document d FULL OUTER JOIN category c on d.cid = c.cid ORDER BY d.did, c.cid; - did | cid | dlevel | dauthor | dtitle | cid | cname ------+-----+--------+-----------------+--------------------+-----+------------ @@ -122696,7 +121707,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_carol + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM document d FULL OUTER JOIN category c on d.cid = c.cid ORDER BY d.did, c.cid; - did | cid | dlevel | dauthor | dtitle | cid | cname ------+-----+--------+-------------------+-----------------------+-----+----------------- @@ -122727,7 +121738,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_bob + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev INSERT INTO document VALUES (8, 44, 1, 'regress_rls_bob', 'my third manga'); -- Must fail with unique violation, revealing presence of did we can't see ERROR: duplicate key value violates unique constraint "document_pkey" +DETAIL: Key (did)=(8) already exists. @@ -122816,7 +121827,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_exempt_user + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SET row_security TO OFF; SELECT * FROM document; - did | cid | dlevel | dauthor | dtitle @@ -122847,7 +121858,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_alice + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SET row_security TO ON; SELECT * FROM document; - did | cid | dlevel | dauthor | dtitle @@ -122878,7 +121889,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_alice + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SET row_security TO OFF; SELECT * FROM document; - did | cid | dlevel | dauthor | dtitle @@ -122909,7 +121920,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_alice + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SET row_security TO ON; CREATE TABLE t1 (id int not null primary key, a int, junk1 text, b text); ALTER TABLE t1 DROP COLUMN junk1; -- just a disturbing factor @@ -123001,7 +122012,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_bob + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM t1; - id | a | b ------+---+----- @@ -123279,7 +122290,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_exempt_user + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SET row_security TO OFF; SELECT * FROM t1 WHERE f_leak(b); -NOTICE: f_leak => aba @@ -123335,7 +122346,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_alice + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE TABLE part_document ( did int, cid int, @@ -123462,7 +122473,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_bob + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SET row_security TO ON; SELECT * FROM part_document WHERE f_leak(dtitle) ORDER BY did; -NOTICE: f_leak => my first novel @@ -123505,7 +122516,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_carol + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM part_document WHERE f_leak(dtitle) ORDER BY did; -NOTICE: f_leak => my first novel -NOTICE: f_leak => my second novel @@ -123559,7 +122570,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_dave + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM part_document WHERE f_leak(dtitle) ORDER BY did; -NOTICE: f_leak => my first novel -NOTICE: f_leak => my second novel @@ -123640,7 +122651,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_alice + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev ALTER TABLE part_document_satire ENABLE ROW LEVEL SECURITY; +ERROR: at or near "enable": syntax error +DETAIL: source SQL: @@ -123661,7 +122672,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_dave + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev INSERT INTO part_document_satire VALUES (101, 55, 1, 'regress_rls_dave', 'testing RLS with partitions'); -- fail -ERROR: new row violates row-level security policy for table "part_document_satire" +ERROR: relation "part_document_satire" does not exist @@ -123710,7 +122721,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_carol + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM part_document WHERE f_leak(dtitle) ORDER BY did; -NOTICE: f_leak => my first novel -NOTICE: f_leak => my second novel @@ -123781,7 +122792,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_alice + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev ALTER POLICY pp1 ON part_document USING (dauthor = current_user); +ERROR: at or near "policy": syntax error +DETAIL: source SQL: @@ -123795,7 +122806,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_bob + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM part_document WHERE f_leak(dtitle) ORDER BY did; -NOTICE: f_leak => my first novel -NOTICE: f_leak => my second novel @@ -123819,7 +122830,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_carol + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM part_document WHERE f_leak(dtitle) ORDER BY did; -NOTICE: f_leak => great science fiction -NOTICE: f_leak => great satire @@ -123890,7 +122901,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_exempt_user + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SET row_security TO OFF; SELECT * FROM part_document ORDER BY did; - did | cid | dlevel | dauthor | dtitle @@ -123925,7 +122936,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_alice + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SET row_security TO ON; SELECT * FROM part_document ORDER by did; - did | cid | dlevel | dauthor | dtitle @@ -123960,7 +122971,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_dave + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SET row_security TO OFF; SELECT * FROM part_document ORDER by did; -ERROR: query would be affected by row-level security policy for table "part_document" @@ -123975,7 +122986,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_alice + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SET row_security TO ON; CREATE POLICY pp3 ON part_document AS RESTRICTIVE USING ((SELECT dlevel <= seclv FROM uaccount WHERE pguser = current_user)); @@ -123990,7 +123001,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_carol + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev INSERT INTO part_document VALUES (100, 11, 5, 'regress_rls_carol', 'testing pp3'); -- fail -ERROR: new row violates row-level security policy "pp3" for table "part_document" +ERROR: relation "part_document" does not exist @@ -124001,7 +123012,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_alice + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SET row_security TO ON; CREATE TABLE dependee (x integer, y integer); CREATE TABLE dependent (x integer, y integer); @@ -124041,7 +123052,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_alice + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE TABLE rec1 (x integer, y integer); CREATE POLICY r1 ON rec1 USING (x = (SELECT r.x FROM rec1 r WHERE y = r.y)); +ERROR: at or near "policy": syntax error @@ -124062,7 +123073,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_bob + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM rec1; -- fail, direct recursion -ERROR: infinite recursion detected in policy for relation "rec1" + x | y @@ -124078,7 +123089,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_alice + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE TABLE rec2 (a integer, b integer); ALTER POLICY r1 ON rec1 USING (x = (SELECT a FROM rec2 WHERE b = y)); +ERROR: at or near "policy": syntax error @@ -124105,7 +123116,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_bob + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM rec1; -- fail, mutual recursion -ERROR: infinite recursion detected in policy for relation "rec1" + x | y @@ -124121,7 +123132,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_bob + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE VIEW rec1v AS SELECT * FROM rec1; CREATE VIEW rec2v AS SELECT * FROM rec2; SET SESSION AUTHORIZATION regress_rls_alice; @@ -124130,7 +123141,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_alice + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev ALTER POLICY r1 ON rec1 USING (x = (SELECT a FROM rec2v WHERE b = y)); +ERROR: at or near "policy": syntax error +DETAIL: source SQL: @@ -124150,7 +123161,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_bob + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM rec1; -- fail, mutual recursion via views -ERROR: infinite recursion detected in policy for relation "rec1" + x | y @@ -124166,7 +123177,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_bob + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev DROP VIEW rec1v, rec2v CASCADE; -NOTICE: drop cascades to 2 other objects -DETAIL: drop cascades to policy r1 on table rec1 @@ -124190,7 +123201,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_alice + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE POLICY r1 ON rec1 USING (x = (SELECT a FROM rec2v WHERE b = y)); +ERROR: at or near "policy": syntax error +DETAIL: source SQL: @@ -124210,7 +123221,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_bob + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM rec1; -- fail, mutual recursion via s.b. views -ERROR: infinite recursion detected in policy for relation "rec1" + x | y @@ -124226,7 +123237,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_alice + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE TABLE s1 (a int, b text); INSERT INTO s1 (SELECT x, public.fipshash(x::text) FROM generate_series(-10,10) x); +ERROR: unknown function: public.fipshash() @@ -124271,7 +123282,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_bob + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE VIEW v2 AS SELECT * FROM s2 WHERE y like '%af%'; SELECT * FROM s1 WHERE f_leak(b); -- fail (infinite recursion) -ERROR: infinite recursion detected in policy for relation "s1" @@ -124284,7 +123295,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_alice + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev DROP POLICY p3 on s1; +ERROR: at or near "policy": syntax error +DETAIL: source SQL: @@ -124304,7 +123315,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_bob + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM s1 WHERE f_leak(b); -- OK -NOTICE: f_leak => 03b26944890929ff751653acb2f2af79 - a | b @@ -124335,7 +123346,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_alice + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev ALTER POLICY p1 ON s1 USING (a in (select x from v2)); -- using VIEW in RLS policy -SET SESSION AUTHORIZATION regress_rls_bob; +ERROR: at or near "policy": syntax error @@ -124349,7 +123360,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_bob + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM s1 WHERE f_leak(b); -- OK -NOTICE: f_leak => 03b26944890929ff751653acb2f2af79 - a | b @@ -124408,7 +123419,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_alice + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev ALTER POLICY p2 ON s2 USING (x in (select a from s1 where b like '%d2%')); -SET SESSION AUTHORIZATION regress_rls_bob; +ERROR: at or near "policy": syntax error @@ -124422,7 +123433,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_bob + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM s1 WHERE f_leak(b); -- fail (infinite recursion via view) -ERROR: infinite recursion detected in policy for relation "s1" +ERROR: unknown function: f_leak() @@ -124577,7 +123588,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_bob + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SET row_security TO ON; EXECUTE p2(2); - id | a | b @@ -124616,7 +123627,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_bob + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev EXPLAIN (COSTS OFF) UPDATE t1 SET b = b || b WHERE f_leak(b); - QUERY PLAN ------------------------------------------------------------ @@ -124894,7 +123905,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_bob + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SET row_security TO ON; EXPLAIN (COSTS OFF) DELETE FROM only t1 WHERE f_leak(b); - QUERY PLAN @@ -124961,7 +123972,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_alice + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE TABLE b1 (a int, b text); INSERT INTO b1 (SELECT x, public.fipshash(x::text) FROM generate_series(-10,10) x); +ERROR: unknown function: public.fipshash() @@ -124984,7 +123995,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_bob + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE VIEW bv1 WITH (security_barrier) AS SELECT * FROM b1 WHERE a > 0 WITH CHECK OPTION; +ERROR: at or near "with": syntax error +DETAIL: source SQL: @@ -124999,7 +124010,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_carol + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev EXPLAIN (COSTS OFF) SELECT * FROM bv1 WHERE f_leak(b); - QUERY PLAN ---------------------------------------------- @@ -125076,7 +124087,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_alice + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM b1; - a | b ------+---------------------------------- @@ -125115,7 +124126,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_alice + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev DROP POLICY p1 ON document; +ERROR: at or near "policy": syntax error +DETAIL: source SQL: @@ -125154,7 +124165,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_bob + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev -- Exists... SELECT * FROM document WHERE did = 2; did | cid | dlevel | dauthor | dtitle @@ -125214,7 +124225,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_alice + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev DROP POLICY p1 ON document; +ERROR: at or near "policy": syntax error +DETAIL: source SQL: @@ -125246,7 +124257,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_bob + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev -- Just because WCO-style enforcement of USING quals occurs with -- existing/target tuple does not mean that the implementation can be allowed -- to fail to also enforce this qual against the final tuple appended to @@ -125278,7 +124289,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_alice + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev DROP POLICY p3_with_default ON document; +ERROR: at or near "policy": syntax error +DETAIL: source SQL: @@ -125303,7 +124314,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_bob + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev -- Fails, since ALL WCO is enforced in insert path: INSERT INTO document VALUES (80, (SELECT cid from category WHERE cname = 'novel'), 1, 'regress_rls_carol', 'my first novel') ON CONFLICT (did) DO UPDATE SET dtitle = EXCLUDED.dtitle, cid = 33; @@ -125397,7 +124408,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_bob + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev -- Fails, since update violates WITH CHECK qual on dlevel MERGE INTO document d USING (SELECT 1 as sdid) s @@ -125510,7 +124521,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_carol + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev MERGE INTO document d USING (SELECT 4 as sdid) s ON did = s.sdid @@ -125536,7 +124547,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_bob + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev -- Try INSERT action. This fails because we are trying to insert -- dauthor = regress_rls_dave and INSERT's WITH CHECK does not allow -- that @@ -125598,7 +124609,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_bob + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev -- MERGE can no longer see the matching row and hence attempts the -- NOT MATCHED action, which results in unique key violation MERGE INTO document d @@ -125699,7 +124710,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_alice + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE TABLE z1 (a int, b text); CREATE TABLE z2 (a int, b text); GRANT SELECT ON z1,z2 TO regress_rls_group1, regress_rls_group2, @@ -125731,7 +124742,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_bob + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM z1 WHERE f_leak(b); -NOTICE: f_leak => bbb -NOTICE: f_leak => dad @@ -125882,7 +124893,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_carol + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM z1 WHERE f_leak(b); -NOTICE: f_leak => aba -NOTICE: f_leak => ccc @@ -126030,7 +125041,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_alice + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE VIEW rls_view AS SELECT * FROM z1 WHERE f_leak(b); +ERROR: unknown function: f_leak() +HINT: There is probably a typo in function name. Or the intention was to use a user-defined function in the view query, which is currently not supported. @@ -126043,7 +125054,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_bob + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM rls_view; -NOTICE: f_leak => aba -NOTICE: f_leak => bbb @@ -126077,7 +125088,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_alice + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM rls_view; -NOTICE: f_leak => aba -NOTICE: f_leak => bbb @@ -126113,7 +125124,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_bob + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE VIEW rls_view AS SELECT * FROM z1 WHERE f_leak(b); +ERROR: unknown function: f_leak() +HINT: There is probably a typo in function name. Or the intention was to use a user-defined function in the view query, which is currently not supported. @@ -126127,7 +125138,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_alice + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM rls_view; -NOTICE: f_leak => bbb -NOTICE: f_leak => dad @@ -126158,7 +125169,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_bob + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM rls_view; -NOTICE: f_leak => bbb -NOTICE: f_leak => dad @@ -126188,7 +125199,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_carol + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM rls_view; --fail - permission denied. -ERROR: permission denied for view rls_view +ERROR: relation "rls_view" does not exist @@ -126206,7 +125217,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_bob + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev GRANT SELECT ON rls_view TO regress_rls_carol; +ERROR: cannot get the privileges on the grant targets: cannot determine the target type of the GRANT statement: relation "rls_view" does not exist SET SESSION AUTHORIZATION regress_rls_carol; @@ -126215,7 +125226,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_carol + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM rls_view; -NOTICE: f_leak => bbb -NOTICE: f_leak => dad @@ -126245,7 +125256,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_alice + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE TABLE z1_blacklist (a int); INSERT INTO z1_blacklist VALUES (3), (4); CREATE POLICY p3 ON z1 AS RESTRICTIVE USING (a NOT IN (SELECT a FROM z1_blacklist)); @@ -126261,7 +125272,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_bob + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM rls_view; --fail - permission denied. -ERROR: permission denied for table z1_blacklist +ERROR: relation "rls_view" does not exist @@ -126279,7 +125290,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_carol + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM rls_view; --fail - permission denied. -ERROR: permission denied for table z1_blacklist +ERROR: relation "rls_view" does not exist @@ -126297,7 +125308,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_alice + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev GRANT SELECT ON z1_blacklist TO regress_rls_bob; SET SESSION AUTHORIZATION regress_rls_bob; +ERROR: at or near "regress_rls_bob": syntax error: unimplemented: this syntax @@ -126305,7 +125316,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_bob + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM rls_view; -NOTICE: f_leak => bbb - a | b @@ -126335,7 +125346,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_carol + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM rls_view; -NOTICE: f_leak => bbb - a | b @@ -126365,7 +125376,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_alice + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev REVOKE SELECT ON z1_blacklist FROM regress_rls_bob; DROP POLICY p3 ON z1; -SET SESSION AUTHORIZATION regress_rls_bob; @@ -126380,7 +125391,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_bob + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev DROP VIEW rls_view; +ERROR: relation "rls_view" does not exist -- @@ -126393,7 +125404,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_alice + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE VIEW rls_view WITH (security_invoker) AS SELECT * FROM z1 WHERE f_leak(b); +ERROR: at or near "with": syntax error @@ -126440,7 +125451,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_bob + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM rls_view; -NOTICE: f_leak => bbb -NOTICE: f_leak => dad @@ -126470,7 +125481,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_carol + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM rls_view; -NOTICE: f_leak => aba -NOTICE: f_leak => ccc @@ -126500,7 +125511,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_alice + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev DROP VIEW rls_view; +ERROR: relation "rls_view" does not exist SET SESSION AUTHORIZATION regress_rls_bob; @@ -126509,7 +125520,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_bob + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE VIEW rls_view WITH (security_invoker) AS SELECT * FROM z1 WHERE f_leak(b); +ERROR: at or near "with": syntax error @@ -126528,7 +125539,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_alice + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM rls_view; -NOTICE: f_leak => aba -NOTICE: f_leak => bbb @@ -126563,7 +125574,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_bob + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM rls_view; -NOTICE: f_leak => bbb -NOTICE: f_leak => dad @@ -126593,7 +125604,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_carol + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM rls_view; -NOTICE: f_leak => aba -NOTICE: f_leak => ccc @@ -126623,7 +125634,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_alice + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE POLICY p3 ON z1 AS RESTRICTIVE USING (a NOT IN (SELECT a FROM z1_blacklist)); +ERROR: at or near "policy": syntax error +DETAIL: source SQL: @@ -126637,7 +125648,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_bob + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM rls_view; --fail - permission denied. -ERROR: permission denied for table z1_blacklist +ERROR: relation "rls_view" does not exist @@ -126655,7 +125666,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_carol + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM rls_view; --fail - permission denied. -ERROR: permission denied for table z1_blacklist +ERROR: relation "rls_view" does not exist @@ -126673,7 +125684,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_alice + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev GRANT SELECT ON z1_blacklist TO regress_rls_bob; SET SESSION AUTHORIZATION regress_rls_bob; +ERROR: at or near "regress_rls_bob": syntax error: unimplemented: this syntax @@ -126681,7 +125692,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_bob + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM rls_view; -NOTICE: f_leak => bbb - a | b @@ -126711,7 +125722,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_carol + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM rls_view; --fail - permission denied. -ERROR: permission denied for table z1_blacklist +ERROR: relation "rls_view" does not exist @@ -126729,7 +125740,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_alice + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev GRANT SELECT ON z1_blacklist TO regress_rls_carol; SET SESSION AUTHORIZATION regress_rls_carol; +ERROR: at or near "regress_rls_carol": syntax error: unimplemented: this syntax @@ -126737,7 +125748,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_carol + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM rls_view; -NOTICE: f_leak => aba - a | b @@ -126767,7 +125778,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_bob + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev DROP VIEW rls_view; +ERROR: relation "rls_view" does not exist -- @@ -126779,7 +125790,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_alice + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE TABLE x1 (a int, b text, c text); GRANT ALL ON x1 TO PUBLIC; INSERT INTO x1 VALUES @@ -126829,7 +125840,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_bob + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM x1 WHERE f_leak(b) ORDER BY a ASC; -NOTICE: f_leak => abc -NOTICE: f_leak => bcd @@ -126872,7 +125883,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_carol + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM x1 WHERE f_leak(b) ORDER BY a ASC; -NOTICE: f_leak => cde -NOTICE: f_leak => fgh @@ -126936,7 +125947,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_alice + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE TABLE y1 (a int, b text); CREATE TABLE y2 (a int, b text); GRANT ALL ON y1, y2 TO regress_rls_bob; @@ -126987,7 +125998,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_alice + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE VIEW rls_sbv WITH (security_barrier) AS SELECT * FROM y1 WHERE f_leak(b); +ERROR: at or near "with": syntax error @@ -127016,7 +126027,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_bob + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE VIEW rls_sbv WITH (security_barrier) AS SELECT * FROM y1 WHERE f_leak(b); +ERROR: at or near "with": syntax error @@ -127047,7 +126058,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_alice + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev INSERT INTO y2 (SELECT x, public.fipshash(x::text) FROM generate_series(0,20) x); +ERROR: unknown function: public.fipshash() CREATE POLICY p2 ON y2 USING (a % 3 = 0); @@ -127069,7 +126080,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_bob + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM y2 WHERE f_leak(b); -NOTICE: f_leak => 5feceb66ffc86f38d952786c6d696c79 -NOTICE: f_leak => d4735e3a265e16eee03f59718b9b5d03 @@ -127352,7 +126363,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_bob + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev WITH cte1 AS MATERIALIZED (SELECT * FROM t1 WHERE f_leak(b)) SELECT * FROM cte1; -NOTICE: f_leak => 5feceb66ffc86f38d952786c6d696c79 -NOTICE: f_leak => d4735e3a265e16eee03f59718b9b5d03 @@ -127477,7 +126488,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_bob + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE TABLE t2 (a integer, b text); INSERT INTO t2 (SELECT * FROM t1); EXPLAIN (COSTS OFF) INSERT INTO t2 (SELECT * FROM t1); @@ -127577,7 +126588,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_alice + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE TABLE blog (id integer, author text, post text); CREATE TABLE comment (blog_id integer, message text); GRANT ALL ON blog, comment TO regress_rls_bob; @@ -127605,7 +126616,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_bob + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev -- Check RLS JOIN with Non-RLS. SELECT id, author, message FROM blog JOIN comment ON id = blog_id; id | author | message @@ -127639,7 +126650,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_alice + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE POLICY comment_1 ON comment USING (blog_id < 4); +ERROR: at or near "policy": syntax error +DETAIL: source SQL: @@ -127659,7 +126670,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_bob + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev -- Check RLS JOIN RLS SELECT id, author, message FROM blog JOIN comment ON id = blog_id; id | author | message @@ -127691,7 +126702,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_alice + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev DROP TABLE blog, comment; -- -- Default Deny Policy @@ -127765,7 +126776,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_alice + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM t1; - a | b -----+---------------------------------- @@ -127814,7 +126825,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_bob + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SET row_security TO ON; SELECT * FROM t1; - a | b @@ -127842,7 +126853,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_bob + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM t1; - a | b ----+--- @@ -127931,7 +126942,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_bob + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SET row_security TO OFF; COPY (SELECT * FROM copy_t ORDER BY a ASC) TO STDOUT WITH DELIMITER ','; --fail - would be affected by RLS -ERROR: query would be affected by row-level security policy for table "copy_t" @@ -127950,7 +126961,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_exempt_user + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SET row_security TO OFF; COPY (SELECT * FROM copy_t ORDER BY a ASC) TO STDOUT WITH DELIMITER ','; --ok -0,5feceb66ffc86f38d952786c6d696c79 @@ -127984,7 +126995,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_carol + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SET row_security TO OFF; COPY (SELECT * FROM copy_t ORDER BY a ASC) TO STDOUT WITH DELIMITER ','; --fail - would be affected by RLS -ERROR: query would be affected by row-level security policy for table "copy_t" @@ -128036,7 +127047,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_bob + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SET row_security TO OFF; COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --fail - would be affected by RLS -ERROR: query would be affected by row-level security policy for table "copy_rel_to" @@ -128049,7 +127060,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_exempt_user + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SET row_security TO OFF; COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --ok -1,6b86b273ff34fce19d6b804eff5a3f57 @@ -128063,7 +127074,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_carol + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SET row_security TO OFF; COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --fail - permission denied -ERROR: permission denied for table copy_rel_to @@ -128084,7 +127095,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +CREATE TABLE copy_rel_to_child () INHERITS (copy_rel_to) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev INSERT INTO copy_rel_to_child VALUES (1, 'one'), (2, 'two'); +ERROR: relation "copy_rel_to_child" does not exist -- Check COPY TO as Superuser/owner. @@ -128107,7 +127118,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_bob + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SET row_security TO OFF; COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --fail - would be affected by RLS -ERROR: query would be affected by row-level security policy for table "copy_rel_to" @@ -128120,7 +127131,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_exempt_user + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SET row_security TO OFF; COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --ok -1,6b86b273ff34fce19d6b804eff5a3f57 @@ -128134,7 +127145,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_carol + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SET row_security TO OFF; COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --fail - permission denied -ERROR: permission denied for table copy_rel_to @@ -128159,7 +127170,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_bob + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SET row_security TO OFF; COPY copy_t FROM STDIN; --fail - would be affected by RLS. -ERROR: query would be affected by row-level security policy for table "copy_t" @@ -128179,7 +127190,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowsecurity.out - +SET SESSION AUTHORIZATION regress_rls_carol + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SET row_security TO OFF; COPY copy_t FROM STDIN; --fail - permission denied. -ERROR: permission denied for table copy_t @@ -128964,7 +127975,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/gin.out --label=/ with (fastupdate = on, gin_pending_list_limit = 4096); +ERROR: unimplemented: storage parameter "fastupdate" +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/43299/v24.2 ++See: https://go.crdb.dev/issue-v/43299/dev insert into gin_test_tbl select array[1, 2, g] from generate_series(1, 20000) g; insert into gin_test_tbl select array[1, 3, g] from generate_series(1, 1000) g; select gin_clean_pending_list('gin_test_idx')>10 as many; -- flush the fastupdate buffers @@ -129139,7 +128150,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/gin.out --label=/ +returns table (explain_line json) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/100226/v24.2 ++See: https://go.crdb.dev/issue-v/100226/dev create function execute_text_query_index(query_sql text) returns setof text language plpgsql @@ -129149,7 +128160,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/gin.out --label=/ $$; +ERROR: unimplemented: set-returning PL/pgSQL functions are not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/105240/v24.2 ++See: https://go.crdb.dev/issue-v/105240/dev create function execute_text_query_heap(query_sql text) returns setof text language plpgsql @@ -129159,7 +128170,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/gin.out --label=/ $$; +ERROR: unimplemented: set-returning PL/pgSQL functions are not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/105240/v24.2 ++See: https://go.crdb.dev/issue-v/105240/dev -- check number of rows returned by index and removed by recheck select query, @@ -129340,7 +128351,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/gist.out --label= +create table gist_point_tbl(id int4, p point) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev create index gist_pointidx on gist_point_tbl using gist(p); +ERROR: relation "gist_point_tbl" does not exist -- Verify the fillfactor and buffering options @@ -129413,7 +128424,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/gist.out --label= +create table gist_tbl (b box, p point, c circle) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev insert into gist_tbl select box(point(0.05*i, 0.05*i), point(0.05*i, 0.05*i)), point(0.05*i, 0.05*i), @@ -129908,10 +128919,10 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/gist.out --label= +HINT: try \h select p from gist_tbl order by circle(p,1) <-> point(0,0) limit 1; -ERROR: lossy distance functions are not supported in index-only scans -+ERROR: at or near "-": syntax error ++ERROR: at or near "(": syntax error +DETAIL: source SQL: +select p from gist_tbl order by circle(p,1) <-> point(0,0) limit 1 -+ ^ ++ ^ -- Force an index build using buffering. create index gist_tbl_box_index_forcing_buffering on gist_tbl using gist (p) with (buffering=on, fillfactor=50); @@ -129954,7 +128965,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/gist.out --label= +create unlogged table gist_tbl (b box) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev create index gist_tbl_box_index on gist_tbl using gist (b); +ERROR: relation "gist_tbl" does not exist insert into gist_tbl @@ -129989,7 +129000,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/brin.out --label= + macaddrcol macaddr, + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/45813/v24.2 ++See: https://go.crdb.dev/issue-v/45813/dev INSERT INTO brintest SELECT repeat(stringu1, 8)::bytea, substr(stringu1, 1, 1)::"char", @@ -130013,7 +129024,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/brin.out --label= + format('%s:00:%s:00:%s:00', to_hex(odd), to_hex(even), to_hex(hundred))::macaddr, + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/45813/v24.2 ++See: https://go.crdb.dev/issue-v/45813/dev -- throw in some NULL's and different values INSERT INTO brintest (inetcol, cidrcol, int4rangecol) SELECT inet 'fe80::6e40:8ff:fea9:8c46' + tenthous, @@ -130027,7 +129038,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/brin.out --label= + cidr 'fe80::6e40:8ff:fea9:8c46' + tenthous, + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/18846/v24.2 ++See: https://go.crdb.dev/issue-v/18846/dev CREATE INDEX brinidx ON brintest USING brin ( byteacol, charcol, @@ -130099,7 +129110,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/brin.out --label= + format('%s:00:%s:00:%s:00', to_hex(odd), to_hex(even), to_hex(hundred))::macaddr, + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/45813/v24.2 ++See: https://go.crdb.dev/issue-v/45813/dev SELECT brin_desummarize_range('brinidx', 0); - brin_desummarize_range ------------------------- @@ -130476,7 +129487,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/join_hash.out --l +returns table (original int, final int) language plpgsql + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/100226/v24.2 ++See: https://go.crdb.dev/issue-v/100226/dev -- Make a simple relation with well distributed keys and correctly -- estimated size. create table simple as @@ -132132,7 +131143,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user8 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev GRANT pg_read_all_settings TO regress_priv_user9 WITH ADMIN OPTION; +ERROR: role/user "pg_read_all_settings" does not exist SET SESSION AUTHORIZATION regress_priv_user9; @@ -132141,7 +131152,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user9 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev GRANT pg_read_all_settings TO regress_priv_user10; +ERROR: role/user "pg_read_all_settings" does not exist SET SESSION AUTHORIZATION regress_priv_user8; @@ -132150,7 +131161,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user8 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev REVOKE pg_read_all_settings FROM regress_priv_user10 GRANTED BY regress_priv_user9; +ERROR: at or near "granted": syntax error +DETAIL: source SQL: @@ -132175,7 +131186,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user8 + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SET ROLE pg_read_all_settings; +ERROR: role/user "pg_read_all_settings" does not exist RESET ROLE; @@ -132202,7 +131213,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user8 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SET ROLE pg_read_all_settings; -- fail, no SET option any more -ERROR: permission denied to set role "pg_read_all_settings" +ERROR: role/user "pg_read_all_settings" does not exist @@ -132245,7 +131256,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev ALTER GROUP regress_priv_group2 ADD USER regress_priv_user2; -NOTICE: role "regress_priv_user2" has already been granted membership in role "regress_priv_group2" by role "regress_priv_user1" +ERROR: at or near "add": syntax error @@ -132335,7 +131346,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT session_user, current_user; - session_user | current_user ---------------------+-------------------- @@ -132382,7 +131393,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user2 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT session_user, current_user; - session_user | current_user ---------------------+-------------------- @@ -132530,7 +131541,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE TABLE atest12 as SELECT x AS a, 10001 - x AS b FROM generate_series(1,10000) x; CREATE INDEX ON atest12 (a); @@ -132576,7 +131587,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +CREATE OPERATOR <<< (procedure = leak, leftarg = integer, rightarg = integer, + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev -- views with leaky operator CREATE VIEW atest12v AS SELECT * FROM atest12 WHERE b <<< 5; @@ -132657,7 +131668,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user2 + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE FUNCTION leak2(integer,integer) RETURNS boolean AS $$begin raise notice 'leak % %', $1, $2; return $1 > $2; end$$ LANGUAGE plpgsql immutable; @@ -132669,7 +131680,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +CREATE OPERATOR >>> (procedure = leak2, leftarg = integer, rightarg = integer, + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev -- This should not show any "leak" notices before failing. EXPLAIN (COSTS OFF) SELECT * FROM atest12 WHERE a >>> 0; -ERROR: permission denied for table atest12 @@ -132758,7 +131769,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev GRANT SELECT (a, b) ON atest12 TO PUBLIC; +ERROR: at or near "(": syntax error +DETAIL: source SQL: @@ -132771,7 +131782,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user2 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev -- regress_priv_user2 should continue to get a good row estimate. EXPLAIN (COSTS OFF) SELECT * FROM atest12v x, atest12v y WHERE x.a = y.b; - QUERY PLAN @@ -132830,7 +131841,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user3 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE TABLE atest3 (one int, two int, three int); GRANT DELETE ON atest3 TO GROUP regress_priv_group2; +ERROR: at or near "group": syntax error @@ -132844,7 +131855,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM atest3; -- fail -ERROR: permission denied for table atest3 + one | two | three @@ -132871,7 +131882,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SAVEPOINT s1; +ERROR: current transaction is aborted, commands ignored until end of transaction block DELETE FROM atest3; -- ok because grant-level option is unchanged @@ -132896,7 +131907,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev DELETE FROM atest3; -- fail -ERROR: permission denied for table atest3 +ERROR: current transaction is aborted, commands ignored until end of transaction block @@ -132919,7 +131930,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev DELETE FROM atest3; -- also fail -ERROR: permission denied for table atest3 +ERROR: current transaction is aborted, commands ignored until end of transaction block @@ -132931,7 +131942,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user3 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE VIEW atestv1 AS SELECT * FROM atest1; -- ok /* The next *should* fail, but it's not implemented that way yet. */ CREATE VIEW atestv2 AS SELECT * FROM atest2; @@ -132952,7 +131963,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user4 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM atestv1; -- ok a | b ---+----- @@ -133035,7 +132046,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user2 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev -- Two complex cases: SELECT * FROM atestv3; -- fail -ERROR: permission denied for view atestv3 @@ -133067,7 +132078,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE TABLE atest5 (one int, two int unique, three int, four int unique); CREATE TABLE atest6 (one int, two int, blue int); GRANT SELECT (one), INSERT (two), UPDATE (three) ON atest5 TO regress_priv_user4; @@ -133089,7 +132100,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user4 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM atest5; -- fail -ERROR: permission denied for table atest5 + one | two | three | four @@ -133320,7 +132331,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev GRANT SELECT (one,two) ON atest6 TO regress_priv_user4; +ERROR: at or near "(": syntax error +DETAIL: source SQL: @@ -133333,7 +132344,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user4 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT one, two FROM atest5 NATURAL JOIN atest6; -- fail still -ERROR: permission denied for table atest5 + one | two @@ -133346,7 +132357,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev GRANT SELECT (two) ON atest5 TO regress_priv_user4; +ERROR: at or near "(": syntax error +DETAIL: source SQL: @@ -133359,7 +132370,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user4 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT one, two FROM atest5 NATURAL JOIN atest6; -- ok now one | two -----+----- @@ -133411,7 +132422,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev GRANT INSERT (four) ON atest5 TO regress_priv_user4; +ERROR: at or near "(": syntax error +DETAIL: source SQL: @@ -133424,7 +132435,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user4 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev INSERT INTO atest5(four) VALUES (4) ON CONFLICT (four) DO UPDATE set three = 3; -- fails (due to SELECT) -ERROR: permission denied for table atest5 INSERT INTO atest5(four) VALUES (4) ON CONFLICT ON CONSTRAINT atest5_four_key DO UPDATE set three = 3; -- fails (due to SELECT) @@ -133438,7 +132449,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev GRANT SELECT (four) ON atest5 TO regress_priv_user4; +ERROR: at or near "(": syntax error +DETAIL: source SQL: @@ -133451,7 +132462,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user4 + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev INSERT INTO atest5(four) VALUES (4) ON CONFLICT (four) DO UPDATE set three = 3; -- ok INSERT INTO atest5(four) VALUES (4) ON CONFLICT ON CONSTRAINT atest5_four_key DO UPDATE set three = 3; -- ok SET SESSION AUTHORIZATION regress_priv_user1; @@ -133460,7 +132471,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev REVOKE ALL (one) ON atest5 FROM regress_priv_user4; +ERROR: at or near "(": syntax error +DETAIL: source SQL: @@ -133479,7 +132490,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user4 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT one FROM atest5; -- fail -ERROR: permission denied for table atest5 + one @@ -133507,7 +132518,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE TABLE mtarget (a int, b text); CREATE TABLE msource (a int, b text); INSERT INTO mtarget VALUES (1, 'init1'), (2, 'init2'); @@ -133542,7 +132553,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user4 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev -- -- test source privileges -- @@ -133595,7 +132606,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev GRANT SELECT (b) ON msource TO regress_priv_user4; +ERROR: at or near "(": syntax error +DETAIL: source SQL: @@ -133608,7 +132619,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user4 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev -- should now be ok BEGIN; MERGE INTO mtarget t USING msource s ON t.a = s.a @@ -133680,7 +132691,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev GRANT DELETE ON mtarget TO regress_priv_user4; -- should be ok now BEGIN; @@ -133699,7 +132710,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE TABLE t1 (c1 int, c2 int, c3 int check (c3 < 5), primary key (c1, c2)); GRANT SELECT (c1) ON t1 TO regress_priv_user2; +ERROR: at or near "(": syntax error @@ -133731,7 +132742,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user2 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev INSERT INTO t1 (c1, c2) VALUES (1, 1); -- fail, but row not shown ERROR: duplicate key value violates unique constraint "t1_pkey" +DETAIL: Key (c1, c2)=(1, 1) already exists. @@ -133760,7 +132771,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev DROP TABLE t1; -- check error reporting with column privs on a partitioned table CREATE TABLE errtst(a text, b text NOT NULL, c text, secret1 text, secret2 text) PARTITION BY LIST (a); @@ -133809,7 +132820,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user2 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev -- Perform a few updates that violate the NOT NULL constraint. Make sure -- the error messages don't leak the secret fields. -- simple insert. @@ -133846,7 +132857,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev DROP TABLE errtst; +ERROR: relation "errtst" does not exist -- test column-level privileges when involved with DELETE @@ -133856,7 +132867,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev ALTER TABLE atest6 ADD COLUMN three integer; GRANT DELETE ON atest5 TO regress_priv_user3; GRANT SELECT (two) ON atest5 TO regress_priv_user3; @@ -133883,7 +132894,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user4 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT atest6 FROM atest6; -- fail -ERROR: permission denied for table atest6 + atest6 @@ -133902,7 +132913,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev ALTER TABLE atest6 DROP COLUMN three; SET SESSION AUTHORIZATION regress_priv_user4; +ERROR: at or near "regress_priv_user4": syntax error: unimplemented: this syntax @@ -133910,7 +132921,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user4 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT atest6 FROM atest6; -- ok atest6 -------- @@ -133923,7 +132934,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev ALTER TABLE atest6 DROP COLUMN two; REVOKE SELECT (one,blue) ON atest6 FROM regress_priv_user4; +ERROR: at or near "(": syntax error @@ -133937,7 +132948,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user4 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM atest6; -- fail -ERROR: permission denied for table atest6 + one | blue @@ -133956,7 +132967,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user3 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev DELETE FROM atest5 WHERE one = 1; -- fail -ERROR: permission denied for table atest5 DELETE FROM atest5 WHERE two = 2; -- ok @@ -133967,7 +132978,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE TABLE atestp1 (f1 int, f2 int); CREATE TABLE atestp2 (fx int, fy int); CREATE TABLE atestc (fz int) INHERITS (atestp1, atestp2); @@ -133976,7 +132987,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +CREATE TABLE atestc (fz int) INHERITS (atestp1, atestp2) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev GRANT SELECT(fx,fy,tableoid) ON atestp2 TO regress_priv_user2; +ERROR: at or near "(": syntax error +DETAIL: source SQL: @@ -133995,7 +133006,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user2 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT fx FROM atestp2; -- ok fx ---- @@ -134011,7 +133022,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev GRANT SELECT(fy,tableoid) ON atestc TO regress_priv_user2; +ERROR: at or near "(": syntax error +DETAIL: source SQL: @@ -134024,11 +133035,11 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user2 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT fx FROM atestp2; -- still ok fx ---- -@@ -1077,69 +1855,145 @@ +@@ -1077,69 +1855,127 @@ -- child's permissions do not apply when operating on parent SET SESSION AUTHORIZATION regress_priv_user1; @@ -134037,7 +133048,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev REVOKE ALL ON atestc FROM regress_priv_user2; +ERROR: cannot get the privileges on the grant targets: cannot determine the target type of the GRANT statement: relation "atestc" does not exist GRANT ALL ON atestp1 TO regress_priv_user2; @@ -134047,7 +133058,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user2 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT f2 FROM atestp1; -- ok f2 ---- @@ -134112,7 +133123,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev GRANT USAGE ON LANGUAGE sql TO regress_priv_user2; -- fail -WARNING: no privileges were granted for "sql" +ERROR: at or near "sql": syntax error @@ -134128,7 +133139,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +CREATE AGGREGATE priv_testagg1(int) (sfunc = int4pl, stype = int4) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev CREATE PROCEDURE priv_testproc1(int) AS 'select $1;' LANGUAGE sql; REVOKE ALL ON FUNCTION priv_testfunc1(int), priv_testfunc2(int), priv_testagg1(int) FROM PUBLIC; +ERROR: unknown function: priv_testagg1() @@ -134158,36 +133169,18 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- CREATE FUNCTION priv_testfunc4(boolean) RETURNS text AS 'select col1 from atest2 where col2 = $1;' LANGUAGE sql SECURITY DEFINER; -+ERROR: at or near "definer": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE FUNCTION priv_testfunc4(boolean) RETURNS text -+ AS 'select col1 from atest2 where col2 = $1;' -+ LANGUAGE sql SECURITY DEFINER -+ ^ -+HINT: You have attempted to use a feature that is not yet implemented. -+ -+Please check the public issue tracker to check whether this problem is -+already tracked. If you cannot find it there, please report the error -+with details by creating a new issue. -+ -+If you would rather not post publicly, please contact us directly -+using the support form. -+ -+We appreciate your feedback. 
-+ GRANT EXECUTE ON FUNCTION priv_testfunc4(boolean) TO regress_priv_user3; -+ERROR: unknown function: priv_testfunc4() SET SESSION AUTHORIZATION regress_priv_user2; +ERROR: at or near "regress_priv_user2": syntax error: unimplemented: this syntax +DETAIL: source SQL: +SET SESSION AUTHORIZATION regress_priv_user2 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT priv_testfunc1(5), priv_testfunc2(5); -- ok priv_testfunc1 | priv_testfunc2 ----------------+---------------- -@@ -1147,30 +2001,45 @@ +@@ -1147,30 +1983,49 @@ (1 row) CREATE FUNCTION priv_testfunc3(int) RETURNS int AS 'select 2 * $1;' LANGUAGE sql; -- fail @@ -134209,7 +133202,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user3 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT priv_testfunc1(5); -- fail -ERROR: permission denied for function priv_testfunc1 + priv_testfunc1 @@ -134227,28 +133220,28 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- + SELECT col1 FROM atest2 WHERE col2 = true; -- fail -ERROR: permission denied for table atest2 --SELECT priv_testfunc4(true); -- ok -- priv_testfunc4 ------------------ -- bar --(1 row) + col1 +------ +(0 rows) ++ + SELECT priv_testfunc4(true); -- ok + priv_testfunc4 + ---------------- +- bar ++ + (1 row) -+SELECT priv_testfunc4(true); -- ok -+ERROR: unknown function: priv_testfunc4() SET SESSION AUTHORIZATION regress_priv_user4; +ERROR: at or near "regress_priv_user4": syntax error: unimplemented: this syntax +DETAIL: source SQL: +SET SESSION AUTHORIZATION regress_priv_user4 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT priv_testfunc1(5); -- ok priv_testfunc1 ---------------- -@@ -1178,22 +2047,30 @@ +@@ -1178,22 +2033,30 @@ (1 row) SELECT priv_testagg1(x) FROM (VALUES (1), (2), (3)) _(x); -- ok @@ -134271,7 +133264,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +DROP AGGREGATE priv_testagg1(int) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev DROP PROCEDURE priv_testproc1(int); -- fail -ERROR: must be owner of procedure priv_testproc1 \c - @@ -134287,7 +133280,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- -- verify privilege checks on array-element coercions BEGIN; SELECT '{1}'::int4[]::int8[]; -@@ -1203,15 +2080,18 @@ +@@ -1203,15 +2066,18 @@ (1 row) REVOKE ALL ON FUNCTION int8(integer) FROM PUBLIC; @@ -134305,14 +133298,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user4 + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT '{1}'::int4[]::int8[]; --other user, fail -ERROR: permission denied for function int8 +ERROR: current transaction is aborted, commands ignored until end of transaction block ROLLBACK; -- privileges on types -- switch to superuser -@@ -1220,101 +2100,338 @@ +@@ -1220,101 +2086,338 @@ REVOKE USAGE ON TYPE priv_testtype1 FROM PUBLIC; GRANT USAGE ON TYPE priv_testtype1 TO regress_priv_user2; GRANT USAGE ON TYPE _priv_testtype1 TO regress_priv_user2; -- fail @@ -134331,7 +133324,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +CREATE DOMAIN priv_testdomain1 AS int + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev REVOKE USAGE on DOMAIN priv_testdomain1 FROM PUBLIC; +ERROR: at or near "priv_testdomain1": syntax error +DETAIL: source SQL: @@ -134352,7 +133345,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev -- commands that should fail CREATE AGGREGATE priv_testagg1a(priv_testdomain1) (sfunc = int4_sum, stype = bigint); -ERROR: permission denied for type priv_testdomain1 @@ -134361,7 +133354,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +CREATE AGGREGATE priv_testagg1a(priv_testdomain1) (sfunc = int4_sum, stype = bigint) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev CREATE DOMAIN priv_testdomain2a AS priv_testdomain1; -ERROR: permission denied for type priv_testdomain1 +ERROR: at or near "as": syntax error: unimplemented: this syntax @@ -134369,14 +133362,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +CREATE DOMAIN priv_testdomain2a AS priv_testdomain1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev CREATE DOMAIN priv_testdomain3a AS int; +ERROR: at or near "as": syntax error: unimplemented: this syntax +DETAIL: source SQL: +CREATE DOMAIN priv_testdomain3a AS int + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev CREATE FUNCTION castfunc(int) RETURNS priv_testdomain3a AS $$ SELECT $1::priv_testdomain3a $$ LANGUAGE SQL; +ERROR: type "priv_testdomain3a" does not exist CREATE CAST (priv_testdomain1 AS priv_testdomain3a) WITH FUNCTION castfunc(int); @@ -134415,7 +133408,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +DROP DOMAIN priv_testdomain3a + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev CREATE FUNCTION priv_testfunc5a(a priv_testdomain1) RETURNS int LANGUAGE SQL AS $$ SELECT $1 $$; -ERROR: permission denied for type priv_testdomain1 +ERROR: type "priv_testdomain1" does not exist @@ -134429,7 +133422,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +CREATE OPERATOR !+! 
(PROCEDURE = int4pl, LEFTARG = priv_testdomain1, RIGHTARG = priv_testdomain1) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev CREATE TABLE test5a (a int, b priv_testdomain1); -ERROR: permission denied for type priv_testdomain1 +ERROR: type "priv_testdomain1" does not exist @@ -134460,7 +133453,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +ALTER TYPE test8a ADD ATTRIBUTE c priv_testdomain1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/48701/v24.2 ++See: https://go.crdb.dev/issue-v/48701/dev ALTER TYPE test8a ALTER ATTRIBUTE b TYPE priv_testdomain1; -ERROR: permission denied for type priv_testdomain1 +ERROR: at or near "EOF": syntax error: unimplemented: this syntax @@ -134468,7 +133461,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +ALTER TYPE test8a ALTER ATTRIBUTE b TYPE priv_testdomain1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/48701/v24.2 ++See: https://go.crdb.dev/issue-v/48701/dev CREATE TABLE test11a AS (SELECT 1::priv_testdomain1 AS a); -ERROR: permission denied for type priv_testdomain1 +ERROR: type "priv_testdomain1" does not exist @@ -134480,7 +133473,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user2 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev -- commands that should succeed CREATE AGGREGATE priv_testagg1b(priv_testdomain1) (sfunc = int4_sum, stype = bigint); +ERROR: at or near "priv_testagg1b": syntax error: unimplemented: this syntax @@ -134488,21 +133481,21 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +CREATE AGGREGATE priv_testagg1b(priv_testdomain1) (sfunc = int4_sum, stype = bigint) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev CREATE DOMAIN priv_testdomain2b AS priv_testdomain1; +ERROR: at or near "as": syntax error: unimplemented: this syntax +DETAIL: source SQL: +CREATE DOMAIN priv_testdomain2b AS priv_testdomain1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev CREATE DOMAIN priv_testdomain3b AS int; +ERROR: at or near "as": syntax error: unimplemented: this syntax +DETAIL: source SQL: +CREATE DOMAIN priv_testdomain3b AS int + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev CREATE FUNCTION castfunc(int) RETURNS priv_testdomain3b AS $$ SELECT $1::priv_testdomain3b $$ LANGUAGE SQL; +ERROR: type "priv_testdomain3b" does not exist CREATE CAST (priv_testdomain1 AS priv_testdomain3b) WITH FUNCTION castfunc(int); @@ -134532,7 +133525,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +CREATE OPERATOR !! (PROCEDURE = priv_testfunc5b, RIGHTARG = priv_testdomain1) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev CREATE TABLE test5b (a int, b priv_testdomain1); +ERROR: type "priv_testdomain1" does not exist CREATE TABLE test6b OF priv_testtype1; @@ -134556,14 +133549,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +ALTER TYPE test8b ADD ATTRIBUTE c priv_testdomain1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/48701/v24.2 ++See: https://go.crdb.dev/issue-v/48701/dev ALTER TYPE test8b ALTER ATTRIBUTE b TYPE priv_testdomain1; +ERROR: at or near "EOF": syntax error: unimplemented: this syntax +DETAIL: source SQL: +ALTER TYPE test8b ALTER ATTRIBUTE b TYPE priv_testdomain1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/48701/v24.2 ++See: https://go.crdb.dev/issue-v/48701/dev CREATE TABLE test11b AS (SELECT 1::priv_testdomain1 AS a); +ERROR: type "priv_testdomain1" does not exist REVOKE ALL ON TYPE priv_testtype1 FROM PUBLIC; @@ -134575,14 +133568,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +DROP AGGREGATE priv_testagg1b(priv_testdomain1) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev DROP DOMAIN priv_testdomain2b; +ERROR: at or near "priv_testdomain2b": syntax error: unimplemented: this syntax +DETAIL: source SQL: +DROP DOMAIN priv_testdomain2b + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev DROP OPERATOR !! (NONE, priv_testdomain1); +ERROR: at or near "!": syntax error: unimplemented: this syntax +DETAIL: source SQL: @@ -134647,7 +133640,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +DROP DOMAIN priv_testdomain3b + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev DROP TABLE test11b; +ERROR: relation "test11b" does not exist DROP TYPE priv_testtype1; -- ok @@ -134658,7 +133651,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +DROP DOMAIN priv_testdomain1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev -- truncate SET SESSION AUTHORIZATION regress_priv_user5; +ERROR: at or near "regress_priv_user5": syntax error: unimplemented: this syntax @@ -134666,14 +133659,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user5 + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev TRUNCATE atest2; -- ok TRUNCATE atest3; -- fail -ERROR: permission denied for table atest3 -- has_table_privilege function -- bad-input checks select has_table_privilege(NULL,'pg_authid','select'); -@@ -1324,11 +2441,11 @@ +@@ -1324,11 +2427,11 @@ (1 row) select has_table_privilege('pg_shad','select'); @@ -134688,7 +133681,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- select has_table_privilege(-999999,'pg_authid','update'); has_table_privilege --------------------- -@@ -1352,21 +2469,21 @@ +@@ -1352,21 +2455,21 @@ select has_table_privilege(current_user,'pg_authid','insert'); has_table_privilege --------------------- @@ -134713,7 +133706,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- (1 row) -- 'rule' privilege no longer exists, but for backwards compatibility -@@ -1398,25 +2515,25 @@ +@@ -1398,25 +2501,25 @@ (select oid from pg_roles where rolname = current_user) as t2; has_table_privilege --------------------- @@ -134743,7 +133736,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- (1 row) select has_table_privilege(t1.oid,'select') -@@ -1430,11 +2547,17 @@ +@@ -1430,11 +2533,17 @@ from (select oid from pg_class where relname = 'pg_authid') as t1; has_table_privilege --------------------- @@ -134758,11 +133751,11 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user3 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev select has_table_privilege(current_user,'pg_class','select'); has_table_privilege --------------------- -@@ -1465,7 +2588,7 @@ +@@ -1465,7 +2574,7 @@ from (select oid from pg_class where relname = 'pg_class') as t1; has_table_privilege --------------------- @@ -134771,7 +133764,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- (1 row) select has_table_privilege(t2.oid,t1.oid,'select') -@@ -1525,28 +2648,28 @@ +@@ -1525,28 +2634,28 @@ select has_table_privilege(current_user,'atest1','insert'); has_table_privilege --------------------- @@ -134804,7 +133797,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- (1 row) select has_table_privilege(t2.oid,t1.oid,'select') -@@ -1562,25 +2685,25 @@ +@@ -1562,25 +2671,25 @@ (select oid from pg_roles where rolname = current_user) as t2; has_table_privilege --------------------- @@ -134834,7 +133827,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- (1 row) select has_table_privilege(t1.oid,'select') -@@ -1594,7 +2717,7 @@ +@@ -1594,7 +2703,7 @@ from (select oid from pg_class where relname = 'atest1') as t1; has_table_privilege --------------------- @@ -134843,7 +133836,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- (1 row) -- has_column_privilege function -@@ -1606,7 +2729,7 @@ +@@ -1606,7 +2715,7 @@ (1 row) select has_column_privilege('pg_authid','nosuchcol','select'); @@ -134852,7 +133845,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- select has_column_privilege(9999,'nosuchcol','select'); has_column_privilege ---------------------- -@@ -1620,11 +2743,7 @@ +@@ -1620,11 +2729,7 @@ (1 row) select has_column_privilege('pg_authid',99::int2,'select'); @@ -134865,7 
+133858,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- select has_column_privilege(9999,99::int2,'select'); has_column_privilege ---------------------- -@@ -1634,65 +2753,69 @@ +@@ -1634,65 +2739,69 @@ create temp table mytable(f1 int, f2 int, f3 int); alter table mytable drop column f2; select has_column_privilege('mytable','f2','select'); @@ -134915,7 +133908,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE TABLE atest4 (a int); GRANT SELECT ON atest4 TO regress_priv_user2 WITH GRANT OPTION; GRANT UPDATE ON atest4 TO regress_priv_user2; @@ -134931,7 +133924,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user2 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev GRANT SELECT ON atest4 TO regress_priv_user3; GRANT UPDATE ON atest4 TO regress_priv_user3; -- fail -WARNING: no privileges were granted for "atest4" @@ -134941,7 +133934,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev REVOKE SELECT ON atest4 FROM regress_priv_user3; -- does nothing SELECT has_table_privilege('regress_priv_user3', 'atest4', 'SELECT'); -- true has_table_privilege @@ -134966,7 +133959,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- (1 row) SELECT has_table_privilege('regress_priv_user3', 'atest4', 'SELECT'); -- false -@@ -1704,7 +2827,7 @@ +@@ -1704,7 +2813,7 @@ SELECT has_table_privilege('regress_priv_user1', 'atest4', 'SELECT WITH GRANT OPTION'); -- true has_table_privilege --------------------- @@ -134975,11 +133968,12 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- (1 row) -- security-restricted operations -@@ -1720,6 +2843,18 @@ +@@ -1720,6 +2829,19 @@ RETURN $1; END; $$ LANGUAGE plpgsql IMMUTABLE; +ERROR: unimplemented: attempted to use a PL/pgSQL statement that is not yet supported ++DETAIL: stmt_assert is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. + +Please check the public issue tracker to check whether this problem is @@ -134994,7 +133988,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- -- Create a table owned by regress_sro_user CREATE TABLE sro_tab (a int); ALTER TABLE sro_tab OWNER TO regress_sro_user; -@@ -1727,84 +2862,197 @@ +@@ -1727,84 +2849,199 @@ -- Create an expression index with a predicate CREATE INDEX sro_idx ON sro_tab ((sro_ifun(a) + sro_ifun(0))) WHERE sro_ifun(a + 10) > sro_ifun(10); @@ -135108,7 +134102,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_sro_user + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE FUNCTION unwanted_grant() RETURNS void LANGUAGE sql AS 'GRANT regress_priv_group2 TO regress_sro_user'; +ERROR: unimplemented: GRANT usage inside a function definition @@ -135156,12 +134150,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_sro_user + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev -- INSERT to this table will queue a GRANT at end of transaction CREATE TABLE sro_trojan_table (); CREATE FUNCTION sro_trojan() RETURNS trigger LANGUAGE plpgsql AS 'BEGIN PERFORM unwanted_grant(); RETURN NULL; END'; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev CREATE CONSTRAINT TRIGGER t AFTER INSERT ON sro_trojan_table INITIALLY DEFERRED FOR EACH ROW EXECUTE PROCEDURE sro_trojan(); +ERROR: at or near "t": syntax error: unimplemented: this syntax @@ -135169,7 +134165,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +CREATE CONSTRAINT TRIGGER t AFTER INSERT ON sro_trojan_table + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/28296/dev -- Now, REFRESH will issue such an INSERT, queueing the GRANT CREATE OR REPLACE FUNCTION mv_action() RETURNS bool LANGUAGE sql AS 'INSERT INTO sro_trojan_table DEFAULT VALUES; SELECT true'; @@ -135212,11 +134208,11 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_sro_user + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE FUNCTION unwanted_grant_nofail(int) RETURNS int IMMUTABLE LANGUAGE plpgsql AS $$ BEGIN -@@ -1814,52 +3062,105 @@ +@@ -1814,52 +3051,102 @@ EXCEPTION WHEN OTHERS THEN RETURN 2; END$$; @@ -135252,13 +134248,10 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user4 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE FUNCTION dogrant_ok() RETURNS void LANGUAGE sql SECURITY DEFINER AS 'GRANT regress_priv_group2 TO regress_priv_user5'; -+ERROR: at or near "definer": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE FUNCTION dogrant_ok() RETURNS void LANGUAGE sql SECURITY DEFINER AS -+ ^ ++ERROR: unimplemented: GRANT usage inside a function definition +HINT: You have attempted to use a feature that is not yet implemented. + +Please check the public issue tracker to check whether this problem is @@ -135284,7 +134277,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev GRANT regress_priv_group2 TO regress_priv_user5; -- fails: no ADMIN OPTION -ERROR: permission denied to grant role "regress_priv_group2" -DETAIL: Only roles with the ADMIN option on role "regress_priv_group2" may grant this role. @@ -135309,7 +134302,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_group2 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev GRANT regress_priv_group2 TO regress_priv_user5; -- fails: no self-admin -ERROR: permission denied to grant role "regress_priv_group2" -DETAIL: Only roles with the ADMIN option on role "regress_priv_group2" may grant this role. @@ -135320,7 +134313,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user4 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev DROP FUNCTION dogrant_ok(); +ERROR: unknown function: dogrant_ok() REVOKE regress_priv_group2 FROM regress_priv_user5; @@ -135338,7 +134331,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- SELECT has_sequence_privilege('regress_priv_user1', 'x_seq', 'SELECT'); has_sequence_privilege ------------------------ -@@ -1867,6 +3168,12 @@ +@@ -1867,6 +3154,12 @@ (1 row) SET SESSION AUTHORIZATION regress_priv_user2; @@ -135347,11 +134340,11 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user2 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT has_sequence_privilege('x_seq', 'USAGE'); has_sequence_privilege ------------------------ -@@ -1876,203 +3183,286 @@ +@@ -1876,203 +3169,286 @@ -- largeobject privilege tests \c - SET SESSION AUTHORIZATION regress_priv_user1; @@ -135360,7 +134353,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT lo_create(1001); - lo_create ------------ @@ -135454,7 +134447,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user2 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT lo_create(2001); - lo_create ------------ @@ -135610,7 +134603,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user3 + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT loread(lo_open(1001, x'40000'::int), 32); - loread ------------- @@ -135674,7 +134667,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user4 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT loread(lo_open(1002, x'40000'::int), 32); -- to be denied -ERROR: permission denied for large object 1002 +ERROR: lexical error: invalid hexadecimal bytes literal @@ -135728,7 +134721,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user4 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT loread(lo_open(1002, x'40000'::int), 32); - loread --------- @@ -135772,7 +134765,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- -- don't allow unpriv users to access pg_largeobject contents \c - SELECT * FROM pg_largeobject LIMIT 0; -@@ -2081,68 +3471,80 @@ +@@ -2081,68 +3457,80 @@ (0 rows) SET SESSION AUTHORIZATION regress_priv_user1; @@ -135781,7 +134774,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM pg_largeobject LIMIT 0; -- to be denied -ERROR: permission denied for table pg_largeobject + loid | pageno | data @@ -135841,7 +134834,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev TABLE information_schema.enabled_roles ORDER BY role_name COLLATE "C"; - role_name ---------------------- @@ -135872,7 +134865,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_priv_user2 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev TABLE information_schema.enabled_roles; - role_name --------------------- @@ -135886,7 +134879,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- ROLLBACK; -- test default ACLs \c - -@@ -2220,69 +3622,34 @@ +@@ -2220,69 +3608,34 @@ (1 row) ALTER DEFAULT PRIVILEGES FOR ROLE regress_priv_user1 REVOKE EXECUTE ON FUNCTIONS FROM public; @@ -135967,7 +134960,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- -- -- Testing blanket default grants is very hazardous since it might change -- the privileges attached to objects created by concurrent regression tests. 
-@@ -2300,7 +3667,7 @@ +@@ -2300,7 +3653,7 @@ SELECT has_schema_privilege('regress_priv_user6', 'testns2', 'USAGE'); -- yes has_schema_privilege ---------------------- @@ -135976,7 +134969,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- (1 row) SELECT has_schema_privilege('regress_priv_user2', 'testns2', 'CREATE'); -- no -@@ -2354,19 +3721,18 @@ +@@ -2354,19 +3707,18 @@ classid = 'pg_default_acl'::regclass; count ------- @@ -135987,7 +134980,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- DROP OWNED BY regress_priv_user2, regress_priv_user2; +ERROR: unimplemented: drop owned by is not yet implemented +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/55381/v24.2 ++See: https://go.crdb.dev/issue-v/55381/dev SELECT count(*) FROM pg_shdepend WHERE deptype = 'a' AND refobjid = 'regress_priv_user2'::regrole AND @@ -136001,7 +134994,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- ROLLBACK; CREATE SCHEMA testns5; SELECT has_schema_privilege('regress_priv_user2', 'testns5', 'USAGE'); -- no -@@ -2384,30 +3750,59 @@ +@@ -2384,30 +3736,59 @@ SET ROLE regress_priv_user1; CREATE FUNCTION testns.foo() RETURNS int AS 'select 1' LANGUAGE sql; CREATE AGGREGATE testns.agg1(int) (sfunc = int4pl, stype = int4); @@ -136010,7 +135003,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +CREATE AGGREGATE testns.agg1(int) (sfunc = int4pl, stype = int4) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev CREATE PROCEDURE testns.bar() AS 'select 1' LANGUAGE sql; SELECT has_function_privilege('regress_priv_user2', 'testns.foo()', 'EXECUTE'); -- no has_function_privilege @@ -136057,18 +135050,18 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +DROP AGGREGATE testns.agg1(int) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev CREATE AGGREGATE testns.agg1(int) (sfunc = int4pl, stype = int4); +ERROR: at or near "testns": syntax error: unimplemented: this syntax +DETAIL: source SQL: +CREATE AGGREGATE testns.agg1(int) (sfunc = int4pl, stype = int4) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev DROP PROCEDURE testns.bar(); CREATE PROCEDURE testns.bar() AS 'select 1' LANGUAGE sql; SELECT has_function_privilege('regress_priv_user2', 'testns.foo()', 'EXECUTE'); -- yes -@@ -2417,11 +3812,7 @@ +@@ -2417,11 +3798,7 @@ (1 row) SELECT has_function_privilege('regress_priv_user2', 'testns.agg1(int)', 'EXECUTE'); -- yes @@ -136081,7 +135074,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- SELECT has_function_privilege('regress_priv_user2', 'testns.bar()', 'EXECUTE'); -- yes (counts as function here) has_function_privilege ------------------------ -@@ -2430,36 +3821,57 @@ +@@ -2430,36 +3807,57 @@ DROP FUNCTION testns.foo(); DROP AGGREGATE testns.agg1(int); @@ -136090,7 +135083,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +DROP AGGREGATE testns.agg1(int) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev DROP PROCEDURE testns.bar(); ALTER DEFAULT PRIVILEGES FOR ROLE regress_priv_user1 REVOKE USAGE ON TYPES FROM public; CREATE DOMAIN testns.priv_testdomain1 AS int; @@ -136099,7 +135092,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +CREATE DOMAIN testns.priv_testdomain1 AS int + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev SELECT has_type_privilege('regress_priv_user2', 'testns.priv_testdomain1', 'USAGE'); -- no - has_type_privilege --------------------- @@ -136114,14 +135107,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +DROP DOMAIN testns.priv_testdomain1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev CREATE DOMAIN testns.priv_testdomain1 AS int; +ERROR: at or near "as": syntax error: unimplemented: this syntax +DETAIL: source SQL: +CREATE DOMAIN testns.priv_testdomain1 AS int + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev SELECT has_type_privilege('regress_priv_user2', 'testns.priv_testdomain1', 'USAGE'); -- yes - has_type_privilege --------------------- @@ -136135,7 +135128,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +DROP DOMAIN testns.priv_testdomain1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev RESET ROLE; SELECT count(*) FROM pg_default_acl d LEFT JOIN pg_namespace n ON defaclnamespace = n.oid @@ -136151,7 +135144,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- DROP SCHEMA testns2 CASCADE; DROP SCHEMA testns3 CASCADE; DROP SCHEMA testns4 CASCADE; -@@ -2510,6 +3922,12 @@ +@@ -2510,6 +3908,12 @@ CREATE FUNCTION testns.priv_testfunc(int) RETURNS int AS 'select 3 * $1;' LANGUAGE sql; CREATE AGGREGATE testns.priv_testagg(int) (sfunc = int4pl, stype = int4); @@ -136160,11 +135153,11 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +CREATE AGGREGATE testns.priv_testagg(int) (sfunc = int4pl, stype = int4) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev CREATE PROCEDURE testns.priv_testproc(int) AS 'select 3' LANGUAGE sql; SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testfunc(int)', 'EXECUTE'); -- true by default has_function_privilege -@@ -2518,11 +3936,7 @@ +@@ -2518,11 +3922,7 @@ (1 row) SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testagg(int)', 'EXECUTE'); -- true by default @@ -136177,7 +135170,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testproc(int)', 'EXECUTE'); -- true by default has_function_privilege ------------------------ -@@ -2533,15 +3947,11 @@ +@@ -2533,15 +3933,11 @@ SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testfunc(int)', 'EXECUTE'); -- false has_function_privilege ------------------------ @@ -136195,7 +135188,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testproc(int)', 'EXECUTE'); -- still true, not a function has_function_privilege ------------------------ -@@ -2552,10 +3962,15 @@ +@@ -2552,10 +3948,15 @@ SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testproc(int)', 'EXECUTE'); -- now false has_function_privilege ------------------------ @@ -136212,7 +135205,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testfunc(int)', 'EXECUTE'); -- true has_function_privilege ------------------------ -@@ -2563,11 +3978,7 @@ +@@ -2563,11 +3964,7 @@ (1 row) SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testagg(int)', 'EXECUTE'); -- true @@ -136225,7 +135218,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testproc(int)', 'EXECUTE'); -- true has_function_privilege ------------------------ -@@ -2575,38 +3986,52 @@ +@@ -2575,38 +3972,52 @@ (1 row) DROP SCHEMA testns CASCADE; @@ -136290,7 +135283,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- -- test that dependent privileges are revoked (or not) properly \c - set session role regress_priv_user1; -@@ -2620,59 +4045,180 @@ +@@ -2620,59 +4031,179 @@ set session role regress_priv_user4; grant select on dep_priv_test to regress_priv_user5; \dp dep_priv_test @@ -136483,10 +135476,9 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +DROP AGGREGATE priv_testagg1(int) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev DROP FUNCTION priv_testfunc2(int); DROP FUNCTION priv_testfunc4(boolean); -+ERROR: unknown function: priv_testfunc4() DROP PROCEDURE priv_testproc1(int); +ERROR: unknown procedure: priv_testproc1() DROP VIEW atestv0; @@ -136502,7 +135494,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- DROP TABLE atest1; DROP TABLE atest2; DROP TABLE atest3; -@@ -2680,108 +4226,203 @@ +@@ -2680,108 +4211,204 @@ DROP TABLE atest5; DROP TABLE atest6; DROP TABLE atestc; @@ -136533,6 +135525,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- DROP OWNED BY regress_priv_user1; DROP USER regress_priv_user1; DROP USER regress_priv_user2; ++ERROR: cannot drop role/user regress_priv_user2: grants still exist on root.public.priv_testtype1, root.public._priv_testtype1 DROP USER regress_priv_user3; DROP USER regress_priv_user4; +ERROR: cannot drop role/user regress_priv_user4: grants still exist on root.public.mtarget @@ -136553,7 +135546,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_locktable_user + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev BEGIN; LOCK TABLE lock_table IN ACCESS SHARE MODE; -- should pass +ERROR: at or near "lock": syntax error @@ -136587,7 +135580,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_locktable_user + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev BEGIN; LOCK TABLE lock_table IN ACCESS SHARE MODE; -- should pass +ERROR: at or near "lock": syntax error @@ -136620,7 +135613,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_locktable_user + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev BEGIN; LOCK TABLE lock_table IN ACCESS SHARE MODE; -- should pass +ERROR: at or near "lock": syntax error @@ -136652,7 +135645,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_locktable_user + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev BEGIN; LOCK TABLE lock_table IN ACCESS SHARE MODE; -- should pass +ERROR: at or near "lock": syntax error @@ -136688,7 +135681,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_locktable_user + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev BEGIN; LOCK TABLE lock_table IN ACCESS SHARE MODE; -- should pass +ERROR: at or near "lock": syntax error @@ -136719,7 +135712,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- -- clean up DROP TABLE lock_table; DROP USER regress_locktable_user; -@@ -2791,24 +4432,17 @@ +@@ -2791,24 +4418,17 @@ \c - CREATE ROLE regress_readallstats; SELECT has_table_privilege('regress_readallstats','pg_backend_memory_contexts','SELECT'); -- no @@ -136748,7 +135741,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- SELECT has_table_privilege('regress_readallstats','pg_shmem_allocations','SELECT'); -- yes has_table_privilege --------------------- -@@ -2818,11 +4452,7 @@ +@@ -2818,11 +4438,7 @@ -- run query to ensure that functions within views can be executed SET ROLE regress_readallstats; SELECT COUNT(*) >= 0 AS ok FROM pg_backend_memory_contexts; @@ -136761,7 +135754,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- SELECT COUNT(*) >= 0 AS ok FROM pg_shmem_allocations; ok ---- -@@ -2838,28 +4468,48 @@ +@@ -2838,28 +4454,48 @@ CREATE ROLE regress_group_indirect_manager; CREATE ROLE regress_group_member; GRANT regress_group TO regress_group_direct_manager WITH INHERIT FALSE, ADMIN TRUE; @@ -136777,7 +135770,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_group_direct_manager + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev GRANT regress_group TO regress_group_member; SELECT member::regrole::text, CASE WHEN grantor = 10 THEN 'BOOTSTRAP SUPERUSER' ELSE grantor::regrole::text END FROM pg_auth_members WHERE roleid = 'regress_group'::regrole ORDER BY 1, 2; - member | grantor @@ -136797,7 +135790,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_group_indirect_manager + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev GRANT regress_group TO regress_group_member; SELECT member::regrole::text, CASE WHEN grantor = 10 THEN 'BOOTSTRAP SUPERUSER' ELSE grantor::regrole::text END FROM pg_auth_members WHERE roleid = 'regress_group'::regrole ORDER BY 1, 2; - member | grantor @@ -136820,7 +135813,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- DROP ROLE regress_group; DROP ROLE regress_group_direct_manager; DROP ROLE regress_group_indirect_manager; -@@ -2871,22 +4521,59 @@ +@@ -2871,22 +4507,59 @@ CREATE SCHEMA regress_roleoption; GRANT CREATE, USAGE ON SCHEMA regress_roleoption TO PUBLIC; GRANT regress_roleoption_donor TO regress_roleoption_protagonist WITH INHERIT TRUE, SET FALSE; @@ -136841,7 +135834,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_roleoption_protagonist + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE TABLE regress_roleoption.t1 (a int); CREATE TABLE regress_roleoption.t2 (a int); SET SESSION AUTHORIZATION regress_roleoption_donor; @@ -136850,7 +135843,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_roleoption_donor + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE TABLE regress_roleoption.t3 (a int); SET SESSION AUTHORIZATION regress_roleoption_recipient; +ERROR: at or near "regress_roleoption_recipient": syntax error: unimplemented: this syntax @@ -136858,7 +135851,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_roleoption_recipient + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE TABLE regress_roleoption.t4 (a int); SET SESSION AUTHORIZATION regress_roleoption_protagonist; +ERROR: at or near "regress_roleoption_protagonist": syntax error: unimplemented: this syntax @@ -136866,7 +135859,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/privileges.out -- +SET SESSION AUTHORIZATION regress_roleoption_protagonist + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev ALTER TABLE regress_roleoption.t1 OWNER TO regress_roleoption_donor; -- fails, can't be come donor -ERROR: must be able to SET ROLE "regress_roleoption_donor" ALTER TABLE regress_roleoption.t2 OWNER TO regress_roleoption_recipient; -- works @@ -136904,7 +135897,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/brin_bloom.out -- + macaddrcol macaddr, + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/45813/v24.2 ++See: https://go.crdb.dev/issue-v/45813/dev INSERT INTO brintest_bloom SELECT repeat(stringu1, 8)::bytea, substr(stringu1, 1, 1)::"char", @@ -136927,7 +135920,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/brin_bloom.out -- + format('%s:00:%s:00:%s:00', to_hex(odd), to_hex(even), to_hex(hundred))::macaddr, + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/45813/v24.2 ++See: https://go.crdb.dev/issue-v/45813/dev -- throw in some NULL's and different values INSERT INTO brintest_bloom (inetcol, cidrcol) SELECT inet 'fe80::6e40:8ff:fea9:8c46' + tenthous, @@ -136940,7 +135933,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/brin_bloom.out -- + cidr 'fe80::6e40:8ff:fea9:8c46' + tenthous + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/18846/v24.2 ++See: https://go.crdb.dev/issue-v/18846/dev -- test bloom specific index options -- ndistinct must be >= -1.0 CREATE INDEX brinidx_bloom ON brintest_bloom USING brin ( @@ -137074,7 +136067,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/brin_bloom.out -- + format('%s:00:%s:00:%s:00', to_hex(odd), to_hex(even), to_hex(hundred))::macaddr, + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/45813/v24.2 ++See: https://go.crdb.dev/issue-v/45813/dev SELECT brin_desummarize_range('brinidx_bloom', 0); - brin_desummarize_range ------------------------- @@ -137291,7 +136284,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/brin_multi.out -- + macaddrcol macaddr, + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/45813/v24.2 ++See: https://go.crdb.dev/issue-v/45813/dev INSERT INTO brintest_multi SELECT 142857 * tenthous, thousand, @@ -137312,7 +136305,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/brin_multi.out -- + format('%s:00:%s:00:%s:00', to_hex(odd), to_hex(even), to_hex(hundred))::macaddr, + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/45813/v24.2 ++See: https://go.crdb.dev/issue-v/45813/dev -- throw in some NULL's and different values INSERT INTO brintest_multi (inetcol, cidrcol) SELECT inet 'fe80::6e40:8ff:fea9:8c46' + tenthous, @@ -137325,7 +136318,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/brin_multi.out -- + cidr 'fe80::6e40:8ff:fea9:8c46' + tenthous + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/18846/v24.2 ++See: https://go.crdb.dev/issue-v/18846/dev -- test minmax-multi specific index options -- number of values must be >= 16 CREATE INDEX brinidx_multi ON brintest_multi USING brin ( @@ -137461,7 +136454,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/brin_multi.out -- + format('%s:00:%s:00:%s:00', to_hex(odd), to_hex(even), to_hex(hundred))::macaddr, + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/45813/v24.2 ++See: https://go.crdb.dev/issue-v/45813/dev SELECT brin_desummarize_range('brinidx_multi', 0); - brin_desummarize_range ------------------------- @@ -137753,11 +136746,11 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/async.out --label UNLISTEN notify_async2; +NOTICE: unimplemented: CRDB does not support LISTEN, making UNLISTEN a no-op +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/41522/v24.2 ++See: https://go.crdb.dev/issue-v/41522/dev UNLISTEN *; +NOTICE: unimplemented: CRDB does not support LISTEN, making UNLISTEN a no-op +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/41522/v24.2 ++See: https://go.crdb.dev/issue-v/41522/dev -- Should return zero while there are no pending notifications. -- src/test/isolation/specs/async-notify.spec tests for actual usage. SELECT pg_notification_queue_usage(); @@ -138009,7 +137002,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_operator.ou +CREATE OPERATOR === ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev SELECT pg_describe_object(refclassid,refobjid,refobjsubid) as ref, deptype FROM pg_depend WHERE classid = 'pg_operator'::regclass AND @@ -138210,7 +137203,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_operator.ou +SET SESSION AUTHORIZATION regress_alter_op_user + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev ALTER OPERATOR === (boolean, boolean) SET (RESTRICT = NONE); -ERROR: must be owner of operator === +ERROR: at or near "operator": syntax error @@ -138606,7 +137599,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tidrangescan.out DECLARE c SCROLL CURSOR FOR SELECT ctid FROM tidrangescan WHERE ctid < '(1,0)'; +ERROR: unimplemented: DECLARE SCROLL CURSOR +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/77102/v24.2 ++See: https://go.crdb.dev/issue-v/77102/dev FETCH NEXT c; - ctid -------- @@ -138665,7 +137658,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsrf.out --label= - +ERROR: generate_series(): unimplemented: nested set-returning functions +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/26234/v24.2 ++See: https://go.crdb.dev/issue-v/26234/dev -- but we've traditionally rejected the same in FROM SELECT * FROM generate_series(1, generate_series(1, 3)); -ERROR: set-returning functions must appear at top level of FROM @@ -138686,7 +137679,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsrf.out --label= - +ERROR: generate_series(): unimplemented: nested set-returning functions +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/26234/v24.2 ++See: https://go.crdb.dev/issue-v/26234/dev -- check proper nesting of SRFs in different expressions explain (verbose, costs off) SELECT generate_series(1, generate_series(1, 3)), generate_series(2, 4); @@ -138717,7 +137710,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsrf.out --label= - +ERROR: generate_series(): unimplemented: nested set-returning functions +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/26234/v24.2 ++See: https://go.crdb.dev/issue-v/26234/dev CREATE TABLE few(id int, dataa text, datab text); INSERT INTO few VALUES(1, 'a', 'foo'),(2, 'a', 'bar'),(3, 'b', 'bar'); -- SRF with a provably-dummy relation @@ -139073,28 +138066,28 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsrf.out --label= +SELECT dataa, datab b, generate_series(1,2) g, count(*) FROM few GROUP BY CUBE(dataa, datab) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev +SELECT dataa, datab b, generate_series(1,2) g, count(*) FROM few GROUP BY CUBE(dataa, datab) ORDER BY dataa; +ERROR: at or near "dataa": syntax error: unimplemented: this syntax +DETAIL: source SQL: +SELECT dataa, datab b, generate_series(1,2) g, count(*) FROM few GROUP BY CUBE(dataa, datab) ORDER BY dataa + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev +SELECT dataa, datab b, generate_series(1,2) g, count(*) FROM few GROUP BY CUBE(dataa, datab) ORDER BY g; +ERROR: at or near "dataa": syntax error: unimplemented: this syntax +DETAIL: source SQL: +SELECT dataa, datab b, generate_series(1,2) g, count(*) FROM few GROUP BY CUBE(dataa, datab) ORDER BY g + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev +SELECT dataa, datab b, generate_series(1,2) g, count(*) FROM few GROUP BY CUBE(dataa, datab, g); +ERROR: at or near "dataa": syntax error: unimplemented: this syntax +DETAIL: source SQL: +SELECT dataa, datab b, generate_series(1,2) g, count(*) FROM few GROUP BY CUBE(dataa, datab, g) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev SELECT dataa, datab b, generate_series(1,2) g, count(*) FROM few GROUP BY CUBE(dataa, datab, g) ORDER BY dataa; - dataa | b | g | count --------+-----+---+------- @@ -139129,7 +138122,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsrf.out --label= +SELECT dataa, datab b, generate_series(1,2) g, count(*) FROM few GROUP BY CUBE(dataa, datab, g) ORDER BY dataa + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev SELECT dataa, datab b, generate_series(1,2) g, count(*) FROM few GROUP BY CUBE(dataa, datab, g) ORDER BY g; - dataa | b | g | count --------+-----+---+------- @@ -139164,7 +138157,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsrf.out --label= +SELECT dataa, datab b, generate_series(1,2) g, count(*) FROM few GROUP BY CUBE(dataa, datab, g) ORDER BY g + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46280/v24.2 ++See: https://go.crdb.dev/issue-v/46280/dev reset enable_hashagg; +ERROR: unimplemented: the configuration setting "enable_hashagg" is not supported +HINT: You have attempted to use a feature that is not yet implemented. @@ -139313,7 +138306,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsrf.out --label= +CREATE OPERATOR |@| (PROCEDURE = unnest, RIGHTARG = ANYARRAY) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev SELECT |@|ARRAY[1,2,3]; - ?column? ----------- @@ -139413,7 +138406,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_generic.out LANGUAGE C; +ERROR: unimplemented: C is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/102201/v24.2 ++See: https://go.crdb.dev/issue-v/102201/dev -- Clean up in case a prior regression run failed SET client_min_messages TO 'warning'; DROP ROLE IF EXISTS regress_alter_generic_user1; @@ -139438,7 +138431,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_generic.out +SET SESSION AUTHORIZATION regress_alter_generic_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE FUNCTION alt_func1(int) RETURNS int LANGUAGE sql AS 'SELECT $1 + 1'; CREATE FUNCTION alt_func2(int) RETURNS int LANGUAGE sql @@ -139451,7 +138444,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_generic.out +CREATE AGGREGATE alt_agg1 ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev CREATE AGGREGATE alt_agg2 ( sfunc1 = int4mi, basetype = int4, stype1 = int4, initcond = 0 ); @@ -139460,7 +138453,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_generic.out +CREATE AGGREGATE alt_agg2 ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev ALTER AGGREGATE alt_func1(int) RENAME TO alt_func3; -- failed (not aggregate) -ERROR: function alt_func1(integer) is not an aggregate +ERROR: at or near "alt_func1": syntax error: unimplemented: this syntax @@ -139468,7 +138461,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_generic.out +ALTER AGGREGATE alt_func1(int) RENAME TO alt_func3 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev ALTER AGGREGATE alt_func1(int) OWNER TO regress_alter_generic_user3; -- failed (not aggregate) -ERROR: function alt_func1(integer) is not an aggregate +ERROR: at or near "alt_func1": syntax error: unimplemented: this syntax @@ -139476,7 +138469,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_generic.out +ALTER AGGREGATE alt_func1(int) OWNER TO regress_alter_generic_user3 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev ALTER AGGREGATE alt_func1(int) SET SCHEMA alt_nsp2; -- failed (not aggregate) -ERROR: function alt_func1(integer) is not an aggregate +ERROR: at or near "alt_func1": syntax error: unimplemented: this syntax @@ -139484,7 +138477,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_generic.out +ALTER AGGREGATE alt_func1(int) SET SCHEMA alt_nsp2 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev ALTER FUNCTION alt_func1(int) RENAME TO alt_func2; -- failed (name conflict) -ERROR: function alt_func2(integer) already exists in schema "alt_nsp1" +ERROR: function alt_func2(INT8) already exists in schema "alt_nsp1" @@ -139501,14 +138494,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_generic.out +ALTER AGGREGATE alt_agg1(int) RENAME TO alt_agg2 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev ALTER AGGREGATE alt_agg1(int) RENAME TO alt_agg3; -- OK +ERROR: at or near "alt_agg1": syntax error: unimplemented: this syntax +DETAIL: source SQL: +ALTER AGGREGATE alt_agg1(int) RENAME TO alt_agg3 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev ALTER AGGREGATE alt_agg2(int) OWNER TO regress_alter_generic_user2; -- failed (no role membership) -ERROR: must be able to SET ROLE "regress_alter_generic_user2" +ERROR: at or near "alt_agg2": syntax error: unimplemented: this syntax @@ -139516,28 +138509,28 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_generic.out +ALTER AGGREGATE alt_agg2(int) OWNER TO regress_alter_generic_user2 + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev ALTER AGGREGATE alt_agg2(int) OWNER TO regress_alter_generic_user3; -- OK +ERROR: at or near "alt_agg2": syntax error: unimplemented: this syntax +DETAIL: source SQL: +ALTER AGGREGATE alt_agg2(int) OWNER TO regress_alter_generic_user3 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev ALTER AGGREGATE alt_agg2(int) SET SCHEMA alt_nsp2; -- OK +ERROR: at or near "alt_agg2": syntax error: unimplemented: this syntax +DETAIL: source SQL: +ALTER AGGREGATE alt_agg2(int) SET SCHEMA alt_nsp2 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev SET SESSION AUTHORIZATION regress_alter_generic_user2; +ERROR: at or near "regress_alter_generic_user2": syntax error: unimplemented: this syntax +DETAIL: source SQL: +SET SESSION AUTHORIZATION regress_alter_generic_user2 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE FUNCTION alt_func1(int) RETURNS int LANGUAGE sql AS 'SELECT $1 + 2'; CREATE FUNCTION alt_func2(int) RETURNS int LANGUAGE sql @@ -139550,7 +138543,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_generic.out +CREATE AGGREGATE alt_agg1 ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev CREATE AGGREGATE alt_agg2 ( sfunc1 = int4mi, basetype = int4, stype1 = int4, initcond = -100 ); @@ -139559,7 +138552,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_generic.out +CREATE AGGREGATE alt_agg2 ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev ALTER FUNCTION alt_func3(int) RENAME TO alt_func4; -- failed (not owner) -ERROR: must be owner of function alt_func3 ALTER FUNCTION alt_func1(int) RENAME TO alt_func4; -- OK @@ -139582,14 +138575,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_generic.out +ALTER AGGREGATE alt_agg3(int) RENAME TO alt_agg4 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev ALTER AGGREGATE alt_agg1(int) RENAME TO alt_agg4; -- OK +ERROR: at or near "alt_agg1": syntax error: unimplemented: this syntax +DETAIL: source SQL: +ALTER AGGREGATE alt_agg1(int) RENAME TO alt_agg4 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev ALTER AGGREGATE alt_agg3(int) OWNER TO regress_alter_generic_user2; -- failed (not owner) -ERROR: must be owner of function alt_agg3 +ERROR: at or near "alt_agg3": syntax error: unimplemented: this syntax @@ -139597,7 +138590,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_generic.out +ALTER AGGREGATE alt_agg3(int) OWNER TO regress_alter_generic_user2 + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev ALTER AGGREGATE alt_agg2(int) OWNER TO regress_alter_generic_user3; -- failed (no role membership) -ERROR: must be able to SET ROLE "regress_alter_generic_user3" +ERROR: at or near "alt_agg2": syntax error: unimplemented: this syntax @@ -139605,7 +138598,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_generic.out +ALTER AGGREGATE alt_agg2(int) OWNER TO regress_alter_generic_user3 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev ALTER AGGREGATE alt_agg3(int) SET SCHEMA alt_nsp2; -- failed (not owner) -ERROR: must be owner of function alt_agg3 +ERROR: at or near "alt_agg3": syntax error: unimplemented: this syntax @@ -139613,7 +138606,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_generic.out +ALTER AGGREGATE alt_agg3(int) SET SCHEMA alt_nsp2 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev ALTER AGGREGATE alt_agg2(int) SET SCHEMA alt_nsp2; -- failed (name conflict) -ERROR: function alt_agg2(integer) already exists in schema "alt_nsp2" +ERROR: at or near "alt_agg2": syntax error: unimplemented: this syntax @@ -139621,7 +138614,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_generic.out +ALTER AGGREGATE alt_agg2(int) SET SCHEMA alt_nsp2 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev RESET SESSION AUTHORIZATION; +ERROR: at or near "authorization": syntax error +DETAIL: source SQL: @@ -139661,7 +138654,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_generic.out +SET SESSION AUTHORIZATION regress_alter_generic_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE CONVERSION alt_conv1 FOR 'LATIN1' TO 'UTF8' FROM iso8859_1_to_utf8; +ERROR: at or near "alt_conv1": syntax error: unimplemented: this syntax +DETAIL: source SQL: @@ -139732,7 +138725,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_generic.out +SET SESSION AUTHORIZATION regress_alter_generic_user2 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE CONVERSION alt_conv1 FOR 'LATIN1' TO 'UTF8' FROM iso8859_1_to_utf8; +ERROR: at or near "alt_conv1": syntax error: unimplemented: this syntax +DETAIL: source SQL: @@ -139950,14 +138943,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_generic.out +CREATE LANGUAGE alt_lang1 HANDLER plpgsql_call_handler + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/17511/v24.2 ++See: https://go.crdb.dev/issue-v/17511/dev CREATE LANGUAGE alt_lang2 HANDLER plpgsql_call_handler; +ERROR: at or near "handler": syntax error: unimplemented: this syntax +DETAIL: source SQL: +CREATE LANGUAGE alt_lang2 HANDLER plpgsql_call_handler + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/17511/v24.2 ++See: https://go.crdb.dev/issue-v/17511/dev ALTER LANGUAGE alt_lang1 OWNER TO regress_alter_generic_user1; -- OK +ERROR: at or near "language": syntax error +DETAIL: source SQL: @@ -139976,7 +138969,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_generic.out +SET SESSION AUTHORIZATION regress_alter_generic_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev ALTER LANGUAGE alt_lang1 RENAME TO alt_lang2; -- failed (name conflict) -ERROR: language "alt_lang2" already exists +ERROR: at or near "language": syntax error @@ -140045,21 +139038,21 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_generic.out +SET SESSION AUTHORIZATION regress_alter_generic_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE OPERATOR @-@ ( leftarg = int4, rightarg = int4, procedure = int4mi ); +ERROR: at or near "@": syntax error: unimplemented: this syntax +DETAIL: source SQL: +CREATE OPERATOR @-@ ( leftarg = int4, rightarg = int4, procedure = int4mi ) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev CREATE OPERATOR @+@ ( leftarg = int4, rightarg = int4, procedure = int4pl ); +ERROR: at or near "@": syntax error: unimplemented: this syntax +DETAIL: source SQL: +CREATE OPERATOR @+@ ( leftarg = int4, rightarg = int4, procedure = int4pl ) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev ALTER OPERATOR @+@(int4, int4) OWNER TO regress_alter_generic_user2; -- failed (no role membership) -ERROR: must be able to SET ROLE "regress_alter_generic_user2" +ERROR: at or near "operator": syntax error @@ -140085,14 +139078,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_generic.out +SET SESSION AUTHORIZATION regress_alter_generic_user2 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE OPERATOR @-@ ( leftarg = int4, rightarg = int4, procedure = int4mi ); +ERROR: at or near "@": syntax error: unimplemented: this syntax +DETAIL: source SQL: +CREATE OPERATOR @-@ ( leftarg = int4, rightarg = int4, procedure = int4mi ) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev ALTER OPERATOR @+@(int4, int4) OWNER TO regress_alter_generic_user2; -- failed (not owner) -ERROR: must be owner of operator @+@ +ERROR: at or near "operator": syntax error @@ -140147,14 +139140,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_generic.out +CREATE OPERATOR FAMILY alt_opf1 USING hash + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev CREATE OPERATOR FAMILY alt_opf2 USING hash; +ERROR: at or near "family": syntax error: unimplemented: this syntax +DETAIL: source SQL: +CREATE OPERATOR FAMILY alt_opf2 USING hash + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev ALTER OPERATOR FAMILY alt_opf1 USING hash OWNER TO regress_alter_generic_user1; +ERROR: at or near "operator": syntax error +DETAIL: source SQL: @@ -140173,14 +139166,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_generic.out +CREATE OPERATOR CLASS alt_opc1 FOR TYPE uuid USING hash AS STORAGE uuid + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev CREATE OPERATOR CLASS alt_opc2 FOR TYPE uuid USING hash AS STORAGE uuid; +ERROR: at or near "class": syntax error: unimplemented: this syntax +DETAIL: source SQL: +CREATE OPERATOR CLASS alt_opc2 FOR TYPE uuid USING hash AS STORAGE uuid + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev ALTER OPERATOR CLASS alt_opc1 USING hash OWNER TO regress_alter_generic_user1; +ERROR: at or near "operator": syntax error +DETAIL: source SQL: @@ -140199,7 +139192,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_generic.out +SET SESSION AUTHORIZATION regress_alter_generic_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev ALTER OPERATOR FAMILY alt_opf1 USING hash RENAME TO alt_opf2; -- failed (name conflict) -ERROR: operator family "alt_opf2" for access method "hash" already exists in schema "alt_nsp1" +ERROR: at or near "operator": syntax error @@ -140276,14 +139269,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_generic.out +CREATE OPERATOR FAMILY alt_opf1 USING hash + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev CREATE OPERATOR FAMILY alt_opf2 USING hash; +ERROR: at or near "family": syntax error: unimplemented: this syntax +DETAIL: source SQL: +CREATE OPERATOR FAMILY alt_opf2 USING hash + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev ALTER OPERATOR FAMILY alt_opf1 USING hash OWNER TO regress_alter_generic_user2; +ERROR: at or near "operator": syntax error +DETAIL: source SQL: @@ -140302,14 +139295,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_generic.out +CREATE OPERATOR CLASS alt_opc1 FOR TYPE macaddr USING hash AS STORAGE macaddr + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev CREATE OPERATOR CLASS alt_opc2 FOR TYPE macaddr USING hash AS STORAGE macaddr; +ERROR: at or near "class": syntax error: unimplemented: this syntax +DETAIL: source SQL: +CREATE OPERATOR CLASS alt_opc2 FOR TYPE macaddr USING hash AS STORAGE macaddr + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev ALTER OPERATOR CLASS alt_opc1 USING hash OWNER TO regress_alter_generic_user2; +ERROR: at or near "operator": syntax error +DETAIL: source SQL: @@ -140328,7 +139321,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_generic.out +SET SESSION AUTHORIZATION regress_alter_generic_user2 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev ALTER OPERATOR FAMILY alt_opf3 USING hash RENAME TO alt_opf4; -- failed (not owner) -ERROR: must be owner of operator family alt_opf3 +ERROR: at or near "operator": syntax error @@ -140459,7 +139452,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_generic.out +CREATE OPERATOR FAMILY alt_opf4 USING btree + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev ALTER OPERATOR FAMILY alt_opf4 USING btree ADD -- int4 vs int2 OPERATOR 1 < (int4, int2) , @@ -140508,7 +139501,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_generic.out +CREATE OPERATOR FAMILY alt_opf4 USING btree + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev ALTER OPERATOR FAMILY alt_opf4 USING invalid_index_method ADD OPERATOR 1 < (int4, int2); -- invalid indexing_method -ERROR: access method "invalid_index_method" does not exist +ERROR: at or near "operator": syntax error @@ -140588,7 +139581,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_generic.out +CREATE OPERATOR FAMILY alt_opf5 USING btree + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev SET ROLE regress_alter_generic_user5; +ERROR: current transaction is aborted, commands ignored until end of transaction block ALTER OPERATOR FAMILY alt_opf5 USING btree ADD OPERATOR 1 < (int4, int2), FUNCTION 1 btint42cmp(int4, int2); @@ -140629,7 +139622,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_generic.out +CREATE OPERATOR FAMILY alt_nsp6.alt_opf6 USING btree + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev SET ROLE regress_alter_generic_user6; +ERROR: current transaction is aborted, commands ignored until end of transaction block ALTER OPERATOR FAMILY alt_nsp6.alt_opf6 USING btree ADD OPERATOR 1 < (int4, int2); @@ -140647,7 +139640,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_generic.out +CREATE OPERATOR FAMILY alt_opf7 USING btree + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev ALTER OPERATOR FAMILY alt_opf7 USING btree ADD OPERATOR 1 < (int4, int2); +ERROR: at or near "operator": syntax error +DETAIL: source SQL: @@ -140685,7 +139678,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_generic.out +CREATE OPERATOR FAMILY alt_opf8 USING btree + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev ALTER OPERATOR FAMILY alt_opf8 USING btree ADD OPERATOR 1 < (int4, int4); +ERROR: at or near "operator": syntax error +DETAIL: source SQL: @@ -140715,7 +139708,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_generic.out +CREATE OPERATOR FAMILY alt_opf9 USING gist + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev ALTER OPERATOR FAMILY alt_opf9 USING gist ADD OPERATOR 1 < (int4, int4) FOR ORDER BY float_ops; +ERROR: at or near "operator": syntax error +DETAIL: source SQL: @@ -140745,7 +139738,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_generic.out +CREATE OPERATOR FAMILY alt_opf10 USING btree + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev ALTER OPERATOR FAMILY alt_opf10 USING btree ADD OPERATOR 1 < (int4, int4) FOR ORDER BY float_ops; -ERROR: access method "btree" does not support ordering operators +ERROR: at or near "operator": syntax error @@ -140776,7 +139769,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_generic.out +CREATE OPERATOR FAMILY alt_opf11 USING gist + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev ALTER OPERATOR FAMILY alt_opf11 USING gist ADD OPERATOR 1 < (int4, int4) FOR ORDER BY float_ops; +ERROR: at or near "operator": syntax error +DETAIL: source SQL: @@ -140813,7 +139806,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_generic.out +CREATE OPERATOR FAMILY alt_opf12 USING btree + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev CREATE FUNCTION fn_opf12 (int4, int2) RETURNS BIGINT AS 'SELECT NULL::BIGINT;' LANGUAGE SQL; +ERROR: current transaction is aborted, commands ignored until end of transaction block ALTER OPERATOR FAMILY alt_opf12 USING btree ADD FUNCTION 1 fn_opf12(int4, int2); @@ -140849,7 +139842,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_generic.out +CREATE OPERATOR FAMILY alt_opf13 USING hash + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev CREATE FUNCTION fn_opf13 (int4) RETURNS BIGINT AS 'SELECT NULL::BIGINT;' LANGUAGE SQL; +ERROR: current transaction is aborted, commands ignored until end of transaction block ALTER OPERATOR FAMILY alt_opf13 USING hash ADD FUNCTION 1 fn_opf13(int4); @@ -140885,7 +139878,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_generic.out +CREATE OPERATOR FAMILY alt_opf14 USING btree + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev CREATE FUNCTION fn_opf14 (int4) RETURNS BIGINT AS 'SELECT NULL::BIGINT;' LANGUAGE SQL; +ERROR: current transaction is aborted, commands ignored until end of transaction block ALTER OPERATOR FAMILY alt_opf14 USING btree ADD FUNCTION 1 fn_opf14(int4); @@ -140921,7 +139914,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_generic.out +CREATE OPERATOR FAMILY alt_opf15 USING hash + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev CREATE FUNCTION fn_opf15 (int4, int2) RETURNS BIGINT AS 'SELECT NULL::BIGINT;' LANGUAGE SQL; +ERROR: current transaction is aborted, commands ignored until end of transaction block ALTER OPERATOR FAMILY alt_opf15 USING hash ADD FUNCTION 1 fn_opf15(int4, int2); @@ -140957,7 +139950,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_generic.out +CREATE OPERATOR FAMILY alt_opf16 USING gist + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev ALTER OPERATOR FAMILY alt_opf16 USING gist ADD FUNCTION 1 btint42cmp(int4, int2); -ERROR: associated data types must be specified for index support function +ERROR: at or near "operator": syntax error @@ -140988,7 +139981,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_generic.out +CREATE OPERATOR FAMILY alt_opf17 USING btree + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev ALTER OPERATOR FAMILY alt_opf17 USING btree ADD OPERATOR 1 < (int4, int4), OPERATOR 1 < (int4, int4); -- operator # appears twice in same statement -ERROR: operator number 1 for (integer,integer) appears more than once +ERROR: at or near "operator": syntax error @@ -141071,7 +140064,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_generic.out +CREATE OPERATOR FAMILY alt_opf18 USING btree + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev ALTER OPERATOR FAMILY alt_opf18 USING btree DROP OPERATOR 1 (int4, int4); -ERROR: operator 1(integer,integer) does not exist in operator family "alt_opf18" +ERROR: at or near "operator": syntax error @@ -141130,7 +140123,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_generic.out +CREATE OPERATOR FAMILY alt_opf19 USING btree + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev ALTER OPERATOR FAMILY alt_opf19 USING btree ADD FUNCTION 5 test_opclass_options_func(internal, text[], bool); -ERROR: function test_opclass_options_func(internal, text[], boolean) does not exist +ERROR: at or near "operator": syntax error @@ -141190,7 +140183,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_generic.out +SET SESSION AUTHORIZATION regress_alter_generic_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE TABLE alt_regress_1 (a INTEGER, b INTEGER); CREATE STATISTICS alt_stat1 ON a, b FROM alt_regress_1; CREATE STATISTICS alt_stat2 ON a, b FROM alt_regress_1; @@ -141232,7 +140225,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_generic.out +SET SESSION AUTHORIZATION regress_alter_generic_user2 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE TABLE alt_regress_2 (a INTEGER, b INTEGER); CREATE STATISTICS alt_stat1 ON a, b FROM alt_regress_2; CREATE STATISTICS alt_stat2 ON a, b FROM alt_regress_2; @@ -141308,21 +140301,21 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_generic.out +SET SESSION AUTHORIZATION regress_alter_generic_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE TEXT SEARCH DICTIONARY alt_ts_dict1 (template=simple); +ERROR: at or near "search": syntax error: unimplemented: this syntax +DETAIL: source SQL: +CREATE TEXT SEARCH DICTIONARY alt_ts_dict1 (template=simple) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev CREATE TEXT SEARCH DICTIONARY alt_ts_dict2 (template=simple); +ERROR: at or near "search": syntax error: unimplemented: this syntax +DETAIL: source SQL: +CREATE TEXT SEARCH DICTIONARY alt_ts_dict2 (template=simple) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev ALTER TEXT SEARCH DICTIONARY alt_ts_dict1 RENAME TO alt_ts_dict2; -- failed (name conflict) -ERROR: text search dictionary "alt_ts_dict2" already exists in schema "alt_nsp1" +ERROR: at or near "text": syntax error @@ -141361,21 +140354,21 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_generic.out +SET SESSION AUTHORIZATION regress_alter_generic_user2 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE TEXT SEARCH DICTIONARY alt_ts_dict1 (template=simple); +ERROR: at or near "search": syntax error: unimplemented: this syntax +DETAIL: source SQL: +CREATE TEXT SEARCH DICTIONARY alt_ts_dict1 (template=simple) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev CREATE TEXT SEARCH DICTIONARY alt_ts_dict2 (template=simple); +ERROR: at or near "search": syntax error: unimplemented: this syntax +DETAIL: source SQL: +CREATE TEXT SEARCH DICTIONARY alt_ts_dict2 (template=simple) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev ALTER TEXT SEARCH DICTIONARY alt_ts_dict3 RENAME TO alt_ts_dict4; -- failed (not owner) -ERROR: must be owner of text search dictionary alt_ts_dict3 +ERROR: at or near "text": syntax error @@ -141448,21 +140441,21 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_generic.out +SET SESSION AUTHORIZATION regress_alter_generic_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE TEXT SEARCH CONFIGURATION alt_ts_conf1 (copy=english); +ERROR: at or near "search": syntax error: unimplemented: this syntax +DETAIL: source SQL: +CREATE TEXT SEARCH CONFIGURATION alt_ts_conf1 (copy=english) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev CREATE TEXT SEARCH CONFIGURATION alt_ts_conf2 (copy=english); +ERROR: at or near "search": syntax error: unimplemented: this syntax +DETAIL: source SQL: +CREATE TEXT SEARCH CONFIGURATION alt_ts_conf2 (copy=english) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev ALTER TEXT SEARCH CONFIGURATION alt_ts_conf1 RENAME TO alt_ts_conf2; -- failed (name conflict) -ERROR: text search configuration "alt_ts_conf2" already exists in schema "alt_nsp1" +ERROR: at or near "text": syntax error @@ -141501,21 +140494,21 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_generic.out +SET SESSION AUTHORIZATION regress_alter_generic_user2 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE TEXT SEARCH CONFIGURATION alt_ts_conf1 (copy=english); +ERROR: at or near "search": syntax error: unimplemented: this syntax +DETAIL: source SQL: +CREATE TEXT SEARCH CONFIGURATION alt_ts_conf1 (copy=english) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev CREATE TEXT SEARCH CONFIGURATION alt_ts_conf2 (copy=english); +ERROR: at or near "search": syntax error: unimplemented: this syntax +DETAIL: source SQL: +CREATE TEXT SEARCH CONFIGURATION alt_ts_conf2 (copy=english) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev ALTER TEXT SEARCH CONFIGURATION alt_ts_conf3 RENAME TO alt_ts_conf4; -- failed (not owner) -ERROR: must be owner of text search configuration alt_ts_conf3 +ERROR: at or near "text": syntax error @@ -141588,14 +140581,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_generic.out +CREATE TEXT SEARCH TEMPLATE alt_ts_temp1 (lexize=dsimple_lexize) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev CREATE TEXT SEARCH TEMPLATE alt_ts_temp2 (lexize=dsimple_lexize); +ERROR: at or near "search": syntax error: unimplemented: this syntax +DETAIL: source SQL: +CREATE TEXT SEARCH TEMPLATE alt_ts_temp2 (lexize=dsimple_lexize) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev ALTER TEXT SEARCH TEMPLATE alt_ts_temp1 RENAME TO alt_ts_temp2; -- failed (name conflict) -ERROR: text search template "alt_ts_temp2" already exists in schema "alt_nsp1" +ERROR: at or near "text": syntax error @@ -141621,7 +140614,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_generic.out +CREATE TEXT SEARCH TEMPLATE alt_ts_temp2 (lexize=dsimple_lexize) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev ALTER TEXT SEARCH TEMPLATE alt_ts_temp2 SET SCHEMA alt_nsp2; -- failed (name conflict) -ERROR: text search template "alt_ts_temp2" already exists in schema "alt_nsp2" +ERROR: at or near "text": syntax error @@ -141637,7 +140630,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_generic.out +CREATE TEXT SEARCH TEMPLATE tstemp_case ("Init" = init_function) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT nspname, tmplname FROM pg_ts_template t, pg_namespace n WHERE t.tmplnamespace = n.oid AND nspname like 'alt_nsp%' @@ -141662,7 +140655,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_generic.out +CREATE TEXT SEARCH PARSER alt_ts_prs1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev CREATE TEXT SEARCH PARSER alt_ts_prs2 (start = prsd_start, gettoken = prsd_nexttoken, end = prsd_end, lextypes = prsd_lextype); +ERROR: at or near "search": syntax error: unimplemented: this syntax @@ -141670,7 +140663,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_generic.out +CREATE TEXT SEARCH PARSER alt_ts_prs2 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev ALTER TEXT SEARCH PARSER alt_ts_prs1 RENAME TO alt_ts_prs2; -- failed (name conflict) -ERROR: text search parser "alt_ts_prs2" already exists in schema "alt_nsp1" +ERROR: at or near "text": syntax error @@ -141697,7 +140690,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_generic.out +CREATE TEXT SEARCH PARSER alt_ts_prs2 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev ALTER TEXT SEARCH PARSER alt_ts_prs2 SET SCHEMA alt_nsp2; -- failed (name conflict) -ERROR: text search parser "alt_ts_prs2" already exists in schema "alt_nsp2" +ERROR: at or near "text": syntax error @@ -141713,7 +140706,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_generic.out +CREATE TEXT SEARCH PARSER tspars_case ("Start" = start_function) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT nspname, prsname FROM pg_ts_parser t, pg_namespace n WHERE t.prsnamespace = n.oid AND nspname like 'alt_nsp%' @@ -141771,14 +140764,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_generic.out +DROP LANGUAGE alt_lang2 CASCADE + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/17511/v24.2 ++See: https://go.crdb.dev/issue-v/17511/dev DROP LANGUAGE alt_lang3 CASCADE; +ERROR: at or near "cascade": syntax error: unimplemented: this syntax +DETAIL: source SQL: +DROP LANGUAGE alt_lang3 CASCADE + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/17511/v24.2 ++See: https://go.crdb.dev/issue-v/17511/dev DROP SCHEMA alt_nsp1 CASCADE; -NOTICE: drop cascades to 28 other objects -DETAIL: drop cascades to function alt_func3(integer) @@ -142434,7 +141427,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_role.out - +SET SESSION AUTHORIZATION regress_role_limited_admin + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE ROLE regress_nosuch_superuser SUPERUSER; -ERROR: permission denied to create role -DETAIL: Only roles with the SUPERUSER attribute may create roles with the SUPERUSER attribute. @@ -142497,7 +141490,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_role.out - +SET SESSION AUTHORIZATION regress_role_admin + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE ROLE regress_replication_bypassrls REPLICATION BYPASSRLS; +ERROR: at or near "bypassrls": syntax error +DETAIL: source SQL: @@ -142635,7 +141628,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_role.out - +SET SESSION AUTHORIZATION regress_createrole + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE DATABASE regress_nosuch_db; -ERROR: permission denied to create database -- ok, regress_createrole can create new roles @@ -142698,7 +141691,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_role.out - +SET SESSION AUTHORIZATION regress_tenant + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE TABLE tenant_table (i integer); CREATE INDEX tenant_idx ON tenant_table(i); CREATE VIEW tenant_view AS SELECT * FROM pg_catalog.pg_class; @@ -142710,7 +141703,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_role.out - +SET SESSION AUTHORIZATION regress_createrole + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev DROP INDEX tenant_idx; -ERROR: must be owner of index tenant_idx +NOTICE: the data for dropped indexes is reclaimed asynchronously @@ -142746,7 +141739,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_role.out - +SET SESSION AUTHORIZATION regress_tenant2 + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE TABLE tenant2_table (i integer); REVOKE ALL PRIVILEGES ON tenant2_table FROM PUBLIC; -- ok, because we have SET and INHERIT on regress_tenant2 @@ -142756,7 +141749,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_role.out - +SET SESSION AUTHORIZATION regress_createrole + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE SCHEMA regress_tenant2_schema AUTHORIZATION regress_tenant2; ALTER SCHEMA regress_tenant2_schema OWNER TO regress_createrole; +ERROR: role/user "regress_createrole" does not exist @@ -142879,7 +141872,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_role.out - +SET SESSION AUTHORIZATION regress_role_admin + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev DROP ROLE regress_nosuch_superuser; -ERROR: role "regress_nosuch_superuser" does not exist +ERROR: role/user "regress_nosuch_superuser" does not exist @@ -142995,14 +141988,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/misc.out --label= LANGUAGE C STRICT; +ERROR: unimplemented: C is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/102201/v24.2 ++See: https://go.crdb.dev/issue-v/102201/dev CREATE FUNCTION reverse_name(name) RETURNS name AS :'regresslib' LANGUAGE C STRICT; +ERROR: unimplemented: C is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/102201/v24.2 ++See: https://go.crdb.dev/issue-v/102201/dev -- -- BTREE -- @@ -143041,7 +142034,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/misc.out --label= +COPY onek TO ':abs_builddir/results/onek.data' + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/97181/v24.2 ++See: https://go.crdb.dev/issue-v/97181/dev CREATE TEMP TABLE onek_copy (LIKE onek); COPY onek_copy FROM :'filename'; +ERROR: at or near ":abs_builddir/results/onek.data": syntax error: unimplemented: this syntax @@ -144495,7 +143488,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/sysviews.out --la -- We expect no walreceiver running in this test select count(*) = 0 as ok from pg_stat_wal_receiver; ok -@@ -109,30 +97,23 @@ +@@ -109,30 +97,24 @@ -- This is to record the prevailing planner enable_foo settings during -- a regression test run. 
select name, setting from pg_settings where name like 'enable%'; @@ -144526,7 +143519,8 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/sysviews.out --la + name | setting +--------------------------------------------------+--------- + enable_auto_rehoming | off -+ enable_create_stats_using_extremes | off ++ enable_create_stats_using_extremes | on ++ enable_create_stats_using_extremes_bool_enum | off + enable_durable_locking_for_serializable | off + enable_experimental_alter_column_type_general | off + enable_implicit_fk_locking_for_serializable | off @@ -144539,11 +143533,11 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/sysviews.out --la + enable_shared_locking_for_serializable | off + enable_super_regions | off + enable_zigzag_join | off -+(14 rows) ++(15 rows) -- Test that the pg_timezone_names and pg_timezone_abbrevs views are -- more-or-less working. We can't test their contents in any great detail -@@ -149,21 +130,45 @@ +@@ -149,21 +131,45 @@ select count(distinct utc_offset) >= 24 as ok from pg_timezone_abbrevs; ok ---- @@ -144618,7 +143612,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/misc_functions.ou +SELECT num_nonnulls(1, 2, NULL::text, NULL::point, '', int8 '9', 1.0 / NULL) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev SELECT num_nonnulls(VARIADIC '{1,2,NULL,3}'::int[]); - num_nonnulls --------------- @@ -144695,7 +143689,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/misc_functions.ou +SELECT num_nulls(1, 2, NULL::text, NULL::point, '', int8 '9', 1.0 / NULL) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev SELECT num_nulls(VARIADIC '{1,2,NULL,3}'::int[]); - num_nulls ------------ @@ -144865,7 +143859,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/misc_functions.ou LANGUAGE C STRICT IMMUTABLE; +ERROR: unimplemented: C is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/102201/v24.2 ++See: https://go.crdb.dev/issue-v/102201/dev SELECT test_canonicalize_path('/'); - test_canonicalize_path ------------------------- @@ -145361,7 +144355,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/misc_functions.ou LANGUAGE C STRICT; +ERROR: unimplemented: C is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/102201/v24.2 ++See: https://go.crdb.dev/issue-v/102201/dev ALTER FUNCTION my_int_eq(int, int) SUPPORT test_support_func; +ERROR: at or near "test_support_func": syntax error: unimplemented: this syntax +DETAIL: source SQL: @@ -145584,15 +144578,16 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/incremental_sort. +returns table (out_line text) language plpgsql + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/100226/v24.2 ++See: https://go.crdb.dev/issue-v/100226/dev create or replace function explain_analyze_inc_sort_nodes(query text) returns jsonb language plpgsql as -@@ -82,6 +102,18 @@ +@@ -82,6 +102,19 @@ return matching_nodes; end; $$; +ERROR: unimplemented: attempted to use a PL/pgSQL statement that is not yet supported ++DETAIL: stmt_dyn_exec is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. + +Please check the public issue tracker to check whether this problem is @@ -145607,7 +144602,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/incremental_sort. create or replace function explain_analyze_inc_sort_nodes_without_memory(query text) returns jsonb language plpgsql as -@@ -104,6 +136,27 @@ +@@ -104,6 +137,27 @@ return nodes; end; $$; @@ -145635,7 +144630,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/incremental_sort. create or replace function explain_analyze_inc_sort_nodes_verify_invariants(query text) returns bool language plpgsql as -@@ -127,467 +180,119 @@ +@@ -127,467 +181,119 @@ return true; end; $$; @@ -146182,7 +145177,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/incremental_sort. select * from (select * from t order by a) s order by a, b limit 70; a | b ---+---- -@@ -668,17 +373,11 @@ +@@ -668,17 +374,11 @@ -- transition point) but only retain 5. Thus when we transition modes, all -- tuples in the full sort state have different prefix keys. explain (costs off) select * from (select * from t order by a) s order by a, b limit 5; @@ -146205,7 +145200,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/incremental_sort. select * from (select * from t order by a) s order by a, b limit 5; a | b ---+--- -@@ -694,429 +393,110 @@ +@@ -694,429 +394,110 @@ -- We force the planner to choose a plan with incremental sort on the right side -- of a nested loop join node. That way we trigger the rescan code path. set local enable_hashjoin = off; @@ -146705,7 +145700,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/incremental_sort. select * from (select * from t order by a) s order by a, b limit 31; a | b ----+---- -@@ -1154,17 +534,11 @@ +@@ -1154,17 +535,11 @@ (31 rows) explain (costs off) select * from (select * from t order by a) s order by a, b limit 32; @@ -146728,7 +145723,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/incremental_sort. select * from (select * from t order by a) s order by a, b limit 32; a | b ----+---- -@@ -1203,17 +577,11 @@ +@@ -1203,17 +578,11 @@ (32 rows) explain (costs off) select * from (select * from t order by a) s order by a, b limit 33; @@ -146751,7 +145746,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/incremental_sort. select * from (select * from t order by a) s order by a, b limit 33; a | b ----+---- -@@ -1253,17 +621,11 @@ +@@ -1253,17 +622,11 @@ (33 rows) explain (costs off) select * from (select * from t order by a) s order by a, b limit 65; @@ -146774,7 +145769,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/incremental_sort. select * from (select * from t order by a) s order by a, b limit 65; a | b ----+---- -@@ -1335,17 +697,11 @@ +@@ -1335,17 +698,11 @@ (65 rows) explain (costs off) select * from (select * from t order by a) s order by a, b limit 66; @@ -146797,7 +145792,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/incremental_sort. 
select * from (select * from t order by a) s order by a, b limit 66; a | b ----+---- -@@ -1421,150 +777,224 @@ +@@ -1421,150 +778,224 @@ drop table t; -- Incremental sort vs. parallel queries set min_parallel_table_scan_size = '1kB'; @@ -147124,7 +146119,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/incremental_sort. -- Parallel sort with an aggregate that can be safely generated in parallel, -- but we can't sort by partial aggregate values. explain (costs off) select count(*) -@@ -1572,91 +1002,45 @@ +@@ -1572,91 +1003,45 @@ join tenk1 t2 on t1.unique1 = t2.unique2 join tenk1 t3 on t2.unique1 = t3.unique1 order by count(*); @@ -147286,7 +146281,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/merge.out --label +SET SESSION AUTHORIZATION regress_merge_privs + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev EXPLAIN (COSTS OFF) MERGE INTO target t USING source AS s @@ -147483,7 +146478,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/merge.out --label +SET SESSION AUTHORIZATION regress_merge_no_privs + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev MERGE INTO target USING source2 ON target.tid = source2.sid @@ -147501,7 +146496,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/merge.out --label +SET SESSION AUTHORIZATION regress_merge_privs + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev MERGE INTO target2 USING source ON target2.tid = source.sid @@ -147819,9 +146814,9 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/merge.out --label -- remove constraints alter table target drop CONSTRAINT target_pkey; -+ERROR: relation "target" (1245): unimplemented: primary key dropped without subsequent addition of new primary key in same transaction ++ERROR: relation "target" (1248): unimplemented: primary key dropped without subsequent addition of new primary key in same transaction +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/48026/v24.2 ++See: https://go.crdb.dev/issue-v/48026/dev alter table target alter column tid drop not null; -- multiple actions BEGIN; @@ -148260,95 +147255,61 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/merge.out --label DROP TABLE wq_target, wq_source; -- test triggers -@@ -873,25 +943,94 @@ +@@ -873,25 +943,60 @@ END IF; END; $$; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev CREATE TRIGGER merge_bsi BEFORE INSERT ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); -+ERROR: at or near "merge_bsi": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER merge_bsi BEFORE INSERT ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc () -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE TRIGGER merge_bsu BEFORE UPDATE ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); -+ERROR: at or near "merge_bsu": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER merge_bsu BEFORE UPDATE ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc () -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE TRIGGER merge_bsd BEFORE DELETE ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); -+ERROR: at or near "merge_bsd": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER merge_bsd BEFORE DELETE ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc () -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE TRIGGER merge_asi AFTER INSERT ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); -+ERROR: at or near "merge_asi": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER merge_asi AFTER INSERT ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc () -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE TRIGGER merge_asu AFTER UPDATE ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); -+ERROR: at or near "merge_asu": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER merge_asu AFTER UPDATE ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc () -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE TRIGGER merge_asd AFTER DELETE ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); -+ERROR: at or near "merge_asd": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER merge_asd AFTER DELETE ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc () -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE TRIGGER merge_bri BEFORE INSERT ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); -+ERROR: at or near "merge_bri": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER merge_bri BEFORE INSERT ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc () -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE TRIGGER merge_bru BEFORE UPDATE ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); -+ERROR: at or near "merge_bru": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER merge_bru BEFORE UPDATE ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc () -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE TRIGGER merge_brd BEFORE DELETE ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); -+ERROR: at or near "merge_brd": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER merge_brd BEFORE DELETE ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc () -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE TRIGGER merge_ari AFTER INSERT ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); -+ERROR: at or near "merge_ari": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER merge_ari AFTER INSERT ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc () -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE TRIGGER merge_aru AFTER UPDATE ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); -+ERROR: at or near "merge_aru": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER merge_aru AFTER UPDATE ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc () -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE TRIGGER merge_ard AFTER DELETE ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); -+ERROR: at or near "merge_ard": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER merge_ard AFTER DELETE ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc () -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev -- now the classic UPSERT, with a DELETE BEGIN; UPDATE target SET balance = 0 WHERE tid = 3; @@ -148359,7 +147320,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/merge.out --label --EXPLAIN (ANALYZE ON, COSTS OFF, SUMMARY OFF, TIMING OFF) MERGE INTO target t USING source AS s -@@ -902,26 +1041,12 @@ +@@ -902,26 +1007,12 @@ DELETE WHEN NOT MATCHED THEN INSERT VALUES (s.sid, s.delta); @@ -148391,28 +147352,27 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/merge.out --label ROLLBACK; -- Test behavior of triggers that turn UPDATE/DELETE into no-ops create or replace function skip_merge_op() returns trigger -@@ -931,6 +1056,7 @@ +@@ -931,6 +1022,9 @@ RETURN NULL; END; $$; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. 
++See: https://go.crdb.dev/issue-v/126356/dev SELECT * FROM target full outer join source on (sid = tid); tid | balance | sid | delta -----+---------+-----+------- -@@ -942,6 +1068,12 @@ +@@ -942,6 +1036,9 @@ create trigger merge_skip BEFORE INSERT OR UPDATE or DELETE ON target FOR EACH ROW EXECUTE FUNCTION skip_merge_op(); -+ERROR: at or near "merge_skip": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger merge_skip BEFORE INSERT OR UPDATE or DELETE -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev DO $$ DECLARE result integer; -@@ -961,17 +1093,10 @@ +@@ -961,17 +1058,10 @@ RAISE NOTICE 'ROW_COUNT = %', result; END; $$; @@ -148434,22 +147394,19 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/merge.out --label SELECT * FROM target FULL OUTER JOIN source ON (sid = tid); tid | balance | sid | delta -----+---------+-----+------- -@@ -982,7 +1107,14 @@ +@@ -982,7 +1072,11 @@ (4 rows) DROP TRIGGER merge_skip ON target; -+ERROR: at or near "merge_skip": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+DROP TRIGGER merge_skip ON target -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev DROP FUNCTION skip_merge_op(); +ERROR: unknown function: skip_merge_op() -- test from PL/pgSQL -- make sure MERGE INTO isn't interpreted to mean returning variables like SELECT INTO BEGIN; -@@ -995,12 +1127,10 @@ +@@ -995,12 +1089,10 @@ UPDATE SET balance = t.balance - s.delta; END; $$; @@ -148466,7 +147423,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/merge.out --label ROLLBACK; --source constants BEGIN; -@@ -1009,19 +1139,12 @@ +@@ -1009,19 +1101,12 @@ ON t.tid = s.sid WHEN NOT MATCHED THEN INSERT (tid, balance) VALUES (s.sid, s.delta); @@ -148491,7 +147448,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/merge.out --label ROLLBACK; --source query BEGIN; -@@ -1030,19 +1153,12 @@ +@@ -1030,19 +1115,12 @@ ON t.tid = s.sid WHEN NOT MATCHED THEN INSERT (tid, balance) VALUES (s.sid, s.delta); @@ -148516,7 +147473,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/merge.out --label ROLLBACK; BEGIN; MERGE INTO target t -@@ -1050,19 +1166,12 @@ +@@ -1050,19 +1128,12 @@ ON t.tid = s.sid WHEN NOT MATCHED THEN INSERT (tid, balance) VALUES (s.sid, s.newname); @@ -148541,7 +147498,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/merge.out --label ROLLBACK; --self-merge BEGIN; -@@ -1073,24 +1182,12 @@ +@@ -1073,24 +1144,12 @@ UPDATE SET balance = t1.balance + t2.balance WHEN NOT MATCHED THEN INSERT VALUES (t2.tid, t2.balance); @@ -148571,7 +147528,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/merge.out --label ROLLBACK; BEGIN; MERGE INTO target t -@@ -1098,16 +1195,12 @@ +@@ -1098,16 +1157,12 @@ ON t.tid = s.sid WHEN NOT MATCHED THEN INSERT (tid, balance) VALUES (s.sid, s.delta); @@ -148593,7 +147550,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/merge.out --label ROLLBACK; BEGIN; MERGE INTO target t -@@ -1120,19 +1213,12 @@ +@@ -1120,19 +1175,12 @@ ON t.tid = s.sid WHEN NOT MATCHED THEN INSERT (tid, balance) VALUES (s.sid, s.delta); @@ -148618,7 +147575,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/merge.out 
--label ROLLBACK; -- plpgsql parameters and results BEGIN; -@@ -1154,41 +1240,38 @@ +@@ -1154,41 +1202,38 @@ RETURN result; END; $$; @@ -148684,7 +147641,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/merge.out --label ROLLBACK; BEGIN; PREPARE foom2 (integer, integer) AS -@@ -1197,26 +1280,25 @@ +@@ -1197,26 +1242,25 @@ ON t.tid = $1 WHEN MATCHED THEN UPDATE SET balance = $2; @@ -148721,7 +147678,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/merge.out --label INSERT INTO sq_target(tid, balance) VALUES (1,100), (2,200), (3,300); INSERT INTO sq_source(sid, delta) VALUES (1,10), (2,20), (4,40); BEGIN; -@@ -1225,14 +1307,12 @@ +@@ -1225,14 +1269,12 @@ ON tid = sid WHEN MATCHED AND t.balance > delta THEN UPDATE SET balance = t.balance + delta; @@ -148741,7 +147698,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/merge.out --label ROLLBACK; -- try a view CREATE VIEW v AS SELECT * FROM sq_source WHERE sid < 2; -@@ -1242,14 +1322,12 @@ +@@ -1242,14 +1284,12 @@ ON tid = sid WHEN MATCHED THEN UPDATE SET balance = v.balance + delta; @@ -148761,7 +147718,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/merge.out --label ROLLBACK; -- ambiguous reference to a column BEGIN; -@@ -1262,9 +1340,10 @@ +@@ -1262,9 +1302,10 @@ INSERT (balance, tid) VALUES (balance + delta, sid) WHEN MATCHED AND tid < 2 THEN DELETE; @@ -148775,7 +147732,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/merge.out --label ROLLBACK; BEGIN; INSERT INTO sq_source (sid, balance, delta) VALUES (-1, -1, -10); -@@ -1277,14 +1356,12 @@ +@@ -1277,14 +1318,12 @@ INSERT (balance, tid) VALUES (balance + delta, sid) WHEN MATCHED AND tid < 2 THEN DELETE; @@ -148795,7 +147752,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/merge.out --label ROLLBACK; -- CTEs BEGIN; -@@ -1301,6 +1378,13 @@ +@@ -1301,6 +1340,13 @@ INSERT (balance, tid) VALUES (balance + delta, sid) WHEN MATCHED AND tid < 2 THEN DELETE; @@ -148809,7 +147766,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/merge.out --label ROLLBACK; -- RETURNING BEGIN; -@@ -1315,15 +1399,18 @@ +@@ -1315,15 +1361,18 @@ WHEN MATCHED AND tid < 2 THEN DELETE RETURNING *; @@ -148831,13 +147788,13 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/merge.out --label INSERT INTO ex_mtarget SELECT i, i*10 FROM generate_series(1,100,2) i; INSERT INTO ex_msource SELECT i, i*10 FROM generate_series(1,100,1) i; CREATE FUNCTION explain_merge(query text) RETURNS SETOF text -@@ -1340,48 +1427,21 @@ +@@ -1340,48 +1389,21 @@ END LOOP; END; $$; +ERROR: unimplemented: set-returning PL/pgSQL functions are not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/105240/v24.2 ++See: https://go.crdb.dev/issue-v/105240/dev -- only updates SELECT explain_merge(' MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a @@ -148885,7 +147842,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/merge.out --label -- updates + deletes SELECT explain_merge(' MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a -@@ -1389,43 +1449,13 @@ +@@ -1389,43 +1411,13 @@ UPDATE SET b = t.b + 1 WHEN MATCHED AND t.a >= 10 AND t.a <= 20 THEN DELETE'); @@ -148931,7 +147888,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/merge.out --label -- all three SELECT explain_merge(' MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a -@@ -1435,45 +1465,16 @@ +@@ -1435,45 +1427,16 @@ DELETE WHEN NOT MATCHED AND s.a < 20 THEN INSERT VALUES (a, b)'); @@ -148980,7 +147937,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/merge.out --label -- Subqueries BEGIN; MERGE INTO sq_target t -@@ -1481,12 +1482,12 @@ +@@ -1481,12 +1444,12 @@ ON tid = sid WHEN MATCHED THEN UPDATE SET balance = (SELECT count(*) FROM sq_target); @@ -148998,7 +147955,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/merge.out --label ROLLBACK; BEGIN; MERGE INTO sq_target t -@@ -1494,12 +1495,12 @@ +@@ -1494,12 +1457,12 @@ ON tid = sid WHEN MATCHED AND (SELECT count(*) > 0 FROM sq_target) THEN UPDATE SET balance = 42; @@ -149016,7 +147973,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/merge.out --label ROLLBACK; BEGIN; MERGE INTO sq_target t -@@ -1507,30 +1508,57 @@ +@@ -1507,30 +1470,57 @@ ON tid = sid AND (SELECT count(*) > 0 FROM sq_target) WHEN MATCHED THEN UPDATE SET balance = 42; @@ -149080,7 +148037,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/merge.out --label -- try simple MERGE BEGIN; MERGE INTO pa_target t -@@ -1540,25 +1568,12 @@ +@@ -1540,25 +1530,12 @@ UPDATE SET balance = balance + delta, val = val || ' updated by merge' WHEN NOT MATCHED THEN INSERT VALUES (sid, delta, 'inserted by merge'); @@ -149111,7 +148068,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/merge.out --label ROLLBACK; -- same with a constant qual BEGIN; -@@ -1569,31 +1584,12 @@ +@@ -1569,31 +1546,12 @@ UPDATE SET balance = balance + delta, val = val || ' updated by merge' WHEN NOT MATCHED THEN INSERT VALUES (sid, delta, 'inserted by merge'); @@ -149148,7 +148105,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/merge.out --label ROLLBACK; -- try updating the partition key column BEGIN; -@@ -1614,52 +1610,80 @@ +@@ -1614,52 +1572,80 @@ RETURN result; END; $$; @@ -149252,7 +148209,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/merge.out --label -- try simple MERGE BEGIN; MERGE INTO pa_target t -@@ -1669,25 +1693,12 @@ +@@ -1669,25 +1655,12 @@ UPDATE SET balance = balance + delta, val = val || ' updated by merge' WHEN NOT MATCHED THEN INSERT VALUES (sid, delta, 'inserted by merge'); @@ -149283,7 +148240,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/merge.out --label ROLLBACK; -- same with a constant qual BEGIN; -@@ -1699,29 +1710,12 @@ +@@ -1699,29 +1672,12 @@ UPDATE SET balance = balance + delta, val = val || ' updated by merge' WHEN NOT MATCHED THEN INSERT VALUES (sid, delta, 'inserted by merge'); @@ -149318,7 +148275,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/merge.out --label ROLLBACK; -- try updating the partition key column BEGIN; -@@ -1732,64 +1726,110 @@ +@@ -1732,64 
+1688,110 @@ UPDATE SET tid = tid + 1, balance = balance + delta, val = val || ' updated by merge' WHEN NOT MATCHED THEN INSERT VALUES (sid, delta, 'inserted by merge'); @@ -149448,7 +148405,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/merge.out --label -- try simple MERGE BEGIN; MERGE INTO pa_target t -@@ -1799,91 +1839,78 @@ +@@ -1799,91 +1801,78 @@ UPDATE SET balance = balance + delta, val = val || ' updated by merge' WHEN NOT MATCHED THEN INSERT VALUES (slogts::timestamp, sid, delta, 'inserted by merge'); @@ -149580,7 +148537,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/merge.out --label INSERT INTO cj_source1 VALUES (1, 10, 100); INSERT INTO cj_source1 VALUES (1, 20, 200); INSERT INTO cj_source1 VALUES (2, 20, 300); -@@ -1898,6 +1925,10 @@ +@@ -1898,6 +1887,10 @@ ON t.tid = sid1 WHEN NOT MATCHED THEN INSERT VALUES (sid1, delta, sval); @@ -149591,7 +148548,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/merge.out --label -- try accessing columns from either side of the source join MERGE INTO cj_target t USING cj_source2 s2 -@@ -1907,6 +1938,10 @@ +@@ -1907,6 +1900,10 @@ INSERT VALUES (sid2, delta, sval) WHEN MATCHED THEN DELETE; @@ -149602,7 +148559,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/merge.out --label -- some simple expressions in INSERT targetlist MERGE INTO cj_target t USING cj_source2 s2 -@@ -1916,20 +1951,24 @@ +@@ -1916,20 +1913,24 @@ INSERT VALUES (sid2, delta + scat, sval) WHEN MATCHED THEN UPDATE SET val = val || ' updated by merge'; @@ -149634,7 +148591,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/merge.out --label -- try it with an outer join and PlaceHolderVar MERGE INTO cj_target t -@@ -1938,19 +1977,14 @@ +@@ -1938,19 +1939,14 @@ ON t.tid = fj.scat WHEN NOT MATCHED THEN INSERT (tid, balance, val) VALUES (fj.scat, fj.delta, fj.phv); @@ -149661,7 +148618,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/merge.out --label ALTER TABLE cj_source1 RENAME COLUMN sid1 TO sid; ALTER TABLE cj_source2 RENAME COLUMN sid2 TO sid; -@@ -1961,10 +1995,15 @@ +@@ -1961,10 +1957,15 @@ ON t.tid = s1.sid WHEN NOT MATCHED THEN INSERT VALUES (s2.sid, delta, sval); @@ -149677,7 +148634,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/merge.out --label MERGE INTO fs_target t USING generate_series(1,100,1) AS id ON t.a = id -@@ -1972,6 +2011,10 @@ +@@ -1972,6 +1973,10 @@ UPDATE SET b = b + id WHEN NOT MATCHED THEN INSERT VALUES (id, -1); @@ -149688,7 +148645,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/merge.out --label MERGE INTO fs_target t USING generate_series(1,100,2) AS id ON t.a = id -@@ -1979,10 +2022,14 @@ +@@ -1979,10 +1984,14 @@ UPDATE SET b = b + id, c = 'updated '|| id.*::text WHEN NOT MATCHED THEN INSERT VALUES (id, -1, 'inserted ' || id.*::text); @@ -149704,7 +148661,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/merge.out --label (1 row) DROP TABLE fs_target; -@@ -1995,12 +2042,29 @@ +@@ -1995,12 +2004,29 @@ peaktemp int, unitsales int ) WITH (autovacuum_enabled=off); @@ -149719,7 +148676,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/merge.out --label +) INHERITS (measurement) WITH (autovacuum_enabled=off) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev CREATE TABLE measurement_y2006m03 ( CHECK ( logdate >= DATE '2006-03-01' AND logdate < DATE '2006-04-01' ) ) INHERITS (measurement) WITH (autovacuum_enabled=off); @@ -149730,11 +148687,11 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/merge.out --label +) INHERITS (measurement) WITH (autovacuum_enabled=off) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev CREATE TABLE measurement_y2007m01 ( filler text, peaktemp int, -@@ -2009,8 +2073,14 @@ +@@ -2009,8 +2035,14 @@ unitsales int CHECK ( logdate >= DATE '2007-01-01' AND logdate < DATE '2007-02-01') ) WITH (autovacuum_enabled=off); @@ -149749,24 +148706,23 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/merge.out --label INSERT INTO measurement VALUES (0, '2005-07-21', 5, 15); CREATE OR REPLACE FUNCTION measurement_insert_trigger() RETURNS TRIGGER AS $$ -@@ -2031,9 +2101,16 @@ +@@ -2031,9 +2063,15 @@ RETURN NULL; END; $$ LANGUAGE plpgsql ; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev CREATE TRIGGER insert_measurement_trigger BEFORE INSERT ON measurement FOR EACH ROW EXECUTE PROCEDURE measurement_insert_trigger(); -+ERROR: at or near "insert_measurement_trigger": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER insert_measurement_trigger -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev INSERT INTO measurement VALUES (1, '2006-02-10', 35, 10); INSERT INTO measurement VALUES (1, '2006-02-16', 45, 20); INSERT INTO measurement VALUES (1, '2006-03-17', 25, 10); -@@ -2041,18 +2118,19 @@ +@@ -2041,18 +2079,19 @@ INSERT INTO measurement VALUES (1, '2007-01-15', 10, 10); INSERT INTO measurement VALUES (1, '2007-01-17', 10, 10); SELECT tableoid::regclass, * FROM measurement ORDER BY city_id, logdate; @@ -149795,7 +148751,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/merge.out --label INSERT INTO new_measurement VALUES (0, '2005-07-21', 25, 20); INSERT INTO new_measurement VALUES (1, '2006-03-01', 20, 10); INSERT INTO new_measurement VALUES (1, '2006-02-16', 50, 10); -@@ -2072,25 +2150,12 @@ +@@ -2072,25 +2111,12 @@ WHEN NOT MATCHED THEN INSERT (city_id, logdate, peaktemp, unitsales) VALUES (city_id, logdate, peaktemp, unitsales); @@ -149826,7 +148782,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/merge.out --label ROLLBACK; MERGE into measurement m USING new_measurement nm ON -@@ -2102,58 +2167,68 @@ +@@ -2102,58 +2128,68 @@ WHEN NOT MATCHED THEN INSERT (city_id, logdate, peaktemp, unitsales) VALUES (city_id, logdate, peaktemp, unitsales); @@ -149936,7 +148892,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_table_like +CREATE TABLE ctlb (bb TEXT) INHERITS (ctla) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev CREATE TABLE foo (LIKE nonexistent); ERROR: relation "nonexistent" does not exist -LINE 1: CREATE TABLE foo (LIKE nonexistent); @@ -149947,7 +148903,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_table_like +CREATE TABLE inhe (ee text, LIKE inhx) inherits (ctlb) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev INSERT INTO inhe VALUES ('ee-col1', 'ee-col2', DEFAULT, 'ee-col4'); +ERROR: relation "inhe" does not exist SELECT * FROM inhe; /* Columns aa, bb, xx value NULL, ee */ @@ -149980,7 +148936,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_table_like CREATE TABLE inhf (LIKE inhx, LIKE inhx); /* Throw error */ -ERROR: column "xx" specified more than once -+ERROR: relation "inhf" (1275): duplicate column name: "xx" ++ERROR: relation "inhf" (1278): duplicate column name: "xx" CREATE TABLE inhf (LIKE inhx INCLUDING DEFAULTS INCLUDING CONSTRAINTS); INSERT INTO inhf DEFAULT VALUES; SELECT * FROM inhf; /* Single entry with value 'text' */ @@ -150058,7 +149014,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_table_like +CREATE TABLE test_like_id_3 (LIKE test_like_id_1 INCLUDING IDENTITY) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/47071/v24.2 ++See: https://go.crdb.dev/issue-v/47071/dev \d test_like_id_3 - Table "public.test_like_id_3" - Column | Type | Collation | Nullable | Default @@ -150274,7 +149230,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_table_like +CREATE TABLE test_like_5 (x point, y point, z point) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev CREATE TABLE test_like_5x (p int CHECK (p > 0), q int GENERATED ALWAYS AS (p * 2) STORED); CREATE TABLE test_like_5c (LIKE test_like_4 INCLUDING ALL) @@ -150285,7 +149241,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_table_like + INHERITS (test_like_5, test_like_5x) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev \d test_like_5c - Table "public.test_like_5c" - Column | Type | Collation | Nullable | Default @@ -150439,7 +149395,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_table_like +CREATE TABLE ctlt12_storage (LIKE ctlt1 INCLUDING STORAGE, LIKE ctlt2 INCLUDING STORAGE) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/47071/v24.2 ++See: https://go.crdb.dev/issue-v/47071/dev \d+ ctlt12_storage - Table "public.ctlt12_storage" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description @@ -150463,7 +149419,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_table_like +CREATE TABLE ctlt12_comments (LIKE ctlt1 INCLUDING COMMENTS, LIKE ctlt2 INCLUDING COMMENTS) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/47071/v24.2 ++See: https://go.crdb.dev/issue-v/47071/dev \d+ ctlt12_comments - Table "public.ctlt12_comments" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description @@ -150490,7 +149446,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_table_like +CREATE TABLE ctlt1_inh (LIKE ctlt1 INCLUDING CONSTRAINTS INCLUDING COMMENTS) INHERITS (ctlt1) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/47071/v24.2 ++See: https://go.crdb.dev/issue-v/47071/dev \d+ ctlt1_inh - Table "public.ctlt1_inh" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description @@ -150524,7 +149480,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_table_like +CREATE TABLE ctlt13_inh () INHERITS (ctlt1, ctlt3) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev \d+ ctlt13_inh - Table "public.ctlt13_inh" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description @@ -150555,7 +149511,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_table_like +CREATE TABLE ctlt13_like (LIKE ctlt3 INCLUDING CONSTRAINTS INCLUDING INDEXES INCLUDING COMMENTS INCLUDING STORAGE) INHERITS (ctlt1) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/47071/v24.2 ++See: https://go.crdb.dev/issue-v/47071/dev \d+ ctlt13_like - Table "public.ctlt13_like" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description @@ -150642,7 +149598,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_table_like +CREATE TABLE inh_error1 () INHERITS (ctlt1, ctlt4) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev CREATE TABLE inh_error2 (LIKE ctlt4 INCLUDING STORAGE) INHERITS (ctlt1); -NOTICE: merging column "a" with inherited definition -ERROR: column "a" has a storage parameter conflict @@ -150652,7 +149608,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/create_table_like +CREATE TABLE inh_error2 (LIKE ctlt4 INCLUDING STORAGE) INHERITS (ctlt1) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/47071/v24.2 ++See: https://go.crdb.dev/issue-v/47071/dev -- Check that LIKE isn't confused by a system catalog of the same name CREATE TABLE pg_attrdef (LIKE ctlt1 INCLUDING ALL); \d+ public.pg_attrdef @@ -150894,14 +149850,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/collate.linux.utf +CREATE DOMAIN testdomain_sv AS text COLLATE "sv_SE" + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev +CREATE DOMAIN testdomain_i AS int COLLATE "sv_SE"; -- fails +ERROR: at or near "as": syntax error: unimplemented: this syntax +DETAIL: source SQL: +CREATE DOMAIN testdomain_i AS int COLLATE "sv_SE" + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev +CREATE TABLE collate_test4 ( + a int, + b testdomain_sv @@ -151158,7 +150114,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/collate.linux.utf +CREATE DOMAIN testdomain AS text + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev +SELECT a, b::testdomain FROM collate_test1 ORDER BY 2; +ERROR: type "testdomain" does not exist +SELECT a, b::testdomain FROM collate_test2 ORDER BY 2; @@ -151315,7 +150271,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/collate.linux.utf +$$; +ERROR: unimplemented: collation for PL/pgSQL variables is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/105245/v24.2 ++See: https://go.crdb.dev/issue-v/105245/dev +SELECT mylt2('a', 'B') as f; + f +--- @@ -151374,7 +150330,6 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/collate.linux.utf + ^ +HINT: try \h CREATE INDEX +CREATE INDEX collate_test1_idx3 ON collate_test1 ((b COLLATE "C")); -- this is different grammatically -+ERROR: invalid locale c: language: tag is not well-formed +CREATE INDEX collate_test1_idx4 ON collate_test1 (((b||'foo') COLLATE "POSIX")); +ERROR: unsupported binary operator: || (returning ) +CREATE INDEX collate_test1_idx5 ON collate_test1 (a COLLATE "C"); -- fail @@ -151384,12 +150339,13 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/collate.linux.utf + ^ +HINT: try \h CREATE INDEX +CREATE INDEX collate_test1_idx6 ON collate_test1 ((a COLLATE "C")); -- fail -+ERROR: invalid locale c: language: tag is not well-formed +SELECT relname, pg_get_indexdef(oid) FROM pg_class WHERE relname LIKE 'collate_test%_idx%' ORDER BY 1; -+ relname | pg_get_indexdef -+--------------------+----------------------------------------------------------------------------------------- ++ relname | pg_get_indexdef ++--------------------+------------------------------------------------------------------------------------------- + collate_test1_idx1 | CREATE INDEX collate_test1_idx1 ON root.collate_tests.collate_test1 USING btree (b ASC) -+(1 row) ++ collate_test1_idx3 | CREATE INDEX collate_test1_idx3 ON root.collate_tests.collate_test1 USING btree ((b) ASC) ++ collate_test1_idx6 | CREATE INDEX collate_test1_idx6 ON root.collate_tests.collate_test1 USING btree ((a) ASC) ++(3 rows) + +-- schema manipulation commands +CREATE ROLE regress_test_role; @@ -151601,7 +150557,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/collate.linux.utf +CREATE DOMAIN collate_dep_dom1 AS text COLLATE test0 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev +CREATE TYPE collate_dep_test2 AS (x int, y text COLLATE test0); +ERROR: at or near "collate": syntax error +DETAIL: source SQL: @@ -151680,14 +150636,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/collate.linux.utf +create type textrange_c as range(subtype=text, collation="C") + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/27791/v24.2 ++See: https://go.crdb.dev/issue-v/27791/dev +create type textrange_en_us as range(subtype=text, collation="en_US"); +ERROR: at or near "(": syntax error: unimplemented: this syntax +DETAIL: source SQL: +create type textrange_en_us as range(subtype=text, collation="en_US") + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27791/v24.2 ++See: https://go.crdb.dev/issue-v/27791/dev +select textrange_c('A','Z') @> 'b'::text; +ERROR: unknown function: textrange_c() +select textrange_en_us('A','Z') @> 'b'::text; @@ -151731,7 +150687,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/collate.windows.w +SET client_encoding TO WIN1252; +ERROR: unimplemented: unimplemented client encoding: "win1252" +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/35882/v24.2 ++See: https://go.crdb.dev/issue-v/35882/dev +CREATE SCHEMA collate_tests; +SET search_path = collate_tests; +CREATE TABLE collate_test1 ( @@ -151807,10 +150763,10 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/collate.windows.w + b text COLLATE "C" +) +^ -+INSERT INTO collate_test1 VALUES (1, 'abc'), (2, '�bc'), (3, 'bbc'), (4, 'ABC'); ++INSERT INTO collate_test1 VALUES (1, 'abc'), (2, '�bc'), (3, 'bbc'), (4, 'ABC'); +ERROR: lexical error: invalid UTF-8 byte sequence +DETAIL: source SQL: -+INSERT INTO collate_test1 VALUES (1, 'abc'), (2, '�bc'), (3, 'bbc'), (4, 'ABC'); ++INSERT INTO collate_test1 VALUES (1, 'abc'), (2, '�bc'), (3, 'bbc'), (4, 'ABC'); + ^ +HINT: try \h INSERT +INSERT INTO collate_test2 SELECT * FROM collate_test1; @@ -151841,14 +150797,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/collate.windows.w +CREATE DOMAIN testdomain_sv AS text COLLATE "sv_SE" + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev +CREATE DOMAIN testdomain_i AS int COLLATE "sv_SE"; -- fails +ERROR: at or near "as": syntax error: unimplemented: this syntax +DETAIL: source SQL: +CREATE DOMAIN testdomain_i AS int COLLATE "sv_SE" + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev +CREATE TABLE collate_test4 ( + a int, + b testdomain_sv @@ -151904,15 +150860,15 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/collate.windows.w +SELECT * FROM collate_test3 ORDER BY b; +ERROR: relation "collate_test3" does not exist +-- constant expression folding -+SELECT 'bbc' COLLATE "en_US" > '�bc' COLLATE "en_US" AS "true"; ++SELECT 'bbc' COLLATE "en_US" > '�bc' COLLATE "en_US" AS "true"; +ERROR: lexical error: invalid UTF-8 byte sequence +DETAIL: source SQL: -+SELECT 'bbc' COLLATE "en_US" > '�bc' COLLATE "en_US" AS "true"; ++SELECT 'bbc' COLLATE "en_US" > '�bc' COLLATE "en_US" AS "true"; + ^ -+SELECT 'bbc' COLLATE "sv_SE" > '�bc' COLLATE "sv_SE" AS "false"; ++SELECT 'bbc' COLLATE "sv_SE" > '�bc' COLLATE "sv_SE" AS "false"; +ERROR: lexical error: invalid UTF-8 byte sequence +DETAIL: source SQL: -+SELECT 'bbc' COLLATE "sv_SE" > '�bc' COLLATE "sv_SE" AS "false"; ++SELECT 'bbc' COLLATE "sv_SE" > '�bc' COLLATE "sv_SE" AS "false"; + ^ +-- LIKE/ILIKE +SELECT * FROM collate_test1 WHERE b LIKE 'abc'; @@ -151952,12 +150908,12 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/collate.windows.w +); +INSERT INTO collate_test6 VALUES (1, 'abc'), (2, 'ABC'), (3, '123'), (4, 'ab1'), + (5, 'a1!'), (6, 'a c'), (7, '!.;'), (8, ' '), -+ (9, '�b�'), (10, '�B�'); ++ (9, '�b�'), (10, '�B�'); +ERROR: lexical error: invalid UTF-8 byte sequence +DETAIL: source SQL: +INSERT INTO collate_test6 VALUES (1, 'abc'), (2, 'ABC'), (3, '123'), (4, 'ab1'), + (5, 'a1!'), (6, 'a c'), (7, '!.;'), (8, ' '), -+ (9, '�b�'), (10, '�B�'); ++ (9, '�b�'), (10, '�B�'); + ^ +HINT: try \h INSERT +SELECT b, @@ -152020,7 +150976,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/collate.windows.w +CREATE DOMAIN testdomain AS text + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev +SELECT a, b::testdomain FROM collate_test1 ORDER BY 2; +ERROR: type "testdomain" does not exist +SELECT a, b::testdomain FROM collate_test2 ORDER BY 2; @@ -152148,7 +151104,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/collate.windows.w +$$; +ERROR: unimplemented: collation for PL/pgSQL variables is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/105245/v24.2 ++See: https://go.crdb.dev/issue-v/105245/dev +SELECT mylt2('a', 'B') as f; + f +--- @@ -152199,7 +151155,6 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/collate.windows.w + ^ +HINT: try \h CREATE INDEX +CREATE INDEX collate_test1_idx3 ON collate_test1 ((b COLLATE "C")); -- this is different grammatically -+ERROR: invalid locale c: language: tag is not well-formed +CREATE INDEX collate_test1_idx4 ON collate_test1 (((b||'foo') COLLATE "POSIX")); +ERROR: unsupported binary operator: || (returning ) +CREATE INDEX collate_test1_idx5 ON collate_test1 (a COLLATE "C"); -- fail @@ -152209,12 +151164,13 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/collate.windows.w + ^ +HINT: try \h CREATE INDEX +CREATE INDEX collate_test1_idx6 ON collate_test1 ((a COLLATE "C")); -- fail -+ERROR: invalid locale c: language: tag is not well-formed +SELECT relname, pg_get_indexdef(oid) FROM pg_class WHERE relname LIKE 'collate_test%_idx%' ORDER BY 1; -+ relname | pg_get_indexdef -+--------------------+----------------------------------------------------------------------------------------- ++ relname | pg_get_indexdef ++--------------------+------------------------------------------------------------------------------------------- + collate_test1_idx1 | CREATE INDEX collate_test1_idx1 ON root.collate_tests.collate_test1 USING btree (b ASC) -+(1 row) ++ collate_test1_idx3 | CREATE INDEX collate_test1_idx3 ON root.collate_tests.collate_test1 USING btree ((b) ASC) ++ collate_test1_idx6 | CREATE INDEX collate_test1_idx6 ON root.collate_tests.collate_test1 USING btree ((a) ASC) ++(3 rows) + +-- schema manipulation commands +CREATE ROLE regress_test_role; @@ -152426,7 +151382,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/collate.windows.w +CREATE DOMAIN collate_dep_dom1 AS text COLLATE test0 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev +CREATE TYPE collate_dep_test2 AS (x int, y text COLLATE test0); +ERROR: at or near "collate": syntax error +DETAIL: source SQL: @@ -152505,14 +151461,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/collate.windows.w +create type textrange_c as range(subtype=text, collation="C") + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27791/v24.2 ++See: https://go.crdb.dev/issue-v/27791/dev +create type textrange_en_us as range(subtype=text, collation="en_US"); +ERROR: at or near "(": syntax error: unimplemented: this syntax +DETAIL: source SQL: +create type textrange_en_us as range(subtype=text, collation="en_US") + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27791/v24.2 ++See: https://go.crdb.dev/issue-v/27791/dev +select textrange_c('A','Z') @> 'b'::text; +ERROR: unknown function: textrange_c() +select textrange_en_us('A','Z') @> 'b'::text; @@ -152697,7 +151653,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/amutils.out --lab +CREATE INDEX fooindex ON foo (f1 desc, f2 asc, f3 nulls first, f4 nulls last) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/6224/v24.2 ++See: https://go.crdb.dev/issue-v/6224/dev select col, prop, pg_index_column_has_property(o, col, prop) from (values ('fooindex'::regclass)) v1(o), (values (1,'orderable'),(2,'asc'),(3,'desc'), @@ -153128,21 +152084,21 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rules.out --label +create table rtest_emp (ename char(20), salary money) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/41578/v24.2 ++See: https://go.crdb.dev/issue-v/41578/dev create table rtest_emplog (ename char(20), who name, action char(10), newsal money, oldsal money); +ERROR: at or near ",": syntax error: unimplemented: this syntax +DETAIL: source SQL: +create table rtest_emplog (ename char(20), who name, action char(10), newsal money, oldsal money) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/41578/v24.2 ++See: https://go.crdb.dev/issue-v/41578/dev create table rtest_empmass (ename char(20), salary money); +ERROR: at or near ")": syntax error: unimplemented: this syntax +DETAIL: source SQL: +create table rtest_empmass (ename char(20), salary money) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/41578/v24.2 ++See: https://go.crdb.dev/issue-v/41578/dev create rule rtest_emp_ins as on insert to rtest_emp do insert into rtest_emplog values (new.ename, current_user, 'hired', new.salary, '0.00'); @@ -156255,7 +155211,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rules.out --label -- check for planner problems with complex inherited UPDATES -- create table id (id serial primary key, name text); -+NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. See https://www.cockroachlabs.com/docs/v24.2/serial.html ++NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. See https://www.cockroachlabs.com/docs/dev/serial.html -- currently, must respecify PKEY for each inherited subtable create table test_1 (id integer primary key) inherits (id); -NOTICE: merging column "id" with inherited definition @@ -156264,7 +155220,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rules.out --label +create table test_1 (id integer primary key) inherits (id) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev create table test_2 (id integer primary key) inherits (id); -NOTICE: merging column "id" with inherited definition +ERROR: at or near "(": syntax error: unimplemented: this syntax @@ -156272,7 +155228,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rules.out --label +create table test_2 (id integer primary key) inherits (id) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev create table test_3 (id integer primary key) inherits (id); -NOTICE: merging column "id" with inherited definition +ERROR: at or near "(": syntax error: unimplemented: this syntax @@ -156280,7 +155236,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rules.out --label +create table test_3 (id integer primary key) inherits (id) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev insert into test_1 (name) values ('Test 1'); +ERROR: relation "test_1" does not exist insert into test_1 (name) values ('Test 2'); @@ -156362,14 +155318,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rules.out --label +create temp table t1_1 (check (a >= 0 and a < 10)) inherits (t1) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev create temp table t1_2 (check (a >= 10 and a < 20)) inherits (t1); +ERROR: at or near "(": syntax error: unimplemented: this syntax +DETAIL: source SQL: +create temp table t1_2 (check (a >= 10 and a < 20)) inherits (t1) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev create rule t1_ins_1 as on insert to t1 where new.a >= 0 and new.a < 10 do instead @@ -157906,7 +156862,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rules.out --label +SET SESSION AUTHORIZATION regress_rule_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev INSERT INTO ruletest_v1 VALUES (1); +ERROR: relation "ruletest_v1" does not exist RESET SESSION AUTHORIZATION; @@ -157944,7 +156900,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rules.out --label +SET SESSION AUTHORIZATION regress_rule_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev UPDATE ruletest_t1 t1 SET x = 0 FROM ruletest_t3 t3 WHERE t1.x = t3.x; -ERROR: permission denied for table ruletest_t3 RESET SESSION AUTHORIZATION; @@ -157997,7 +156953,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/stats_ext.out --l +create function check_estimated_rows(text) returns table (estimated int, actual int) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/100226/v24.2 ++See: https://go.crdb.dev/issue-v/100226/dev -- Verify failures CREATE TABLE ext_stats_test (x text, y int, z int); CREATE STATISTICS tst; @@ -158106,7 +157062,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/stats_ext.out --l +SET SESSION AUTHORIZATION regress_stats_ext + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev COMMENT ON STATISTICS ab1_a_b_stats IS 'changed comment'; -ERROR: must be owner of statistics object ab1_a_b_stats +ERROR: at or near "statistics": syntax error @@ -158314,7 +157270,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/stats_ext.out --l +CREATE TABLE ab1c () INHERITS (ab1) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev INSERT INTO ab1 VALUES (1,1); CREATE STATISTICS ab1_a_b_stats ON a, b FROM ab1; ANALYZE ab1; @@ -158328,14 +157284,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/stats_ext.out --l +CREATE TABLE stxdinh1() INHERITS(stxdinh) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev CREATE TABLE stxdinh2() INHERITS(stxdinh); +ERROR: at or near "(": syntax error: unimplemented: this syntax +DETAIL: source SQL: +CREATE TABLE stxdinh2() INHERITS(stxdinh) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev INSERT INTO stxdinh SELECT mod(a,50), mod(a,100) FROM generate_series(0, 1999) a; INSERT INTO stxdinh1 SELECT mod(a,100), mod(a,100) FROM generate_series(0, 999) a; +ERROR: relation "stxdinh1" does not exist @@ -158713,7 +157669,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/stats_ext.out --l + SELECT i/100, i/100, i/100, cash_words((i/100)::money) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/41578/v24.2 ++See: https://go.crdb.dev/issue-v/41578/dev ANALYZE ndistinct; -- Group Aggregate, due to over-estimate of the number of groups SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b'); @@ -158883,7 +157839,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/stats_ext.out --l + cash_words(mod(i,23)::int::money) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/41578/v24.2 ++See: https://go.crdb.dev/issue-v/41578/dev ANALYZE ndistinct; SELECT s.stxkind, d.stxdndistinct FROM pg_statistic_ext s, pg_statistic_ext_data d @@ -160062,7 +159018,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/stats_ext.out --l -- condition ALTER TABLE functional_dependencies ALTER COLUMN c TYPE numeric; +ERROR: ALTER COLUMN TYPE from int to decimal is only supported experimentally -+HINT: See: https://go.crdb.dev/issue-v/49329/v24.2 ++HINT: See: https://go.crdb.dev/issue-v/49329/dev +-- +you can enable alter column type general support by running `SET enable_experimental_alter_column_type_general = true` SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = 1 AND b = ''1'' AND c = 1'); @@ -161039,7 +159995,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/stats_ext.out --l -- check change of column type resets the MCV statistics ALTER TABLE mcv_lists ALTER COLUMN c TYPE numeric; +ERROR: ALTER COLUMN TYPE from int to decimal is only supported experimentally -+HINT: See: https://go.crdb.dev/issue-v/49329/v24.2 ++HINT: See: https://go.crdb.dev/issue-v/49329/dev +-- +you can enable alter column type general support by running `SET enable_experimental_alter_column_type_general = true` SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 AND b = ''1'''); @@ -162430,7 +161386,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/stats_ext.out --l +SET SESSION AUTHORIZATION regress_stats_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM tststats.priv_test_tbl; -- Permission denied -ERROR: permission denied for table priv_test_tbl + a | b @@ -162577,7 +161533,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/stats_ext.out --l +CREATE OPERATOR <<< (procedure = op_leak, leftarg = int, rightarg = int, + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev SELECT * FROM tststats.priv_test_tbl WHERE a <<< 0 AND b <<< 0; -- Permission denied -ERROR: permission denied for table priv_test_tbl +ERROR: at or near "<": syntax error @@ -162614,7 +161570,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/stats_ext.out --l +SET SESSION AUTHORIZATION regress_stats_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM tststats.priv_test_view WHERE a <<< 0 AND b <<< 0; -- Should not leak - a | b ----+--- @@ -162651,7 +161607,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/stats_ext.out --l +SET SESSION AUTHORIZATION regress_stats_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev SELECT * FROM tststats.priv_test_tbl WHERE a <<< 0 AND b <<< 0; -- Should not leak - a | b ----+--- @@ -162726,7 +161682,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/subscription.out +SET SESSION AUTHORIZATION 'regress_subscription_user' + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev -- fail - no publications CREATE SUBSCRIPTION regress_testsub CONNECTION 'foo'; -ERROR: syntax error at or near ";" @@ -162924,7 +161880,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/subscription.out +SET SESSION AUTHORIZATION 'regress_subscription_user2' + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE SUBSCRIPTION regress_testsub2 CONNECTION 'dbname=regress_doesnotexist' PUBLICATION foo WITH (connect = false); -ERROR: permission denied to create subscription -DETAIL: Only roles with privileges of the "pg_create_subscription" role may create subscriptions. @@ -162949,7 +161905,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/subscription.out +SET SESSION AUTHORIZATION 'regress_subscription_user' + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev -- fail - invalid option combinations CREATE SUBSCRIPTION regress_testsub2 CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, copy_data = true); -ERROR: connect = false and copy_data = true are mutually exclusive options @@ -164169,7 +163125,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/subscription.out +SET SESSION AUTHORIZATION regress_subscription_user3 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev -- fail, not enough privileges CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false); -ERROR: permission denied for database regression @@ -164203,7 +163159,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/subscription.out +SET SESSION AUTHORIZATION regress_subscription_user3 + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false); -ERROR: password is required -DETAIL: Non-superusers must provide a password in the connection string. @@ -164237,7 +163193,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/subscription.out +SET SESSION AUTHORIZATION regress_subscription_user3 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, password_required = false); -ERROR: password_required=false is superuser-only -HINT: Subscriptions with the password_required option set to false may only be created or modified by the superuser. @@ -164271,7 +163227,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/subscription.out +SET SESSION AUTHORIZATION regress_subscription_user3 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist password=regress_fakepassword' PUBLICATION testpub WITH (connect = false); -WARNING: subscription was created, but is not connected -HINT: To initiate replication, you must manually create the replication slot, enable the subscription, and refresh the subscription. @@ -164320,7 +163276,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/subscription.out +SET SESSION AUTHORIZATION regress_subscription_user3 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev ALTER SUBSCRIPTION regress_testsub2 RENAME TO regress_testsub; +ERROR: at or near "subscription": syntax error +DETAIL: source SQL: @@ -164342,7 +163298,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/subscription.out +SET SESSION AUTHORIZATION regress_subscription_user3 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev ALTER SUBSCRIPTION regress_testsub RENAME TO regress_testsub2; -ERROR: permission denied for database regression +ERROR: at or near "subscription": syntax error @@ -164409,7 +163365,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/publication.out - +SET SESSION AUTHORIZATION 'regress_publication_user' + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev -- suppress warning that depends on wal_level SET client_min_messages = 'ERROR'; CREATE PUBLICATION testpub_default; @@ -164554,7 +163510,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/publication.out - --- adding tables CREATE SCHEMA pub_test; CREATE TABLE testpub_tbl1 (id serial primary key, data text); -+NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. See https://www.cockroachlabs.com/docs/v24.2/serial.html ++NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. 
See https://www.cockroachlabs.com/docs/dev/serial.html CREATE TABLE pub_test.testpub_nopk (foo int, bar int); CREATE VIEW testpub_view AS SELECT 1; CREATE TABLE testpub_parted (a int) PARTITION BY LIST (a); @@ -164588,7 +163544,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/publication.out - + ^ +HINT: try \h ALTER CREATE TABLE testpub_tbl2 (id serial primary key, data text); -+NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. See https://www.cockroachlabs.com/docs/v24.2/serial.html ++NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. See https://www.cockroachlabs.com/docs/dev/serial.html -- fail - can't add to for all tables publication ALTER PUBLICATION testpub_foralltables ADD TABLE testpub_tbl2; -ERROR: publication "testpub_foralltables" is defined as FOR ALL TABLES @@ -164973,7 +163929,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/publication.out - +CREATE TABLE testpub_tbl3a (b text) INHERITS (testpub_tbl3) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev SET client_min_messages = 'ERROR'; CREATE PUBLICATION testpub3 FOR TABLE testpub_tbl3; +ERROR: at or near "testpub3": syntax error: unimplemented: this syntax @@ -165224,11 +164180,11 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/publication.out - +CREATE TABLE testpub_rf_tbl5 (a xml) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/43355/v24.2 ++See: https://go.crdb.dev/issue-v/43355/dev CREATE SCHEMA testpub_rf_schema1; CREATE TABLE testpub_rf_schema1.testpub_rf_tbl5 (h integer); CREATE SCHEMA testpub_rf_schema2; -@@ -291,777 +694,1943 @@ +@@ -291,777 +694,1942 @@ -- Firstly, test using the option publish='insert' because the row filter -- validation of referenced columns is less strict than for delete/update. CREATE PUBLICATION testpub5 FOR TABLE testpub_rf_tbl1, testpub_rf_tbl2 WHERE (c <> 'test' AND d < 5) WITH (publish = 'insert'); @@ -165658,14 +164614,13 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/publication.out - -- fail - user-defined operators are not allowed CREATE FUNCTION testpub_rf_func1(integer, integer) RETURNS boolean AS $$ SELECT hashint4($1) > $2 $$ LANGUAGE SQL; +ERROR: unknown function: hashint4() -+HINT: There is probably a typo in function name. Or the intention was to use a user-defined function in the function body, which is currently not supported. CREATE OPERATOR =#> (PROCEDURE = testpub_rf_func1, LEFTARG = integer, RIGHTARG = integer); +ERROR: at or near "=": syntax error: unimplemented: this syntax +DETAIL: source SQL: +CREATE OPERATOR =#> (PROCEDURE = testpub_rf_func1, LEFTARG = integer, RIGHTARG = integer) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev CREATE PUBLICATION testpub6 FOR TABLE testpub_rf_tbl3 WHERE (e =#> 27); -ERROR: invalid publication WHERE expression -LINE 1: ...ICATION testpub6 FOR TABLE testpub_rf_tbl3 WHERE (e =#> 27); @@ -167448,7 +166403,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/publication.out - -- ====================================================== -- Test cache invalidation FOR ALL TABLES publication SET client_min_messages = 'ERROR'; -@@ -1069,196 +2638,537 @@ +@@ -1069,196 +2637,537 @@ INSERT INTO testpub_tbl4 values(1); UPDATE testpub_tbl4 set a = 2; CREATE PUBLICATION testpub_foralltables FOR ALL TABLES; @@ -168077,13 +167032,13 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/publication.out - -- adding schemas and tables CREATE SCHEMA pub_test1; CREATE SCHEMA pub_test2; -@@ -1266,262 +3176,524 @@ +@@ -1266,262 +3175,524 @@ CREATE SCHEMA "CURRENT_SCHEMA"; CREATE TABLE pub_test1.tbl (id int, data text); CREATE TABLE pub_test1.tbl1 (id serial primary key, data text); -+NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. See https://www.cockroachlabs.com/docs/v24.2/serial.html ++NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. See https://www.cockroachlabs.com/docs/dev/serial.html CREATE TABLE pub_test2.tbl1 (id serial primary key, data text); -+NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. See https://www.cockroachlabs.com/docs/v24.2/serial.html ++NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. See https://www.cockroachlabs.com/docs/dev/serial.html CREATE TABLE "CURRENT_SCHEMA"."CURRENT_SCHEMA"(id int); -- suppress warning that depends on wal_level SET client_min_messages = 'ERROR'; @@ -168777,7 +167732,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/publication.out - -- verify relation cache invalidations through update statement for the -- default REPLICA IDENTITY on the relation, if schema is part of the -- publication then update will fail because relation's relreplident -@@ -1530,208 +3702,664 @@ +@@ -1530,208 +3701,664 @@ INSERT INTO pub_test1.tbl VALUES(1, 'test'); -- fail UPDATE pub_test1.tbl SET id = 2; @@ -169555,7 +168510,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/dependency.out -- CREATE USER regress_dep_user3; CREATE GROUP regress_dep_group; CREATE TABLE deptest (f1 serial primary key, f2 text); -+NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. See https://www.cockroachlabs.com/docs/v24.2/serial.html ++NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. See https://www.cockroachlabs.com/docs/dev/serial.html GRANT SELECT ON TABLE deptest TO GROUP regress_dep_group; +ERROR: at or near "group": syntax error +DETAIL: source SQL: @@ -169621,7 +168576,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/dependency.out -- +SET SESSION AUTHORIZATION regress_dep_user0 + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev -- permission denied DROP OWNED BY regress_dep_user1; -ERROR: permission denied to drop objects @@ -169645,9 +168600,9 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/dependency.out -- +SET SESSION AUTHORIZATION regress_dep_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE TABLE deptest (a serial primary key, b text); -+NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. See https://www.cockroachlabs.com/docs/v24.2/serial.html ++NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. See https://www.cockroachlabs.com/docs/dev/serial.html GRANT ALL ON deptest1 TO regress_dep_user2; RESET SESSION AUTHORIZATION; +ERROR: at or near "authorization": syntax error @@ -169784,7 +168739,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/dependency.out -- +SET SESSION AUTHORIZATION regress_dep_user1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE SCHEMA deptest; CREATE TABLE deptest (a serial primary key, b text); +ERROR: relation "root.public.deptest" already exists @@ -169800,7 +168755,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/dependency.out -- +CREATE TYPE deptest_range AS RANGE (SUBTYPE = int4) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27791/v24.2 ++See: https://go.crdb.dev/issue-v/27791/dev CREATE TABLE deptest2 (f1 int); -- make a serial column the hard way CREATE SEQUENCE ss1; @@ -169874,7 +168829,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/xmlmap_1.out --la +CREATE DOMAIN testxmldomain AS varchar + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev CREATE TABLE testxmlschema.test2 (z int, y varchar(500), x char(6), w numeric(9,2), v smallint, u bigint, t real, s time, stz timetz, r timestamp, rtz timestamptz, q date, @@ -169887,7 +168842,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/xmlmap_1.out --la + p xml, o testxmldomain, n bool, m bytea, aaa text) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/43355/v24.2 ++See: https://go.crdb.dev/issue-v/43355/dev ALTER TABLE testxmlschema.test2 DROP COLUMN aaa; +ERROR: relation "testxmlschema.test2" does not exist INSERT INTO testxmlschema.test2 VALUES (55, 'abc', 'def', @@ -169966,7 +168921,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/xmlmap_1.out --la DECLARE xc CURSOR WITH HOLD FOR SELECT * FROM testxmlschema.test1 ORDER BY 1, 2; +ERROR: unimplemented: DECLARE CURSOR WITH HOLD can only be used in transaction blocks +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/77101/v24.2 ++See: https://go.crdb.dev/issue-v/77101/dev SELECT cursor_to_xml('xc'::refcursor, 5, false, true, ''); -ERROR: unsupported XML feature -DETAIL: This functionality requires the server to be built with libxml support. 
@@ -170012,14 +168967,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/xmlmap_1.out --la +CREATE DOMAIN testboolxmldomain AS bool + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev CREATE DOMAIN testdatexmldomain AS date; +ERROR: at or near "as": syntax error: unimplemented: this syntax +DETAIL: source SQL: +CREATE DOMAIN testdatexmldomain AS date + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev CREATE TABLE testxmlschema.test3 AS SELECT true c1, true::testboolxmldomain c2, @@ -171611,7 +170566,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/select_views.out +SET SESSION AUTHORIZATION regress_alice + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev -- -- scenario: if a qualifier with tiny-cost is given, it shall be launched -- prior to the security policy of the view. @@ -171903,7 +170858,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/select_views.out +SET SESSION AUTHORIZATION regress_alice + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev EXECUTE p1; -- To be perform as a view with security-barrier -NOTICE: f_leak => passwd123 - cid | name | tel | passwd @@ -172088,7 +171043,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab +CREATE TEXT SEARCH DICTIONARY ispell ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('ispell', 'skies'); - ts_lexize ------------ @@ -172097,7 +171052,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('ispell', 'bookings'); - ts_lexize ----------------- @@ -172106,7 +171061,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('ispell', 'booking'); - ts_lexize ----------------- @@ -172115,7 +171070,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('ispell', 'foot'); - ts_lexize ------------ @@ -172124,7 +171079,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('ispell', 'foots'); - ts_lexize ------------ @@ -172133,7 +171088,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('ispell', 'rebookings'); - ts_lexize ----------------- @@ -172142,7 +171097,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('ispell', 'rebooking'); - ts_lexize ----------------- @@ -172151,7 +171106,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('ispell', 'rebook'); - ts_lexize ------------ @@ -172160,7 +171115,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('ispell', 'unbookings'); - ts_lexize ------------ @@ -172169,7 +171124,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('ispell', 'unbooking'); - ts_lexize ------------ @@ -172178,7 +171133,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('ispell', 'unbook'); - ts_lexize ------------ @@ -172187,7 +171142,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('ispell', 'footklubber'); - ts_lexize ----------------- @@ -172196,7 +171151,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('ispell', 'footballklubber'); - ts_lexize ------------------------------------------------------- @@ -172205,7 +171160,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('ispell', 'ballyklubber'); - ts_lexize ----------------- @@ -172214,7 +171169,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('ispell', 'footballyklubber'); - ts_lexize ---------------------- @@ -172223,7 +171178,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev -- Test ISpell dictionary with hunspell affix file CREATE TEXT SEARCH DICTIONARY hunspell ( Template=ispell, @@ -172235,7 +171190,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab +CREATE TEXT SEARCH DICTIONARY hunspell ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('hunspell', 'skies'); - ts_lexize ------------ @@ -172244,7 +171199,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('hunspell', 'bookings'); - ts_lexize ----------------- @@ -172253,7 +171208,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('hunspell', 'booking'); - ts_lexize ----------------- @@ -172262,7 +171217,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('hunspell', 'foot'); - ts_lexize ------------ @@ -172271,7 +171226,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('hunspell', 'foots'); - ts_lexize ------------ @@ -172280,7 +171235,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('hunspell', 'rebookings'); - ts_lexize ----------------- @@ -172289,7 +171244,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('hunspell', 'rebooking'); - ts_lexize ----------------- @@ -172298,7 +171253,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('hunspell', 'rebook'); - ts_lexize ------------ @@ -172307,7 +171262,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('hunspell', 'unbookings'); - ts_lexize ------------ @@ -172316,7 +171271,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('hunspell', 'unbooking'); - ts_lexize ------------ @@ -172325,7 +171280,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('hunspell', 'unbook'); - ts_lexize ------------ @@ -172334,7 +171289,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('hunspell', 'footklubber'); - ts_lexize ----------------- @@ -172343,7 +171298,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('hunspell', 'footballklubber'); - ts_lexize ------------------------------------------------------- @@ -172352,7 +171307,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('hunspell', 'ballyklubber'); - ts_lexize ----------------- @@ -172361,7 +171316,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('hunspell', 'footballyklubber'); - ts_lexize ---------------------- @@ -172370,7 +171325,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev -- Test ISpell dictionary with hunspell affix file with FLAG long parameter CREATE TEXT SEARCH DICTIONARY hunspell_long ( Template=ispell, @@ -172382,7 +171337,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab +CREATE TEXT SEARCH DICTIONARY hunspell_long ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('hunspell_long', 'skies'); - ts_lexize ------------ @@ -172391,7 +171346,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('hunspell_long', 'bookings'); - ts_lexize ----------------- @@ -172400,7 +171355,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('hunspell_long', 'booking'); - ts_lexize ----------------- @@ -172409,7 +171364,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('hunspell_long', 'foot'); - ts_lexize ------------ @@ -172418,7 +171373,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('hunspell_long', 'foots'); - ts_lexize ------------ @@ -172427,7 +171382,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('hunspell_long', 'rebookings'); - ts_lexize ----------------- @@ -172436,7 +171391,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('hunspell_long', 'rebooking'); - ts_lexize ----------------- @@ -172445,7 +171400,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('hunspell_long', 'rebook'); - ts_lexize ------------ @@ -172454,7 +171409,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('hunspell_long', 'unbookings'); - ts_lexize ------------ @@ -172463,7 +171418,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('hunspell_long', 'unbooking'); - ts_lexize ------------ @@ -172472,7 +171427,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('hunspell_long', 'unbook'); - ts_lexize ------------ @@ -172481,7 +171436,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('hunspell_long', 'booked'); - ts_lexize ------------ @@ -172490,7 +171445,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('hunspell_long', 'footklubber'); - ts_lexize ----------------- @@ -172499,7 +171454,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('hunspell_long', 'footballklubber'); - ts_lexize ------------------------------------------------------- @@ -172508,7 +171463,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('hunspell_long', 'ballyklubber'); - ts_lexize ----------------- @@ -172517,7 +171472,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('hunspell_long', 'ballsklubber'); - ts_lexize ----------------- @@ -172526,7 +171481,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('hunspell_long', 'footballyklubber'); - ts_lexize ---------------------- @@ -172535,7 +171490,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('hunspell_long', 'ex-machina'); - ts_lexize ---------------- @@ -172544,7 +171499,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev -- Test ISpell dictionary with hunspell affix file with FLAG num parameter CREATE TEXT SEARCH DICTIONARY hunspell_num ( Template=ispell, @@ -172556,7 +171511,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab +CREATE TEXT SEARCH DICTIONARY hunspell_num ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('hunspell_num', 'skies'); - ts_lexize ------------ @@ -172565,7 +171520,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('hunspell_num', 'sk'); - ts_lexize ------------ @@ -172574,7 +171529,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('hunspell_num', 'bookings'); - ts_lexize ----------------- @@ -172583,7 +171538,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('hunspell_num', 'booking'); - ts_lexize ----------------- @@ -172592,7 +171547,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('hunspell_num', 'foot'); - ts_lexize ------------ @@ -172601,7 +171556,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('hunspell_num', 'foots'); - ts_lexize ------------ @@ -172610,7 +171565,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('hunspell_num', 'rebookings'); - ts_lexize ----------------- @@ -172619,7 +171574,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('hunspell_num', 'rebooking'); - ts_lexize ----------------- @@ -172628,7 +171583,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('hunspell_num', 'rebook'); - ts_lexize ------------ @@ -172637,7 +171592,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('hunspell_num', 'unbookings'); - ts_lexize ------------ @@ -172646,7 +171601,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('hunspell_num', 'unbooking'); - ts_lexize ------------ @@ -172655,7 +171610,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('hunspell_num', 'unbook'); - ts_lexize ------------ @@ -172664,7 +171619,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('hunspell_num', 'booked'); - ts_lexize ------------ @@ -172673,7 +171628,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('hunspell_num', 'footklubber'); - ts_lexize ----------------- @@ -172682,7 +171637,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('hunspell_num', 'footballklubber'); - ts_lexize ------------------------------------------------------- @@ -172691,7 +171646,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('hunspell_num', 'ballyklubber'); - ts_lexize ----------------- @@ -172700,7 +171655,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('hunspell_num', 'footballyklubber'); - ts_lexize ---------------------- @@ -172709,7 +171664,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev -- Test suitability of affix and dict files CREATE TEXT SEARCH DICTIONARY hunspell_err ( Template=ispell, @@ -172722,7 +171677,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab +CREATE TEXT SEARCH DICTIONARY hunspell_err ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev CREATE TEXT SEARCH DICTIONARY hunspell_err ( Template=ispell, DictFile=ispell_sample, @@ -172734,7 +171689,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab +CREATE TEXT SEARCH DICTIONARY hunspell_err ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev CREATE TEXT SEARCH DICTIONARY hunspell_invalid_1 ( Template=ispell, DictFile=hunspell_sample_long, @@ -172745,7 +171700,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab +CREATE TEXT SEARCH DICTIONARY hunspell_invalid_1 ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev CREATE TEXT SEARCH DICTIONARY hunspell_invalid_2 ( Template=ispell, DictFile=hunspell_sample_long, @@ -172756,7 +171711,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab +CREATE TEXT SEARCH DICTIONARY hunspell_invalid_2 ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev CREATE TEXT SEARCH DICTIONARY hunspell_invalid_3 ( Template=ispell, DictFile=hunspell_sample_num, @@ -172767,7 +171722,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab +CREATE TEXT SEARCH DICTIONARY hunspell_invalid_3 ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev CREATE TEXT SEARCH DICTIONARY hunspell_err ( Template=ispell, DictFile=hunspell_sample_num, @@ -172779,7 +171734,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab +CREATE TEXT SEARCH DICTIONARY hunspell_err ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev -- Synonym dictionary CREATE TEXT SEARCH DICTIONARY synonym ( Template=synonym, @@ -172790,7 +171745,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab +CREATE TEXT SEARCH DICTIONARY synonym ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('synonym', 'PoStGrEs'); - ts_lexize ------------ @@ -172799,7 +171754,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('synonym', 'Gogle'); - ts_lexize ------------ @@ -172808,7 +171763,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('synonym', 'indices'); - ts_lexize ------------ @@ -172817,7 +171772,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev -- test altering boolean parameters SELECT dictinitoption FROM pg_ts_dict WHERE dictname = 'synonym'; - dictinitoption @@ -172842,7 +171797,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT dictinitoption FROM pg_ts_dict WHERE dictname = 'synonym'; - dictinitoption ------------------------------------------------- @@ -172873,7 +171828,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT dictinitoption FROM pg_ts_dict WHERE dictname = 'synonym'; - dictinitoption ----------------------------------------------------- @@ -172894,7 +171849,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab +CREATE TEXT SEARCH DICTIONARY thesaurus ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('thesaurus', 'one'); - ts_lexize ------------ @@ -172903,7 +171858,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev -- Test ispell dictionary in configuration CREATE TEXT SEARCH CONFIGURATION ispell_tst ( COPY=english @@ -172913,7 +171868,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab +CREATE TEXT SEARCH CONFIGURATION ispell_tst ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev ALTER TEXT SEARCH CONFIGURATION ispell_tst ALTER MAPPING FOR word, numword, asciiword, hword, numhword, asciihword, hword_part, hword_numpart, hword_asciipart WITH ispell, english_stem; @@ -172952,7 +171907,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab +CREATE TEXT SEARCH CONFIGURATION hunspell_tst ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev ALTER TEXT SEARCH CONFIGURATION hunspell_tst ALTER MAPPING REPLACE ispell WITH hunspell; +ERROR: at or near "text": syntax error @@ -173062,7 +172017,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab +CREATE TEXT SEARCH CONFIGURATION synonym_tst ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev ALTER TEXT SEARCH CONFIGURATION synonym_tst ALTER MAPPING FOR asciiword, hword_asciipart, asciihword WITH synonym, english_stem; @@ -173109,7 +172064,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab +CREATE TEXT SEARCH CONFIGURATION thesaurus_tst ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev ALTER TEXT SEARCH CONFIGURATION thesaurus_tst ALTER MAPPING FOR asciiword, hword_asciipart, asciihword WITH synonym, thesaurus, english_stem; @@ -173152,7 +172107,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsdicts.out --lab +CREATE TEXT SEARCH DICTIONARY tsdict_case + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/functional_deps.out --label=/mnt/data1/postgres/src/test/regress/results/functional_deps.out /mnt/data1/postgres/src/test/regress/expected/functional_deps.out /mnt/data1/postgres/src/test/regress/results/functional_deps.out --- /mnt/data1/postgres/src/test/regress/expected/functional_deps.out +++ /mnt/data1/postgres/src/test/regress/results/functional_deps.out @@ -173245,7 +172200,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/functional_deps.o -- snip PRIMARY KEY (nid, vid) ); -+NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. See https://www.cockroachlabs.com/docs/v24.2/serial.html ++NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. See https://www.cockroachlabs.com/docs/dev/serial.html CREATE TEMP TABLE users ( uid integer NOT NULL default '0', name varchar(60) NOT NULL default '', @@ -173267,9 +172222,9 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/functional_deps.o -ERROR: cannot drop constraint articles_pkey on table articles because other objects depend on it -DETAIL: view fdv1 depends on constraint articles_pkey on table articles -HINT: Use DROP ... CASCADE to drop the dependent objects too. -+ERROR: relation "articles" (1527): unimplemented: primary key dropped without subsequent addition of new primary key in same transaction ++ERROR: relation "articles" (1530): unimplemented: primary key dropped without subsequent addition of new primary key in same transaction +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/48026/v24.2 ++See: https://go.crdb.dev/issue-v/48026/dev DROP VIEW fdv1; -- multiple dependencies CREATE TEMP VIEW fdv2 AS @@ -173280,16 +172235,16 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/functional_deps.o -ERROR: cannot drop constraint articles_pkey on table articles because other objects depend on it -DETAIL: view fdv2 depends on constraint articles_pkey on table articles -HINT: Use DROP ... CASCADE to drop the dependent objects too. -+ERROR: relation "articles" (1527): unimplemented: primary key dropped without subsequent addition of new primary key in same transaction ++ERROR: relation "articles" (1530): unimplemented: primary key dropped without subsequent addition of new primary key in same transaction +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/48026/v24.2 ++See: https://go.crdb.dev/issue-v/48026/dev ALTER TABLE articles_in_category DROP CONSTRAINT articles_in_category_pkey RESTRICT; --fail -ERROR: cannot drop constraint articles_in_category_pkey on table articles_in_category because other objects depend on it -DETAIL: view fdv2 depends on constraint articles_in_category_pkey on table articles_in_category -HINT: Use DROP ... CASCADE to drop the dependent objects too. -+ERROR: relation "articles_in_category" (1528): unimplemented: primary key dropped without subsequent addition of new primary key in same transaction ++ERROR: relation "articles_in_category" (1531): unimplemented: primary key dropped without subsequent addition of new primary key in same transaction +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/48026/v24.2 ++See: https://go.crdb.dev/issue-v/48026/dev DROP VIEW fdv2; -- nested queries CREATE TEMP VIEW fdv3 AS @@ -173300,9 +172255,9 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/functional_deps.o -ERROR: cannot drop constraint articles_pkey on table articles because other objects depend on it -DETAIL: view fdv3 depends on constraint articles_pkey on table articles -HINT: Use DROP ... CASCADE to drop the dependent objects too. -+ERROR: relation "articles" (1527): unimplemented: primary key dropped without subsequent addition of new primary key in same transaction ++ERROR: relation "articles" (1530): unimplemented: primary key dropped without subsequent addition of new primary key in same transaction +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/48026/v24.2 ++See: https://go.crdb.dev/issue-v/48026/dev DROP VIEW fdv3; CREATE TEMP VIEW fdv4 AS SELECT * FROM articles WHERE title IN (SELECT title FROM articles GROUP BY id); @@ -173310,9 +172265,9 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/functional_deps.o -ERROR: cannot drop constraint articles_pkey on table articles because other objects depend on it -DETAIL: view fdv4 depends on constraint articles_pkey on table articles -HINT: Use DROP ... CASCADE to drop the dependent objects too. -+ERROR: relation "articles" (1527): unimplemented: primary key dropped without subsequent addition of new primary key in same transaction ++ERROR: relation "articles" (1530): unimplemented: primary key dropped without subsequent addition of new primary key in same transaction +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/48026/v24.2 ++See: https://go.crdb.dev/issue-v/48026/dev DROP VIEW fdv4; -- prepared query plans: this results in failure on reuse PREPARE foo AS @@ -173320,9 +172275,9 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/functional_deps.o (0 rows) ALTER TABLE articles DROP CONSTRAINT articles_pkey RESTRICT; -+ERROR: relation "articles" (1527): unimplemented: primary key dropped without subsequent addition of new primary key in same transaction ++ERROR: relation "articles" (1530): unimplemented: primary key dropped without subsequent addition of new primary key in same transaction +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/48026/v24.2 ++See: https://go.crdb.dev/issue-v/48026/dev EXECUTE foo; -- fail -ERROR: column "articles.keywords" must appear in the GROUP BY clause or be used in an aggregate function + id | keywords | title | body | created @@ -173341,7 +172296,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/equivclass.out -- +create type int8alias1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27793/v24.2 ++See: https://go.crdb.dev/issue-v/27793/dev create function int8alias1in(cstring) returns int8alias1 strict immutable language internal as 'int8in'; -NOTICE: return type int8alias1 is only a shell @@ -173361,14 +172316,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/equivclass.out -- + input = int8alias1in, + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27793/v24.2 ++See: https://go.crdb.dev/issue-v/27793/dev create type int8alias2; +ERROR: at or near "EOF": syntax error: unimplemented: this syntax +DETAIL: source SQL: +create type int8alias2 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27793/v24.2 ++See: https://go.crdb.dev/issue-v/27793/dev create function int8alias2in(cstring) returns int8alias2 strict immutable language internal as 'int8in'; -NOTICE: return type int8alias2 is only a shell @@ -173388,7 +172343,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/equivclass.out -- + input = int8alias2in, + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27793/v24.2 ++See: https://go.crdb.dev/issue-v/27793/dev create cast (int8 as int8alias1) without function; +ERROR: at or near "(": syntax error: unimplemented: this syntax +DETAIL: source SQL: @@ -173468,7 +172423,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/equivclass.out -- +create operator = ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev alter operator family integer_ops using btree add operator 3 = (int8alias1, int8alias1); +ERROR: at or near "operator": syntax error @@ -173491,7 +172446,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/equivclass.out -- +create operator = ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev alter operator family integer_ops using btree add operator 3 = (int8alias2, int8alias2); +ERROR: at or near "operator": syntax error @@ -173513,7 +172468,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/equivclass.out -- +create operator = ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev alter operator family integer_ops using btree add operator 3 = (int8, int8alias1); +ERROR: at or near "operator": syntax error @@ -173535,7 +172490,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/equivclass.out -- +create operator = ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev alter operator family integer_ops using btree add operator 3 = (int8alias1, int8alias2); +ERROR: at or near "operator": syntax error @@ -173555,7 +172510,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/equivclass.out -- +create operator < ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev alter operator family integer_ops using btree add operator 1 < (int8alias1, int8alias1); +ERROR: at or near "operator": syntax error @@ -174082,7 +173037,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/equivclass.out -- +set session authorization regress_user_ectest + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev -- with RLS active, the non-leakproof a.ff = 43 clause is not treated -- as a suitable source for an EquivalenceClass; currently, this is true -- even though the RLS clause has nothing to do directly with the EC @@ -175422,7 +174377,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/window.out --labe +CREATE FUNCTION unbounded_syntax_test1a(x int) RETURNS TABLE (a int, b int, c int) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/100226/v24.2 ++See: https://go.crdb.dev/issue-v/100226/dev CREATE FUNCTION unbounded_syntax_test1b(x int) RETURNS TABLE (a int, b int, c int) LANGUAGE SQL AS $$ @@ -175435,7 +174390,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/window.out --labe +CREATE FUNCTION unbounded_syntax_test1b(x int) RETURNS TABLE (a int, b int, c int) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/100226/v24.2 ++See: https://go.crdb.dev/issue-v/100226/dev -- These will apply the argument to the window specification inside the function. SELECT * FROM unbounded_syntax_test1a(2); - a | b | c @@ -175481,7 +174436,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/window.out --labe +CREATE FUNCTION unbounded_syntax_test2a(unbounded int) RETURNS TABLE (a int, b int, c int) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/100226/v24.2 ++See: https://go.crdb.dev/issue-v/100226/dev CREATE FUNCTION unbounded_syntax_test2b(unbounded int) RETURNS TABLE (a int, b int, c int) LANGUAGE SQL AS $$ @@ -175494,7 +174449,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/window.out --labe +CREATE FUNCTION unbounded_syntax_test2b(unbounded int) RETURNS TABLE (a int, b int, c int) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/100226/v24.2 ++See: https://go.crdb.dev/issue-v/100226/dev -- These will not apply the argument but instead treat UNBOUNDED as a keyword. SELECT * FROM unbounded_syntax_test2a(2); - a | b | c @@ -177455,7 +176410,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/window.out --labe +CREATE AGGREGATE logging_agg_nonstrict (anyelement) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev CREATE AGGREGATE logging_agg_nonstrict_initcond (anyelement) ( stype = text, @@ -177468,7 +176423,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/window.out --labe +CREATE AGGREGATE logging_agg_nonstrict_initcond (anyelement) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev CREATE FUNCTION logging_sfunc_strict(text, anyelement) RETURNS text AS $$ SELECT $1 || '*' || quote_nullable($2) $$ LANGUAGE SQL STRICT IMMUTABLE; @@ -177481,7 +176436,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/window.out --labe +CREATE AGGREGATE logging_agg_strict (text) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev CREATE AGGREGATE logging_agg_strict_initcond (anyelement) ( stype = text, @@ -177494,7 +176449,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/window.out --labe +CREATE AGGREGATE logging_agg_strict_initcond (anyelement) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- test strict and non-strict cases SELECT p::text || ',' || i::text || ':' || COALESCE(v::text, 'NULL') AS row, @@ -177594,7 +176549,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/window.out --labe +CREATE AGGREGATE sum_int_randomrestart (int4) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev WITH vs AS ( SELECT i, (random() * 100)::int4 AS v @@ -177704,7 +176659,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/window.out --labe +SELECT i,SUM(v::money) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/41578/v24.2 ++See: https://go.crdb.dev/issue-v/41578/dev SELECT i,SUM(v::interval) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) FROM (VALUES(1,'1 sec'),(2,'2 sec'),(3,NULL),(4,NULL)) t(i,v); i | sum @@ -178230,7 +177185,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/indirect_toast.ou LANGUAGE C STRICT; +ERROR: unimplemented: C is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/102201/v24.2 ++See: https://go.crdb.dev/issue-v/102201/dev -- Other compression algorithms may cause the compressed data to be stored -- inline. pglz guarantees that the data is externalized, so stick to it. SET default_toast_compression = 'pglz'; @@ -178265,26 +177220,25 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/indirect_toast.ou SELECT substring(indtoasttest::text, 1, 200) FROM indtoasttest; substring ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -@@ -94,11 +97,18 @@ +@@ -94,11 +97,17 @@ NEW := make_tuple_indirect(NEW); RETURN NEW; END$$; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev CREATE TRIGGER indtoasttest_update_indirect BEFORE INSERT OR UPDATE ON indtoasttest FOR EACH ROW EXECUTE PROCEDURE update_using_indirect(); -+ERROR: at or near "indtoasttest_update_indirect": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER indtoasttest_update_indirect -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev -- modification without changing varlenas UPDATE indtoasttest SET cnt = cnt +1 RETURNING substring(indtoasttest::text, 1, 200); substring -@@ -151,6 +161,10 @@ +@@ -151,6 +160,10 @@ -- check we didn't screw with main/toast tuple visibility VACUUM FREEZE indtoasttest; @@ -178295,7 +177249,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/indirect_toast.ou SELECT substring(indtoasttest::text, 1, 200) FROM indtoasttest; substring ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -@@ -163,4 +177,6 @@ +@@ -163,4 +176,6 @@ DROP TABLE indtoasttest; DROP FUNCTION update_using_indirect(); @@ -179641,7 +178595,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_lexize('english_stem', 'identity'); - ts_lexize ------------ @@ -179650,7 +178604,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_lexize(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT * FROM ts_token_type('default'); - tokid | alias | description --------+-----------------+------------------------------------------ @@ -179706,6 +178660,35 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab + 1 | 1aew + 1 | werc + 1 | ewr ++ 1 | ad ++ 1 | qwe ++ 1 | dw ++ 1 | 2aew ++ 1 | werc ++ 1 | ewr ++ 1 | http ++ 1 | 3aew ++ 1 | werc ++ 1 | ewr ++ 1 | ad ++ 1 | qwe ++ 1 | dw ++ 1 | http ++ 1 | 4aew ++ 1 | werc ++ 1 | ewr ++ 1 | http ++ 1 | 5aew ++ 1 | werc ++ 1 | ewr ++ 1 | 8100 ++ 1 | ad ++ 1 | qwe ++ 1 | dw ++ 1 | 6aew ++ 1 | werc ++ 1 | ewr ++ 1 | 8100 + 1 | ad 1 | qwe - 12 | @ @@ -179739,35 +178722,6 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - 18 | /? - 12 | + 1 | dw -+ 1 | 2aew -+ 1 | werc -+ 1 | ewr -+ 1 | http -+ 1 | 3aew -+ 1 | werc -+ 1 | ewr -+ 1 | ad -+ 1 | qwe -+ 1 | dw -+ 1 | http -+ 1 | 4aew -+ 1 | werc -+ 1 | ewr -+ 1 | http -+ 1 | 5aew -+ 1 | werc -+ 1 | ewr -+ 1 | 8100 -+ 1 | ad -+ 1 | qwe -+ 1 | dw -+ 1 | 6aew -+ 1 | werc -+ 1 | ewr -+ 1 | 8100 -+ 1 | ad -+ 1 | qwe -+ 1 | dw + 1 | 7aew + 1 | werc + 1 | ewr @@ -179986,7 +178940,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_debug(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev -- check parsing of URLs SELECT * from ts_debug('english', 'http://www.harewoodsolutions.co.uk/press.aspx'); - alias | description | token | dictionaries | dictionary | lexemes @@ -180000,7 +178954,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_debug(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT * from ts_debug('english', 'http://aew.wer0c.ewr/id?ad=qwe&dw'); - alias | description | token | dictionaries | dictionary | lexemes -----------+---------------+----------------------------+--------------+------------+------------------------------ @@ -180013,7 +178967,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_debug(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT * from ts_debug('english', 'http://5aew.werc.ewr:8100/?'); - alias | description | token | dictionaries | dictionary | lexemes -----------+---------------+----------------------+--------------+------------+------------------------ @@ -180025,7 +178979,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_debug(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT * from ts_debug('english', '5aew.werc.ewr:8100/?xx'); - alias | description | token | dictionaries | dictionary | lexemes -----------+-------------+------------------------+--------------+------------+-------------------------- @@ -180036,7 +178990,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_debug(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT token, alias, dictionaries, dictionaries is null as dnull, array_dims(dictionaries) as ddims, lexemes, lexemes is null as lnull, array_dims(lexemes) as ldims @@ -180050,7 +179004,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_debug(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev -- to_tsquery SELECT to_tsquery('english', 'qwe & sKies '); to_tsquery @@ -180109,7 +179063,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_rank_cd(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_rank_cd(to_tsvector('english', ' Day after day, day after day, We stuck, nor breath nor motion, @@ -180124,7 +179078,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_rank_cd(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_rank_cd(to_tsvector('english', ' Day after day, day after day, We stuck, nor breath nor motion, @@ -180139,7 +179093,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_rank_cd(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_rank_cd(to_tsvector('english', ' Day after day, day after day, We stuck, nor breath nor motion, @@ -180154,7 +179108,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_rank_cd(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_rank_cd(strip(to_tsvector('both stripped')), to_tsquery('both & stripped')); - ts_rank_cd @@ -180164,7 +179118,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_rank_cd(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_rank_cd(to_tsvector('unstripped') || strip(to_tsvector('stripped')), to_tsquery('unstripped & stripped')); - ts_rank_cd @@ -180174,7 +179128,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_rank_cd(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev --headline tests SELECT ts_headline('english', ' Day after day, day after day, @@ -180192,7 +179146,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_headline(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_headline('english', ' Day after day, day after day, We stuck, nor breath nor motion, @@ -180210,7 +179164,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_headline(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_headline('english', ' Day after day, day after day, We stuck, nor breath nor motion, @@ -180228,7 +179182,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_headline(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_headline('english', ' Day after day, day after day, We stuck, nor breath nor motion, @@ -180250,7 +179204,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_headline(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_headline('english', ' Day after day, day after day, We stuck, nor breath nor motion, @@ -180267,7 +179221,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_headline(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_headline('english', ' Day after day, day after day, We stuck, nor breath nor motion, @@ -180284,7 +179238,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_headline(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_headline('english', ' Day after day, day after day, We stuck, nor breath nor motion, @@ -180304,7 +179258,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_headline(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_headline('english', ' Day after day, day after day, We stuck, nor breath nor motion, @@ -180322,7 +179276,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_headline(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_headline('english', ' Day after day, day after day, We stuck, nor breath nor motion, @@ -180340,7 +179294,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_headline(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_headline('english', ' Day after day, day after day, We stuck, nor breath nor motion, @@ -180358,7 +179312,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_headline(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_headline('english', ' Day after day, day after day, We stuck, nor breath nor motion, @@ -180376,7 +179330,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_headline(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_headline('english', 'Lorem ipsum urna. Nullam nullam ullamcorper urna.', to_tsquery('english','Lorem') && phraseto_tsquery('english','ullamcorper urna'), @@ -180388,7 +179342,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_headline(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_headline('english', 'Lorem ipsum urna. Nullam nullam ullamcorper urna.', phraseto_tsquery('english','ullamcorper urna'), @@ -180400,7 +179354,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_headline(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_headline('english', ' @@ -180426,7 +179380,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_headline(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_headline('simple', '1 2 3 1 3'::text, '1 <-> 3', 'MaxWords=2, MinWords=1'); - ts_headline -------------------- @@ -180435,7 +179389,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_headline(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_headline('simple', '1 2 3 1 3'::text, '1 & 3', 'MaxWords=4, MinWords=1'); - ts_headline ---------------------- @@ -180444,7 +179398,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_headline(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_headline('simple', '1 2 3 1 3'::text, '1 <-> 3', 'MaxWords=4, MinWords=1'); - ts_headline -------------------- @@ -180453,7 +179407,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_headline(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev --Check if headline fragments work SELECT ts_headline('english', ' Day after day, day after day, @@ -180475,7 +179429,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_headline(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev --Check if more than one fragments are displayed SELECT ts_headline('english', ' Day after day, day after day, @@ -180497,7 +179451,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_headline(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev --Fragments when there all query words are not in the document SELECT ts_headline('english', ' Day after day, day after day, @@ -180515,7 +179469,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_headline(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev --FragmentDelimiter option SELECT ts_headline('english', ' Day after day, day after day, @@ -180537,7 +179491,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_headline(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev --Fragments with phrase search SELECT ts_headline('english', 'Lorem ipsum urna. 
Nullam nullam ullamcorper urna.', @@ -180550,7 +179504,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_headline(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev -- Edge cases with empty query SELECT ts_headline('english', '', to_tsquery('english', '')); @@ -180562,7 +179516,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_headline(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_headline('english', 'foo bar', to_tsquery('english', '')); -NOTICE: text-search query doesn't contain lexemes: "" @@ -180573,7 +179527,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_headline(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev --Rewrite sub system CREATE TABLE test_tsquery (txtkeyword TEXT, txtsample TEXT); \set ECHO none @@ -180617,7 +179571,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab CREATE UNIQUE INDEX bt_tsq ON test_tsquery (keyword); +ERROR: unimplemented: column keyword is of type tsquery and thus is not indexable +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/35730/v24.2 ++See: https://go.crdb.dev/issue-v/35730/dev SET enable_seqscan=OFF; +WARNING: setting session var "enable_seqscan" is a no-op SELECT COUNT(*) FROM test_tsquery WHERE keyword < 'new <-> york'; @@ -180664,7 +179618,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_rewrite(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_rewrite(ts_rewrite('new & !york ', 'york', '!jersey'), 'jersey', 'mexico'); - ts_rewrite @@ -180674,7 +179628,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_rewrite(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_rewrite('moscow', 'SELECT keyword, sample FROM test_tsquery'::text ); - ts_rewrite ---------------------- @@ -180683,7 +179637,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_rewrite(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_rewrite('moscow & hotel', 'SELECT keyword, sample FROM test_tsquery'::text ); - ts_rewrite ------------------------------------ @@ -180692,7 +179646,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_rewrite(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_rewrite('bar & qq & foo & (new <-> york)', 'SELECT keyword, sample FROM test_tsquery'::text ); - ts_rewrite -------------------------------------------------------------------------------------- @@ -180701,7 +179655,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_rewrite(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_rewrite( 'moscow', 'SELECT keyword, sample FROM test_tsquery'); - ts_rewrite ---------------------- @@ -180710,7 +179664,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_rewrite(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_rewrite( 'moscow & hotel', 'SELECT keyword, sample FROM test_tsquery'); - ts_rewrite ------------------------------------ @@ -180719,7 +179673,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_rewrite(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_rewrite( 'bar & qq & foo & (new <-> york)', 'SELECT keyword, sample FROM test_tsquery'); - ts_rewrite -------------------------------------------------------------------------------------- @@ -180728,7 +179682,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_rewrite(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_rewrite('1 & (2 <-> 3)', 'SELECT keyword, sample FROM test_tsquery'::text ); - ts_rewrite -------------- @@ -180737,7 +179691,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_rewrite(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_rewrite('1 & (2 <2> 3)', 'SELECT keyword, sample FROM test_tsquery'::text ); - ts_rewrite -------------------- @@ -180746,7 +179700,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_rewrite(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_rewrite('5 <-> (1 & (2 <-> 3))', 'SELECT keyword, sample FROM test_tsquery'::text ); - ts_rewrite -------------------------- @@ -180755,7 +179709,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_rewrite(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_rewrite('5 <-> (6 | 8)', 'SELECT keyword, sample FROM test_tsquery'::text ); - ts_rewrite ------------------------ @@ -180764,7 +179718,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_rewrite(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev -- Check empty substitution SELECT ts_rewrite(to_tsquery('5 & (6 | 5)'), to_tsquery('5'), to_tsquery('')); -NOTICE: text-search query doesn't contain lexemes: "" @@ -180775,7 +179729,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_rewrite(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_rewrite(to_tsquery('!5'), to_tsquery('5'), to_tsquery('')); -NOTICE: text-search query doesn't contain lexemes: "" - ts_rewrite @@ -180785,7 +179739,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_rewrite(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT keyword FROM test_tsquery WHERE keyword @> 'new'; - keyword ------------------- @@ -180821,7 +179775,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_rewrite(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_rewrite( query, 'SELECT keyword, sample FROM test_tsquery' ) FROM to_tsquery('english', 'moscow & hotel') AS query; - ts_rewrite ------------------------------------ @@ -180830,7 +179784,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_rewrite(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_rewrite( query, 'SELECT keyword, sample FROM test_tsquery' ) FROM to_tsquery('english', 'bar & qq & foo & (new <-> york)') AS query; - ts_rewrite -------------------------------------------------------------------------------------- @@ -180839,7 +179793,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_rewrite(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_rewrite( query, 'SELECT keyword, sample FROM test_tsquery' ) FROM to_tsquery('english', 'moscow') AS query; - ts_rewrite ---------------------- @@ -180848,7 +179802,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_rewrite(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_rewrite( query, 'SELECT keyword, sample FROM test_tsquery' ) FROM to_tsquery('english', 'moscow & hotel') AS query; - ts_rewrite ------------------------------------ @@ -180857,7 +179811,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_rewrite(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_rewrite( query, 'SELECT keyword, sample FROM test_tsquery' ) FROM to_tsquery('english', 'bar & qq & foo & (new <-> york)') AS query; - ts_rewrite -------------------------------------------------------------------------------------- @@ -180866,10 +179820,10 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_rewrite(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev CREATE INDEX qq ON test_tsquery USING gist (keyword tsquery_ops); +ERROR: column keyword of type tsquery is not allowed as the last column in an inverted index -+HINT: see the documentation for more information about inverted indexes: https://www.cockroachlabs.com/docs/v24.2/inverted-indexes.html ++HINT: see the documentation for more information about inverted indexes: https://www.cockroachlabs.com/docs/dev/inverted-indexes.html SET enable_seqscan=OFF; +WARNING: setting session var "enable_seqscan" is a no-op SELECT keyword FROM test_tsquery WHERE keyword @> 'new'; @@ -180907,7 +179861,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_rewrite(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_rewrite( query, 'SELECT keyword, sample FROM test_tsquery' ) FROM to_tsquery('english', 'moscow & hotel') AS query; - ts_rewrite ------------------------------------ @@ -180916,7 +179870,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_rewrite(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_rewrite( query, 'SELECT keyword, sample FROM test_tsquery' ) FROM to_tsquery('english', 'bar & qq & foo & (new <-> york)') AS query; - ts_rewrite -------------------------------------------------------------------------------------- @@ -180925,7 +179879,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_rewrite(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_rewrite( query, 'SELECT keyword, sample FROM test_tsquery' ) FROM to_tsquery('english', 'moscow') AS query; - ts_rewrite ---------------------- @@ -180934,7 +179888,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_rewrite(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_rewrite( query, 'SELECT keyword, sample FROM test_tsquery' ) FROM to_tsquery('english', 'moscow & hotel') AS query; - ts_rewrite ------------------------------------ @@ -180943,7 +179897,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_rewrite(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_rewrite( query, 'SELECT keyword, sample FROM test_tsquery' ) FROM to_tsquery('english', 'bar & qq & foo & (new <-> york)') AS query; - ts_rewrite -------------------------------------------------------------------------------------- @@ -180952,7 +179906,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_rewrite(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT ts_rewrite(tsquery_phrase('foo', 'foo'), 'foo', 'bar | baz'); - ts_rewrite ------------------------------------------ @@ -180961,7 +179915,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_rewrite(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT to_tsvector('foo bar') @@ ts_rewrite(tsquery_phrase('foo', 'foo'), 'foo', 'bar | baz'); - ?column? @@ -180971,7 +179925,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_rewrite(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev SELECT to_tsvector('bar baz') @@ ts_rewrite(tsquery_phrase('foo', 'foo'), 'foo', 'bar | baz'); - ?column? 
@@ -180981,26 +179935,23 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: ts_rewrite(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev RESET enable_seqscan; +WARNING: setting session var "enable_seqscan" is a no-op --test GUC SET default_text_search_config=simple; SELECT to_tsvector('SKIES My booKs'); -@@ -2486,6 +2182,12 @@ +@@ -2486,6 +2182,9 @@ CREATE TRIGGER tsvectorupdate BEFORE UPDATE OR INSERT ON test_tsvector FOR EACH ROW EXECUTE PROCEDURE tsvector_update_trigger(a, 'pg_catalog.english', t); -+ERROR: at or near "tsvectorupdate": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER tsvectorupdate -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev SELECT count(*) FROM test_tsvector WHERE a @@ to_tsquery('345&qwerty'); count ------- -@@ -2496,7 +2198,7 @@ +@@ -2496,7 +2195,7 @@ SELECT count(*) FROM test_tsvector WHERE a @@ to_tsquery('345&qwerty'); count ------- @@ -181009,7 +179960,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab (1 row) UPDATE test_tsvector SET t = null WHERE t = '345 qwerty'; -@@ -2510,31 +2212,27 @@ +@@ -2510,31 +2209,27 @@ SELECT count(*) FROM test_tsvector WHERE a @@ to_tsquery('345&qwerty'); count ------- @@ -181052,7 +180003,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab -- test finding items in GIN's pending list create temp table pendtest (ts tsvector); create index pendtest_idx on pendtest using gin(ts); -@@ -2575,6 +2273,7 @@ +@@ -2575,6 +2270,7 @@ insert into phrase_index_test values (to_tsvector('english', 'A fat cat has just eaten a rat.')); create index phrase_index_test_idx on phrase_index_test using gin(fts); set enable_seqscan = off; @@ -181060,7 +180011,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab select * from phrase_index_test where fts @@ phraseto_tsquery('english', 'fat cat'); fts ----------------------------------- -@@ -2582,426 +2281,287 @@ +@@ -2582,426 +2278,287 @@ (1 row) set enable_seqscan = on; @@ -181074,7 +180025,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select websearch_to_tsquery('simple', 'orange:**AABBCCDD'); - websearch_to_tsquery ------------------------ @@ -181083,7 +180034,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select websearch_to_tsquery('simple', 'fat:A!cat:B|rat:C<'); - websearch_to_tsquery ------------------------------------------ @@ -181092,7 +180043,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select websearch_to_tsquery('simple', 'fat:A : cat:B'); - websearch_to_tsquery ---------------------------- @@ -181101,7 +180052,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select websearch_to_tsquery('simple', 'fat*rat'); - websearch_to_tsquery ----------------------- @@ -181110,7 +180061,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select websearch_to_tsquery('simple', 'fat-rat'); - websearch_to_tsquery -------------------------------- @@ -181119,7 +180070,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select websearch_to_tsquery('simple', 'fat_rat'); - websearch_to_tsquery ----------------------- @@ -181128,7 +180079,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev -- weights are completely ignored select websearch_to_tsquery('simple', 'abc : def'); - websearch_to_tsquery @@ -181138,7 +180089,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select websearch_to_tsquery('simple', 'abc:def'); - websearch_to_tsquery ----------------------- @@ -181147,7 +180098,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select websearch_to_tsquery('simple', 'a:::b'); - websearch_to_tsquery ----------------------- @@ -181156,7 +180107,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select websearch_to_tsquery('simple', 'abc:d'); - websearch_to_tsquery ----------------------- @@ -181165,7 +180116,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select websearch_to_tsquery('simple', ':'); -NOTICE: text-search query contains only stop words or doesn't contain lexemes, ignored - websearch_to_tsquery @@ -181175,7 +180126,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev -- these operators are ignored select websearch_to_tsquery('simple', 'abc & def'); - websearch_to_tsquery @@ -181185,7 +180136,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select websearch_to_tsquery('simple', 'abc | def'); - websearch_to_tsquery ----------------------- @@ -181194,7 +180145,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select websearch_to_tsquery('simple', 'abc <-> def'); - websearch_to_tsquery ----------------------- @@ -181203,7 +180154,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select websearch_to_tsquery('simple', 'abc (pg or class)'); - websearch_to_tsquery ------------------------- @@ -181212,7 +180163,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev -- NOT is ignored in quotes select websearch_to_tsquery('english', 'My brand new smartphone'); - websearch_to_tsquery @@ -181222,7 +180173,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select websearch_to_tsquery('english', 'My brand "new smartphone"'); - websearch_to_tsquery ---------------------------------- @@ -181231,7 +180182,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select websearch_to_tsquery('english', 'My brand "new -smartphone"'); - websearch_to_tsquery ---------------------------------- @@ -181240,7 +180191,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev -- test OR operator select websearch_to_tsquery('simple', 'cat or rat'); - websearch_to_tsquery @@ -181250,7 +180201,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select websearch_to_tsquery('simple', 'cat OR rat'); - websearch_to_tsquery ----------------------- @@ -181259,7 +180210,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select websearch_to_tsquery('simple', 'cat "OR" rat'); - websearch_to_tsquery ----------------------- @@ -181268,7 +180219,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select websearch_to_tsquery('simple', 'cat OR'); - websearch_to_tsquery ----------------------- @@ -181277,7 +180228,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select websearch_to_tsquery('simple', 'OR rat'); - websearch_to_tsquery ----------------------- @@ -181286,7 +180237,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select websearch_to_tsquery('simple', '"fat cat OR rat"'); - websearch_to_tsquery ------------------------------------- @@ -181295,7 +180246,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select websearch_to_tsquery('simple', 'fat (cat OR rat'); - websearch_to_tsquery ------------------------ @@ -181304,7 +180255,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select websearch_to_tsquery('simple', 'or OR or'); - websearch_to_tsquery ----------------------- @@ -181313,7 +180264,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev -- OR is an operator here ... select websearch_to_tsquery('simple', '"fat cat"or"fat rat"'); - websearch_to_tsquery @@ -181323,7 +180274,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select websearch_to_tsquery('simple', 'fat or(rat'); - websearch_to_tsquery ----------------------- @@ -181332,7 +180283,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select websearch_to_tsquery('simple', 'fat or)rat'); - websearch_to_tsquery ----------------------- @@ -181341,7 +180292,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select websearch_to_tsquery('simple', 'fat or&rat'); - websearch_to_tsquery ----------------------- @@ -181350,7 +180301,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select websearch_to_tsquery('simple', 'fat or|rat'); - websearch_to_tsquery ----------------------- @@ -181359,7 +180310,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select websearch_to_tsquery('simple', 'fat or!rat'); - websearch_to_tsquery ----------------------- @@ -181368,7 +180319,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select websearch_to_tsquery('simple', 'fat orrat'); - websearch_to_tsquery ----------------------- @@ -181386,7 +180337,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select websearch_to_tsquery('simple', 'fat or '); - websearch_to_tsquery ----------------------- @@ -181395,7 +180346,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev -- ... but not here select websearch_to_tsquery('simple', 'abc orange'); - websearch_to_tsquery @@ -181405,7 +180356,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select websearch_to_tsquery('simple', 'abc OR1234'); - websearch_to_tsquery ----------------------- @@ -181414,7 +180365,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select websearch_to_tsquery('simple', 'abc or-abc'); - websearch_to_tsquery -------------------------------------- @@ -181423,7 +180374,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select websearch_to_tsquery('simple', 'abc OR_abc'); - websearch_to_tsquery ------------------------- @@ -181432,7 +180383,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev -- test quotes select websearch_to_tsquery('english', '"pg_class pg'); - websearch_to_tsquery @@ -181442,7 +180393,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select websearch_to_tsquery('english', 'pg_class pg"'); - websearch_to_tsquery -------------------------- @@ -181451,7 +180402,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select websearch_to_tsquery('english', '"pg_class pg"'); - websearch_to_tsquery ---------------------------- @@ -181460,7 +180411,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select websearch_to_tsquery('english', '"pg_class : pg"'); - websearch_to_tsquery ---------------------------- @@ -181469,7 +180420,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select websearch_to_tsquery('english', 'abc "pg_class pg"'); - websearch_to_tsquery ------------------------------------ @@ -181478,7 +180429,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select websearch_to_tsquery('english', '"pg_class pg" def'); - websearch_to_tsquery ------------------------------------ @@ -181487,7 +180438,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select websearch_to_tsquery('english', 'abc "pg pg_class pg" def'); - websearch_to_tsquery ----------------------------------------------------- @@ -181496,7 +180447,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select websearch_to_tsquery('english', ' or "pg pg_class pg" or '); - websearch_to_tsquery ------------------------------------- @@ -181505,7 +180456,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select websearch_to_tsquery('english', '""pg pg_class pg""'); - websearch_to_tsquery --------------------------------- @@ -181514,7 +180465,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select websearch_to_tsquery('english', 'abc """"" def'); - websearch_to_tsquery ----------------------- @@ -181523,7 +180474,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select websearch_to_tsquery('english', 'cat -"fat rat"'); - websearch_to_tsquery ------------------------------- @@ -181532,7 +180483,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select websearch_to_tsquery('english', 'cat -"fat rat" cheese'); - websearch_to_tsquery ----------------------------------------- @@ -181541,7 +180492,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select websearch_to_tsquery('english', 'abc "def -"'); - websearch_to_tsquery ----------------------- @@ -181550,7 +180501,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select websearch_to_tsquery('english', 'abc "def :"'); - websearch_to_tsquery ----------------------- @@ -181559,7 +180510,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select websearch_to_tsquery('english', '"A fat cat" has just eaten a -rat.'); - websearch_to_tsquery ------------------------------------- @@ -181568,7 +180519,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select websearch_to_tsquery('english', '"A fat cat" has just eaten OR !rat.'); - websearch_to_tsquery ------------------------------------ @@ -181577,7 +180528,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select websearch_to_tsquery('english', '"A fat cat" has just (+eaten OR -rat)'); - websearch_to_tsquery ------------------------------------- @@ -181586,7 +180537,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select websearch_to_tsquery('english', 'this is ----fine'); - websearch_to_tsquery ----------------------- @@ -181595,7 +180546,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select websearch_to_tsquery('english', '(()) )))) this ||| is && -fine, "dear friend" OR good'); - websearch_to_tsquery ----------------------------------------- @@ -181604,7 +180555,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select websearch_to_tsquery('english', 'an old <-> cat " is fine &&& too'); - websearch_to_tsquery ------------------------- @@ -181613,7 +180564,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select websearch_to_tsquery('english', '"A the" OR just on'); -NOTICE: text-search query contains only stop words or doesn't contain lexemes, ignored - websearch_to_tsquery @@ -181623,7 +180574,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select websearch_to_tsquery('english', '"a fat cat" ate a rat'); - websearch_to_tsquery ---------------------------------- @@ -181632,7 +180583,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select to_tsvector('english', 'A fat cat ate a rat') @@ websearch_to_tsquery('english', '"a fat cat" ate a rat'); - ?column? @@ -181642,7 +180593,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select to_tsvector('english', 'A fat grey cat ate a rat') @@ websearch_to_tsquery('english', '"a fat cat" ate a rat'); - ?column? @@ -181652,7 +180603,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev -- cases handled by gettoken_tsvector() select websearch_to_tsquery(''''); -NOTICE: text-search query contains only stop words or doesn't contain lexemes, ignored @@ -181663,7 +180614,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select websearch_to_tsquery('''abc''''def'''); - websearch_to_tsquery ----------------------- @@ -181672,7 +180623,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select websearch_to_tsquery('\abc'); - websearch_to_tsquery ----------------------- @@ -181681,7 +180632,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select websearch_to_tsquery('\'); -NOTICE: text-search query contains only stop words or doesn't contain lexemes, ignored - websearch_to_tsquery @@ -181691,7 +180642,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/tsearch.out --lab - +ERROR: websearch_to_tsquery(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/cluster.out --label=/mnt/data1/postgres/src/test/regress/results/cluster.out /mnt/data1/postgres/src/test/regress/expected/cluster.out /mnt/data1/postgres/src/test/regress/results/cluster.out --- /mnt/data1/postgres/src/test/regress/expected/cluster.out +++ /mnt/data1/postgres/src/test/regress/results/cluster.out @@ -181699,13 +180650,13 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/cluster.out --lab -- CREATE TABLE clstr_tst_s (rf_a SERIAL PRIMARY KEY, b INT); -+NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. See https://www.cockroachlabs.com/docs/v24.2/serial.html ++NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. See https://www.cockroachlabs.com/docs/dev/serial.html CREATE TABLE clstr_tst (a SERIAL PRIMARY KEY, b INT, c TEXT, d TEXT, CONSTRAINT clstr_tst_con FOREIGN KEY (b) REFERENCES clstr_tst_s); -+NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. See https://www.cockroachlabs.com/docs/v24.2/serial.html ++NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. See https://www.cockroachlabs.com/docs/dev/serial.html CREATE INDEX clstr_tst_b ON clstr_tst (b); CREATE INDEX clstr_tst_c ON clstr_tst (c); CREATE INDEX clstr_tst_c_b ON clstr_tst (c,b); @@ -181718,7 +180669,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/cluster.out --lab +CREATE TABLE clstr_tst_inh () INHERITS (clstr_tst) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev INSERT INTO clstr_tst (b, c) VALUES (11, 'once'); +ERROR: insert on table "clstr_tst" violates foreign key constraint "clstr_tst_con" +DETAIL: Key (b)=(11) is not present in table "clstr_tst_s". @@ -182158,7 +181109,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/cluster.out --lab +SET SESSION AUTHORIZATION regress_clstr_user + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CLUSTER; +ERROR: at or near "cluster": syntax error +DETAIL: source SQL: @@ -182423,7 +181374,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/cluster.out --lab +SET SESSION AUTHORIZATION regress_ptnowner + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CLUSTER ptnowner USING ptnowner_i_idx; +ERROR: at or near "cluster": syntax error +DETAIL: source SQL: @@ -182717,7 +181668,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/foreign_data.out LANGUAGE C; +ERROR: unimplemented: C is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/102201/v24.2 ++See: https://go.crdb.dev/issue-v/102201/dev -- Clean up in case a prior regression run failed -- Suppress NOTICE messages when roles don't exist SET client_min_messages TO 'warning'; @@ -182734,7 +181685,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/foreign_data.out +SET SESSION AUTHORIZATION 'regress_foreign_data_user' + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE ROLE regress_test_role; CREATE ROLE regress_test_role2; CREATE ROLE regress_test_role_super SUPERUSER; @@ -183792,9 +182743,9 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/foreign_data.out + +If you would rather not post publicly, please contact us directly +using the support form. -+ -+We appreciate your feedback. ++We appreciate your feedback. ++ +\des+ +ERROR: column "s.tableoid" does not exist SET ROLE regress_test_role; @@ -183914,10 +182865,10 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/foreign_data.out +Please check the public issue tracker to check whether this problem is +already tracked. If you cannot find it there, please report the error +with details by creating a new issue. -+ + +If you would rather not post publicly, please contact us directly +using the support form. - ++ +We appreciate your feedback. + +\des+ @@ -185769,7 +184720,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/foreign_data.out RESET ROLE; -- has_foreign_data_wrapper_privilege -@@ -1096,299 +2460,678 @@ +@@ -1096,299 +2460,653 @@ (SELECT oid FROM pg_foreign_data_wrapper WHERE fdwname='foo'), 'USAGE'); has_foreign_data_wrapper_privilege ------------------------------------ @@ -186466,27 +185417,23 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/foreign_data.out RETURN NULL; END $$ language plpgsql; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev CREATE TRIGGER trigtest_before_stmt BEFORE INSERT OR UPDATE OR DELETE ON foreign_schema.foreign_table_1 FOR EACH STATEMENT EXECUTE PROCEDURE dummy_trigger(); -+ERROR: at or near "trigtest_before_stmt": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER trigtest_before_stmt BEFORE INSERT OR UPDATE OR DELETE -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE TRIGGER trigtest_after_stmt AFTER INSERT OR UPDATE OR DELETE ON foreign_schema.foreign_table_1 FOR EACH STATEMENT EXECUTE PROCEDURE dummy_trigger(); -+ERROR: at or near "trigtest_after_stmt": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER trigtest_after_stmt AFTER INSERT OR UPDATE OR DELETE -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE TRIGGER trigtest_after_stmt_tt AFTER INSERT OR UPDATE OR DELETE -- ERROR ON foreign_schema.foreign_table_1 REFERENCING NEW TABLE AS new_table @@ -186494,32 +185441,23 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/foreign_data.out EXECUTE PROCEDURE dummy_trigger(); -ERROR: "foreign_table_1" is a foreign table -DETAIL: Triggers on foreign tables cannot have transition tables. -+ERROR: at or near "trigtest_after_stmt_tt": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER trigtest_after_stmt_tt AFTER INSERT OR UPDATE OR DELETE -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE TRIGGER trigtest_before_row BEFORE INSERT OR UPDATE OR DELETE ON foreign_schema.foreign_table_1 FOR EACH ROW EXECUTE PROCEDURE dummy_trigger(); -+ERROR: at or near "trigtest_before_row": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER trigtest_before_row BEFORE INSERT OR UPDATE OR DELETE -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE TRIGGER trigtest_after_row AFTER INSERT OR UPDATE OR DELETE ON foreign_schema.foreign_table_1 FOR EACH ROW EXECUTE PROCEDURE dummy_trigger(); -+ERROR: at or near "trigtest_after_row": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER trigtest_after_row AFTER INSERT OR UPDATE OR DELETE -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE CONSTRAINT TRIGGER trigtest_constraint AFTER INSERT OR UPDATE OR DELETE ON foreign_schema.foreign_table_1 FOR EACH ROW @@ -186531,7 +185469,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/foreign_data.out +CREATE CONSTRAINT TRIGGER trigtest_constraint AFTER INSERT OR UPDATE OR DELETE + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/28296/dev ALTER FOREIGN TABLE foreign_schema.foreign_table_1 DISABLE TRIGGER trigtest_before_stmt; +ERROR: at or near "foreign": syntax error @@ -186547,39 +185485,27 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/foreign_data.out + ^ +HINT: try \h ALTER DROP TRIGGER trigtest_before_stmt ON foreign_schema.foreign_table_1; -+ERROR: at or near "trigtest_before_stmt": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+DROP TRIGGER trigtest_before_stmt ON foreign_schema.foreign_table_1 -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev DROP TRIGGER trigtest_before_row ON foreign_schema.foreign_table_1; -+ERROR: at or near "trigtest_before_row": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+DROP TRIGGER trigtest_before_row ON foreign_schema.foreign_table_1 -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev DROP TRIGGER trigtest_after_stmt ON foreign_schema.foreign_table_1; -+ERROR: at or near "trigtest_after_stmt": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+DROP TRIGGER trigtest_after_stmt ON foreign_schema.foreign_table_1 -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev DROP TRIGGER trigtest_after_row ON foreign_schema.foreign_table_1; -+ERROR: at or near "trigtest_after_row": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+DROP TRIGGER trigtest_after_row ON foreign_schema.foreign_table_1 -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev DROP FUNCTION dummy_trigger(); +ERROR: unknown function: dummy_trigger() -- Table inheritance CREATE TABLE fd_pt1 ( c1 integer NOT NULL, -@@ -1397,113 +3140,181 @@ +@@ -1397,113 +3115,181 @@ ); CREATE FOREIGN TABLE ft2 () INHERITS (fd_pt1) SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value'); @@ -186656,9 +185582,9 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/foreign_data.out + +If you would rather not post publicly, please contact us directly +using the support form. - -+We appreciate your feedback. + ++We appreciate your feedback. + +\d+ fd_pt1 +ERROR: at or near ".": syntax error +DETAIL: source SQL: @@ -186695,9 +185621,9 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/foreign_data.out + +If you would rather not post publicly, please contact us directly +using the support form. - -+We appreciate your feedback. + ++We appreciate your feedback. + +\d+ ft2 +ERROR: at or near ".": syntax error +DETAIL: source SQL: @@ -186758,7 +185684,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/foreign_data.out +CREATE TABLE ct3() INHERITS(ft2) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev CREATE FOREIGN TABLE ft3 ( c1 integer NOT NULL, c2 text, @@ -186809,9 +185735,9 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/foreign_data.out -Inherits: ft2 +If you would rather not post publicly, please contact us directly +using the support form. -+ -+We appreciate your feedback. ++We appreciate your feedback. ++ +\d+ ft2 +ERROR: at or near ".": syntax error +DETAIL: source SQL: @@ -186845,7 +185771,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/foreign_data.out -- add attributes recursively ALTER TABLE fd_pt1 ADD COLUMN c4 integer; ALTER TABLE fd_pt1 ADD COLUMN c5 integer DEFAULT 0; -@@ -1511,111 +3322,109 @@ +@@ -1511,111 +3297,109 @@ ALTER TABLE fd_pt1 ADD COLUMN c7 integer NOT NULL; ALTER TABLE fd_pt1 ADD COLUMN c8 integer; \d+ fd_pt1 @@ -186953,17 +185879,17 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/foreign_data.out ALTER TABLE fd_pt1 ALTER COLUMN c8 TYPE char(10) USING '0'; -- ERROR -ERROR: "ft2" is not a table +ERROR: ALTER COLUMN TYPE from int to char is only supported experimentally -+HINT: See: https://go.crdb.dev/issue-v/49329/v24.2 ++HINT: See: https://go.crdb.dev/issue-v/49329/dev +-- +you can enable alter column type general support by running `SET enable_experimental_alter_column_type_general = true` ALTER TABLE fd_pt1 ALTER COLUMN c8 TYPE char(10); +ERROR: ALTER COLUMN TYPE from int to char is only supported experimentally -+HINT: See: https://go.crdb.dev/issue-v/49329/v24.2 ++HINT: See: https://go.crdb.dev/issue-v/49329/dev +-- +you can enable alter column type general support by running `SET enable_experimental_alter_column_type_general = true` ALTER TABLE fd_pt1 ALTER COLUMN c8 SET DATA TYPE text; +ERROR: ALTER COLUMN TYPE from int to string is only supported experimentally -+HINT: See: https://go.crdb.dev/issue-v/49329/v24.2 ++HINT: See: https://go.crdb.dev/issue-v/49329/dev +-- +you can enable alter column type general support by running `SET enable_experimental_alter_column_type_general = true` ALTER TABLE fd_pt1 ALTER COLUMN c1 SET STATISTICS 10000; @@ -187043,7 +185969,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/foreign_data.out -- drop attributes recursively ALTER TABLE fd_pt1 DROP COLUMN c4; ALTER TABLE fd_pt1 DROP COLUMN c5; -@@ -1623,172 +3432,205 @@ +@@ -1623,172 +3407,205 @@ ALTER TABLE fd_pt1 DROP COLUMN c7; ALTER TABLE fd_pt1 DROP COLUMN c8; \d+ fd_pt1 @@ -187375,7 +186301,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/foreign_data.out -- changes name of an attribute recursively ALTER TABLE fd_pt1 RENAME COLUMN c1 TO f1; ALTER TABLE fd_pt1 RENAME COLUMN c2 TO f2; -@@ -1796,297 +3638,606 @@ +@@ -1796,297 +3613,606 @@ -- changes name of a constraint recursively ALTER TABLE fd_pt1 RENAME CONSTRAINT fd_pt1chk3 TO f2_check; \d+ fd_pt1 @@ -188834,7 +187760,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/foreign_key.out - +CREATE TABLE FKTABLE (ftest1 cidr, ftest2 timestamp, FOREIGN KEY(ftest1, ftest2) REFERENCES pktable) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/18846/v24.2 ++See: https://go.crdb.dev/issue-v/18846/dev -- Again, so should this... 
CREATE TABLE FKTABLE (ftest1 cidr, ftest2 timestamp, FOREIGN KEY(ftest1, ftest2) REFERENCES pktable(ptest1, ptest2)); -ERROR: foreign key constraint "fktable_ftest1_ftest2_fkey" cannot be implemented @@ -188844,7 +187770,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/foreign_key.out - +CREATE TABLE FKTABLE (ftest1 cidr, ftest2 timestamp, FOREIGN KEY(ftest1, ftest2) REFERENCES pktable(ptest1, ptest2)) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/18846/v24.2 ++See: https://go.crdb.dev/issue-v/18846/dev -- This fails because we mixed up the column ordering CREATE TABLE FKTABLE (ftest1 int, ftest2 inet, FOREIGN KEY(ftest2, ftest1) REFERENCES pktable); -ERROR: foreign key constraint "fktable_ftest2_ftest1_fkey" cannot be implemented @@ -188892,7 +187818,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/foreign_key.out - +create table pktable (ptest1 int, primary key(base1), unique(base1, ptest1)) inherits (pktable_base) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev create table fktable (ftest1 int references pktable(base1)); +ERROR: relation "pktable" does not exist -- now some ins, upd, del @@ -188983,7 +187909,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/foreign_key.out - + pktable(base1, ptest1)) inherits (pktable_base) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev insert into pktable (base1, ptest1, base2, ptest2) values (1, 1, 1, 1); +ERROR: relation "pktable" does not exist insert into pktable (base1, ptest1, base2, ptest2) values (2, 1, 1, 1); @@ -189023,7 +187949,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/foreign_key.out - +create table pktable(ptest1 inet, primary key(base1, ptest1)) inherits (pktable_base) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev -- just generally bad types (with and without column references on the referenced table) create table fktable(ftest1 cidr, ftest2 int[], foreign key (ftest1, ftest2) references pktable); -ERROR: foreign key constraint "fktable_ftest1_ftest2_fkey" cannot be implemented @@ -189033,7 +187959,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/foreign_key.out - +create table fktable(ftest1 cidr, ftest2 int[], foreign key (ftest1, ftest2) references pktable) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/18846/v24.2 ++See: https://go.crdb.dev/issue-v/18846/dev create table fktable(ftest1 cidr, ftest2 int[], foreign key (ftest1, ftest2) references pktable(base1, ptest1)); -ERROR: foreign key constraint "fktable_ftest1_ftest2_fkey" cannot be implemented -DETAIL: Key columns "ftest1" and "base1" are of incompatible types: cidr and integer. @@ -189042,7 +187968,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/foreign_key.out - +create table fktable(ftest1 cidr, ftest2 int[], foreign key (ftest1, ftest2) references pktable(base1, ptest1)) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/18846/v24.2 ++See: https://go.crdb.dev/issue-v/18846/dev -- let's mix up which columns reference which create table fktable(ftest1 int, ftest2 inet, foreign key(ftest2, ftest1) references pktable); -ERROR: foreign key constraint "fktable_ftest2_ftest1_fkey" cannot be implemented @@ -189071,7 +187997,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/foreign_key.out - + pktable(base1, ptest1)) inherits (pktable_base) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev create table pktable(ptest1 inet, ptest2 inet, primary key(base1, ptest1), foreign key(base2, ptest2) references pktable(ptest1, base1)) inherits (pktable_base); -ERROR: foreign key constraint "pktable_base2_ptest2_fkey" cannot be implemented @@ -189082,7 +188008,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/foreign_key.out - + pktable(ptest1, base1)) inherits (pktable_base) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev create table pktable(ptest1 inet, ptest2 inet, primary key(base1, ptest1), foreign key(ptest2, base2) references pktable(base1, ptest1)) inherits (pktable_base); -ERROR: foreign key constraint "pktable_ptest2_base2_fkey" cannot be implemented @@ -189093,7 +188019,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/foreign_key.out - + pktable(base1, ptest1)) inherits (pktable_base) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev create table pktable(ptest1 inet, ptest2 inet, primary key(base1, ptest1), foreign key(ptest2, base2) references pktable(base1, ptest1)) inherits (pktable_base); -ERROR: foreign key constraint "pktable_ptest2_base2_fkey" cannot be implemented @@ -189104,7 +188030,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/foreign_key.out - + pktable(base1, ptest1)) inherits (pktable_base) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev drop table pktable; -ERROR: table "pktable" does not exist +ERROR: relation "pktable" does not exist @@ -189328,11 +188254,11 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/foreign_key.out - -- text is compatible with varchar ALTER TABLE fktable ADD CONSTRAINT fk_4_2 FOREIGN KEY (x4) REFERENCES pktable(id2); -+NOTICE: type of foreign key column "x4" (string) is not identical to referenced column "pktable"."id2" (varchar) ++NOTICE: type of foreign key column "x4" (STRING) is not identical to referenced column "pktable"."id2" (VARCHAR(4)) -- int2 is part of integer opfamily as of 8.0 ALTER TABLE fktable ADD CONSTRAINT fk_5_1 FOREIGN KEY (x5) REFERENCES pktable(id1); -+NOTICE: type of foreign key column "x5" (int2) is not identical to referenced column "pktable"."id1" (int4) ++NOTICE: type of foreign key column "x5" (INT2) is not identical to referenced column "pktable"."id1" (INT4) -- check multikey cases, especially out-of-order column lists -- these should work ALTER TABLE fktable ADD CONSTRAINT fk_123_123 @@ -189340,7 +188266,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/foreign_key.out - FOREIGN KEY (x2,x1,x3) REFERENCES pktable(id2,id1,id3); ALTER TABLE fktable ADD CONSTRAINT fk_253_213 FOREIGN KEY (x2,x5,x3) REFERENCES pktable(id2,id1,id3); -+NOTICE: type of foreign key column "x5" (int2) is not identical to referenced column "pktable"."id1" (int4) ++NOTICE: type of foreign key column "x5" (INT2) is not identical to referenced column "pktable"."id1" (INT4) -- these should fail ALTER TABLE fktable ADD CONSTRAINT fk_123_231 FOREIGN KEY (x1,x2,x3) REFERENCES pktable(id2,id3,id1); @@ -189436,7 +188362,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/foreign_key.out - +ALTER TABLE fktable ALTER CONSTRAINT fktable_fk_fkey DEFERRABLE INITIALLY IMMEDIATE + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/31632/v24.2 ++See: https://go.crdb.dev/issue-v/31632/dev BEGIN; -- doesn't match FK, should throw error now UPDATE pktable SET id = 10 WHERE id = 5; @@ -189457,7 +188383,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/foreign_key.out - +ALTER TABLE fktable ALTER CONSTRAINT fktable_fk_fkey NOT DEFERRABLE + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/31632/v24.2 ++See: https://go.crdb.dev/issue-v/31632/dev -- illegal option ALTER TABLE fktable ALTER CONSTRAINT fktable_fk_fkey NOT DEFERRABLE INITIALLY DEFERRED; -ERROR: constraint declared INITIALLY DEFERRED must be DEFERRABLE @@ -189468,7 +188394,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/foreign_key.out - +ALTER TABLE fktable ALTER CONSTRAINT fktable_fk_fkey NOT DEFERRABLE INITIALLY DEFERRED + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/31632/v24.2 ++See: https://go.crdb.dev/issue-v/31632/dev -- test order of firing of FK triggers when several RI-induced changes need to -- be made to the same row. This was broken by subtransaction-related -- changes in 8.0. @@ -190332,7 +189258,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/foreign_key.out - + FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE CASCADE ON DELETE CASCADE DEFERRABLE, + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/31632/v24.2 ++See: https://go.crdb.dev/issue-v/31632/dev CREATE TABLE fk_partitioned_fk_5_1 (a int, b int, FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk); ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_5 FOR VALUES IN (4500); +ERROR: at or near "attach": syntax error @@ -191538,7 +190464,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/foreign_key.out - + DEFERRABLE INITIALLY DEFERRED + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/31632/v24.2 ++See: https://go.crdb.dev/issue-v/31632/dev INSERT INTO pt VALUES(1,2,3); INSERT INTO ref VALUES(1,2,3); +ERROR: relation "ref" does not exist @@ -191602,7 +190528,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/foreign_key.out - + DEFERRABLE INITIALLY IMMEDIATE + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/31632/v24.2 ++See: https://go.crdb.dev/issue-v/31632/dev ALTER TABLE ref ALTER CONSTRAINT ref_f1_f2_fkey DEFERRABLE INITIALLY DEFERRED; +ERROR: at or near "deferrable": syntax error: unimplemented: this syntax @@ -191611,7 +190537,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/foreign_key.out - + DEFERRABLE INITIALLY DEFERRED + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/31632/v24.2 ++See: https://go.crdb.dev/issue-v/31632/dev BEGIN; DELETE FROM pt; DELETE FROM ref; @@ -191650,7 +190576,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/foreign_key.out - + DEFERRABLE INITIALLY DEFERRED + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/31632/v24.2 ++See: https://go.crdb.dev/issue-v/31632/dev INSERT INTO pt VALUES(1,2,3); +ERROR: duplicate key value violates unique constraint "pt_pkey" +DETAIL: Key (f1, f2)=(1, 2) already exists. @@ -191704,7 +190630,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/foreign_key.out - + DEFERRABLE INITIALLY DEFERRED + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/31632/v24.2 ++See: https://go.crdb.dev/issue-v/31632/dev ALTER TABLE ref ALTER CONSTRAINT ref_f1_f2_fkey DEFERRABLE INITIALLY DEFERRED; +ERROR: at or near "deferrable": syntax error: unimplemented: this syntax @@ -191713,7 +190639,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/foreign_key.out - + DEFERRABLE INITIALLY DEFERRED + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/31632/v24.2 ++See: https://go.crdb.dev/issue-v/31632/dev INSERT INTO pt VALUES(1,2,3); +ERROR: relation "pt" does not exist INSERT INTO ref VALUES(1,2,3); @@ -192306,54 +191232,45 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/foreign_key.out - -- check that regular and deferrable AR triggers on the PK tables -- still work as expected CREATE FUNCTION fkpart11.print_row () RETURNS TRIGGER LANGUAGE plpgsql AS $$ -@@ -2896,24 +3882,53 @@ +@@ -2896,24 +3882,44 @@ RETURN NULL; END; $$; +ERROR: cannot create "fkpart11.print_row" because the target database or schema does not exist +HINT: verify that the current database and search_path are valid and/or the target database exists CREATE TRIGGER trig_upd_pk AFTER UPDATE ON fkpart11.pk FOR EACH ROW EXECUTE FUNCTION fkpart11.print_row(); -+ERROR: at or near "trig_upd_pk": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER trig_upd_pk AFTER UPDATE ON fkpart11.pk FOR EACH ROW EXECUTE FUNCTION fkpart11.print_row() -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE TRIGGER trig_del_pk AFTER DELETE ON fkpart11.pk FOR EACH ROW EXECUTE FUNCTION fkpart11.print_row(); -+ERROR: at or near "trig_del_pk": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER trig_del_pk AFTER DELETE ON fkpart11.pk FOR EACH ROW EXECUTE FUNCTION fkpart11.print_row() -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE TRIGGER trig_ins_pk AFTER INSERT ON fkpart11.pk FOR EACH ROW EXECUTE FUNCTION fkpart11.print_row(); -+ERROR: at or near "trig_ins_pk": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER trig_ins_pk AFTER INSERT ON fkpart11.pk FOR EACH ROW EXECUTE FUNCTION fkpart11.print_row() -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE CONSTRAINT TRIGGER trig_upd_fk_parted AFTER UPDATE ON fkpart11.fk_parted INITIALLY DEFERRED FOR EACH ROW EXECUTE FUNCTION fkpart11.print_row(); +ERROR: at or near "trig_upd_fk_parted": syntax error: unimplemented: this syntax +DETAIL: source SQL: +CREATE CONSTRAINT TRIGGER trig_upd_fk_parted AFTER UPDATE ON fkpart11.fk_parted INITIALLY DEFERRED FOR EACH ROW EXECUTE FUNCTION fkpart11.print_row() + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/28296/dev CREATE CONSTRAINT TRIGGER trig_del_fk_parted AFTER DELETE ON fkpart11.fk_parted INITIALLY DEFERRED FOR EACH ROW EXECUTE FUNCTION fkpart11.print_row(); +ERROR: at or near "trig_del_fk_parted": syntax error: unimplemented: this syntax +DETAIL: source SQL: +CREATE CONSTRAINT TRIGGER trig_del_fk_parted AFTER DELETE ON fkpart11.fk_parted INITIALLY DEFERRED FOR EACH ROW EXECUTE FUNCTION fkpart11.print_row() + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/28296/dev CREATE CONSTRAINT TRIGGER trig_ins_fk_parted AFTER INSERT ON fkpart11.fk_parted INITIALLY DEFERRED FOR EACH ROW EXECUTE FUNCTION fkpart11.print_row(); +ERROR: at or near "trig_ins_fk_parted": syntax error: unimplemented: this syntax +DETAIL: source SQL: +CREATE CONSTRAINT TRIGGER trig_ins_fk_parted AFTER INSERT ON fkpart11.fk_parted INITIALLY DEFERRED FOR EACH ROW EXECUTE FUNCTION fkpart11.print_row() + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/28296/dev UPDATE fkpart11.pk SET a = 3 WHERE a = 4; -NOTICE: TABLE: pk3, OP: DELETE, OLD: (4,yyy), NEW: -NOTICE: TABLE: pk2, OP: INSERT, OLD: , NEW: (3,yyy) @@ -192639,35 +191556,35 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath_encoding +SELECT '"\u"'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev +SELECT '"\u00"'::jsonpath; -- ERROR, incomplete escape +ERROR: at or near "EOF": syntax error: unimplemented: this syntax +DETAIL: source SQL: +SELECT '"\u00"'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev +SELECT '"\u000g"'::jsonpath; -- ERROR, g is not a hex digit +ERROR: at or near "EOF": syntax error: unimplemented: this syntax +DETAIL: source SQL: +SELECT '"\u000g"'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev +SELECT '"\u0000"'::jsonpath; -- OK, legal escape +ERROR: at or near "EOF": syntax error: unimplemented: this syntax +DETAIL: source SQL: +SELECT '"\u0000"'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev +SELECT '"\uaBcD"'::jsonpath; -- OK, uppercase and lower case both OK +ERROR: at or near "EOF": syntax error: unimplemented: this syntax +DETAIL: source SQL: +SELECT '"\uaBcD"'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev +-- handling of unicode surrogate pairs +select '"\ud83d\ude04\ud83d\udc36"'::jsonpath as correct_in_utf8; +ERROR: at or near "as": syntax error: unimplemented: this syntax @@ -192675,35 +191592,35 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath_encoding +select '"\ud83d\ude04\ud83d\udc36"'::jsonpath as correct_in_utf8 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev +select '"\ud83d\ud83d"'::jsonpath; -- 2 high surrogates in a row +ERROR: at or near "EOF": syntax error: unimplemented: this syntax +DETAIL: source SQL: +select '"\ud83d\ud83d"'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev +select '"\ude04\ud83d"'::jsonpath; -- surrogates in wrong order +ERROR: at or near "EOF": syntax error: unimplemented: this syntax +DETAIL: source SQL: +select '"\ude04\ud83d"'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev +select '"\ud83dX"'::jsonpath; -- orphan high surrogate +ERROR: at or near "EOF": syntax error: unimplemented: this syntax +DETAIL: source SQL: +select '"\ud83dX"'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev +select '"\ude04X"'::jsonpath; -- orphan low surrogate +ERROR: at or near "EOF": syntax error: unimplemented: this syntax +DETAIL: source SQL: +select '"\ude04X"'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev +--handling of simple unicode escapes +select '"the Copyright \u00a9 sign"'::jsonpath as correct_in_utf8; +ERROR: at or near "as": syntax error: unimplemented: this syntax @@ -192711,35 +191628,35 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath_encoding +select '"the Copyright \u00a9 sign"'::jsonpath as correct_in_utf8 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev +select '"dollar \u0024 character"'::jsonpath as correct_everywhere; +ERROR: at or near "as": syntax error: unimplemented: this syntax +DETAIL: source SQL: +select '"dollar \u0024 character"'::jsonpath as correct_everywhere + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev +select '"dollar \\u0024 character"'::jsonpath as not_an_escape; +ERROR: at or near "as": syntax error: unimplemented: this syntax +DETAIL: source SQL: +select '"dollar \\u0024 character"'::jsonpath as not_an_escape + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev +select '"null \u0000 escape"'::jsonpath as not_unescaped; +ERROR: at or near "as": syntax error: unimplemented: this syntax +DETAIL: source SQL: +select '"null \u0000 escape"'::jsonpath as not_unescaped + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev +select '"null \\u0000 escape"'::jsonpath as not_an_escape; +ERROR: at or near "as": syntax error: unimplemented: this syntax +DETAIL: source SQL: +select '"null \\u0000 escape"'::jsonpath as not_an_escape + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev +-- checks for quoted key names +-- basic unicode input +SELECT '$."\u"'::jsonpath; -- ERROR, incomplete escape @@ -192748,35 +191665,35 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath_encoding +SELECT '$."\u"'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev +SELECT '$."\u00"'::jsonpath; -- ERROR, incomplete escape +ERROR: at or near "EOF": syntax error: unimplemented: this syntax +DETAIL: source SQL: +SELECT '$."\u00"'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev +SELECT '$."\u000g"'::jsonpath; -- ERROR, g is not a hex digit +ERROR: at or near "EOF": syntax error: unimplemented: this syntax +DETAIL: source SQL: +SELECT '$."\u000g"'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev +SELECT '$."\u0000"'::jsonpath; -- OK, legal escape +ERROR: at or near "EOF": syntax error: unimplemented: this syntax +DETAIL: source SQL: +SELECT '$."\u0000"'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev +SELECT '$."\uaBcD"'::jsonpath; -- OK, uppercase and lower case both OK +ERROR: at or near "EOF": syntax error: unimplemented: this syntax +DETAIL: source SQL: +SELECT '$."\uaBcD"'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev +-- handling of unicode surrogate pairs +select '$."\ud83d\ude04\ud83d\udc36"'::jsonpath as correct_in_utf8; +ERROR: at or near "as": syntax error: unimplemented: this syntax @@ -192784,35 +191701,35 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath_encoding +select '$."\ud83d\ude04\ud83d\udc36"'::jsonpath as correct_in_utf8 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev +select '$."\ud83d\ud83d"'::jsonpath; -- 2 high surrogates in a row +ERROR: at or near "EOF": syntax error: unimplemented: this syntax +DETAIL: source SQL: +select '$."\ud83d\ud83d"'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev +select '$."\ude04\ud83d"'::jsonpath; -- surrogates in wrong order +ERROR: at or near "EOF": syntax error: unimplemented: this syntax +DETAIL: source SQL: +select '$."\ude04\ud83d"'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev +select '$."\ud83dX"'::jsonpath; -- orphan high surrogate +ERROR: at or near "EOF": syntax error: unimplemented: this syntax +DETAIL: source SQL: +select '$."\ud83dX"'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev +select '$."\ude04X"'::jsonpath; -- orphan low surrogate +ERROR: at or near "EOF": syntax error: unimplemented: this syntax +DETAIL: source SQL: +select '$."\ude04X"'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev +--handling of simple unicode escapes +select '$."the Copyright \u00a9 sign"'::jsonpath as correct_in_utf8; +ERROR: at or near "as": syntax error: unimplemented: this syntax @@ -192820,35 +191737,35 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath_encoding +select '$."the Copyright \u00a9 sign"'::jsonpath as correct_in_utf8 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev +select '$."dollar \u0024 character"'::jsonpath as correct_everywhere; +ERROR: at or near "as": syntax error: unimplemented: this syntax +DETAIL: source SQL: +select '$."dollar \u0024 character"'::jsonpath as correct_everywhere + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev +select '$."dollar \\u0024 character"'::jsonpath as not_an_escape; +ERROR: at or near "as": syntax error: unimplemented: this syntax +DETAIL: source SQL: +select '$."dollar \\u0024 character"'::jsonpath as not_an_escape + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev +select '$."null \u0000 escape"'::jsonpath as not_unescaped; +ERROR: at or near "as": syntax error: unimplemented: this syntax +DETAIL: source SQL: +select '$."null \u0000 escape"'::jsonpath as not_unescaped + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev +select '$."null \\u0000 escape"'::jsonpath as not_an_escape; +ERROR: at or near "as": syntax error: unimplemented: this syntax +DETAIL: source SQL: +select '$."null \\u0000 escape"'::jsonpath as not_an_escape + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --label=/mnt/data1/postgres/src/test/regress/results/jsonpath.out /mnt/data1/postgres/src/test/regress/expected/jsonpath.out /mnt/data1/postgres/src/test/regress/results/jsonpath.out --- /mnt/data1/postgres/src/test/regress/expected/jsonpath.out +++ /mnt/data1/postgres/src/test/regress/results/jsonpath.out @@ -192863,7 +191780,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select ''::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$'::jsonpath; - jsonpath ----------- @@ -192875,7 +191792,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select 'strict $'::jsonpath; - jsonpath ----------- @@ -192887,7 +191804,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select 'strict $'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select 'lax $'::jsonpath; - jsonpath ----------- @@ -192899,7 +191816,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select 'lax $'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$.a'::jsonpath; - jsonpath ----------- @@ -192911,7 +191828,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$.a'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$.a.v'::jsonpath; - jsonpath ------------ @@ -192923,7 +191840,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$.a.v'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$.a.*'::jsonpath; - jsonpath ----------- @@ -192935,7 +191852,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$.a.*'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$.*[*]'::jsonpath; - jsonpath ----------- @@ -192947,7 +191864,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$.*[*]'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$.a[*]'::jsonpath; - jsonpath ----------- @@ -192959,7 +191876,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$.a[*]'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$.a[*][*]'::jsonpath; - jsonpath -------------- @@ -192971,7 +191888,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$.a[*][*]'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$[*]'::jsonpath; - jsonpath ----------- @@ -192983,7 +191900,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$[*]'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$[0]'::jsonpath; - jsonpath ----------- @@ -192995,7 +191912,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$[0]'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$[*][0]'::jsonpath; - jsonpath ----------- @@ -193007,7 +191924,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$[*][0]'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$[*].a'::jsonpath; - jsonpath ----------- @@ -193019,7 +191936,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$[*].a'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$[*][0].a.b'::jsonpath; - jsonpath ------------------ @@ -193031,7 +191948,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$[*][0].a.b'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$.a.**.b'::jsonpath; - jsonpath --------------- @@ -193043,7 +191960,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$.a.**.b'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$.a.**{2}.b'::jsonpath; - jsonpath ------------------ @@ -193055,7 +191972,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$.a.**{2}.b'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$.a.**{2 to 2}.b'::jsonpath; - jsonpath ------------------ @@ -193067,7 +191984,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$.a.**{2 to 2}.b'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$.a.**{2 to 5}.b'::jsonpath; - jsonpath ----------------------- @@ -193079,7 +191996,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$.a.**{2 to 5}.b'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$.a.**{0 to 5}.b'::jsonpath; - jsonpath ----------------------- @@ -193091,7 +192008,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$.a.**{0 to 5}.b'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$.a.**{5 to last}.b'::jsonpath; - jsonpath -------------------------- @@ -193103,7 +192020,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$.a.**{5 to last}.b'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$.a.**{last}.b'::jsonpath; - jsonpath --------------------- @@ -193115,7 +192032,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$.a.**{last}.b'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$.a.**{last to 5}.b'::jsonpath; - jsonpath -------------------------- @@ -193127,7 +192044,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$.a.**{last to 5}.b'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$+1'::jsonpath; - jsonpath ----------- @@ -193139,7 +192056,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$+1'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$-1'::jsonpath; - jsonpath ----------- @@ -193151,7 +192068,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$-1'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$--+1'::jsonpath; - jsonpath ----------- @@ -193163,7 +192080,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$--+1'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$.a/+-1'::jsonpath; - jsonpath --------------- @@ -193175,7 +192092,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$.a/+-1'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '1 * 2 + 4 % -3 != false'::jsonpath; - jsonpath ---------------------------- @@ -193187,7 +192104,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '1 * 2 + 4 % -3 != false'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '"\b\f\r\n\t\v\"\''\\"'::jsonpath; - jsonpath -------------------------- @@ -193199,7 +192116,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '"\b\f\r\n\t\v\"\''\\"'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '"\x50\u0067\u{53}\u{051}\u{00004C}"'::jsonpath; - jsonpath ----------- @@ -193211,7 +192128,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '"\x50\u0067\u{53}\u{051}\u{00004C}"'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$.foo\x50\u0067\u{53}\u{051}\u{00004C}\t\"bar'::jsonpath; - jsonpath ---------------------- @@ -193223,7 +192140,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$.foo\x50\u0067\u{53}\u{051}\u{00004C}\t\"bar'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '"\z"'::jsonpath; -- unrecognized escape is just the literal char - jsonpath ----------- @@ -193235,7 +192152,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '"\z"'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$.g ? ($.a == 1)'::jsonpath; - jsonpath --------------------- @@ -193247,7 +192164,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$.g ? ($.a == 1)'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$.g ? (@ == 1)'::jsonpath; - jsonpath ----------------- @@ -193259,7 +192176,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$.g ? (@ == 1)'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$.g ? (@.a == 1)'::jsonpath; - jsonpath --------------------- @@ -193271,7 +192188,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$.g ? (@.a == 1)'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$.g ? (@.a == 1 || @.a == 4)'::jsonpath; - jsonpath ----------------------------------- @@ -193283,7 +192200,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$.g ? (@.a == 1 || @.a == 4)'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$.g ? (@.a == 1 && @.a == 4)'::jsonpath; - jsonpath ----------------------------------- @@ -193295,7 +192212,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$.g ? (@.a == 1 && @.a == 4)'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$.g ? (@.a == 1 || @.a == 4 && @.b == 7)'::jsonpath; - jsonpath ------------------------------------------------- @@ -193307,7 +192224,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$.g ? (@.a == 1 || @.a == 4 && @.b == 7)'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$.g ? (@.a == 1 || !(@.a == 4) && @.b == 7)'::jsonpath; - jsonpath ---------------------------------------------------- @@ -193319,7 +192236,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$.g ? (@.a == 1 || !(@.a == 4) && @.b == 7)'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$.g ? 
(@.a == 1 || !(@.x >= 123 || @.a == 4) && @.b == 7)'::jsonpath; - jsonpath -------------------------------------------------------------------- @@ -193331,7 +192248,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$.g ? (@.a == 1 || !(@.x >= 123 || @.a == 4) && @.b == 7)'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$.g ? (@.x >= @[*]?(@.a > "abc"))'::jsonpath; - jsonpath ---------------------------------------- @@ -193343,7 +192260,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$.g ? (@.x >= @[*]?(@.a > "abc"))'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$.g ? ((@.x >= 123 || @.a == 4) is unknown)'::jsonpath; - jsonpath -------------------------------------------------- @@ -193355,7 +192272,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$.g ? ((@.x >= 123 || @.a == 4) is unknown)'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$.g ? (exists (@.x))'::jsonpath; - jsonpath ------------------------- @@ -193367,7 +192284,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$.g ? (exists (@.x))'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$.g ? (exists (@.x ? (@ == 14)))'::jsonpath; - jsonpath ----------------------------------- @@ -193379,7 +192296,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$.g ? (exists (@.x ? (@ == 14)))'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$.g ? ((@.x >= 123 || @.a == 4) && exists (@.x ? (@ == 14)))'::jsonpath; - jsonpath ------------------------------------------------------------------- @@ -193391,7 +192308,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$.g ? ((@.x >= 123 || @.a == 4) && exists (@.x ? (@ == 14)))'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$.g ? (+@.x >= +-(+@.a + 2))'::jsonpath; - jsonpath ------------------------------------- @@ -193403,7 +192320,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$.g ? (+@.x >= +-(+@.a + 2))'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$a'::jsonpath; - jsonpath ----------- @@ -193415,7 +192332,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$a'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$a.b'::jsonpath; - jsonpath ----------- @@ -193427,7 +192344,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$a.b'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$a[*]'::jsonpath; - jsonpath ----------- @@ -193439,7 +192356,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$a[*]'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$.g ? (@.zip == $zip)'::jsonpath; - jsonpath ---------------------------- @@ -193451,7 +192368,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$.g ? (@.zip == $zip)'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$.a[1,2, 3 to 16]'::jsonpath; - jsonpath --------------------- @@ -193463,7 +192380,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$.a[1,2, 3 to 16]'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$.a[$a + 1, ($b[*]) to -($[0] * 2)]'::jsonpath; - jsonpath ----------------------------------------- @@ -193475,7 +192392,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$.a[$a + 1, ($b[*]) to -($[0] * 2)]'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$.a[$.a.size() - 3]'::jsonpath; - jsonpath -------------------------- @@ -193487,7 +192404,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$.a[$.a.size() - 3]'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select 'last'::jsonpath; -ERROR: LAST is allowed only in array subscripts -LINE 1: select 'last'::jsonpath; @@ -193497,7 +192414,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select 'last'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '"last"'::jsonpath; - jsonpath ----------- @@ -193509,7 +192426,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '"last"'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$.last'::jsonpath; - jsonpath ----------- @@ -193521,7 +192438,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$.last'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$ ? 
(last > 0)'::jsonpath; -ERROR: LAST is allowed only in array subscripts -LINE 1: select '$ ? (last > 0)'::jsonpath; @@ -193531,7 +192448,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$ ? (last > 0)'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$[last]'::jsonpath; - jsonpath ----------- @@ -193543,7 +192460,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$[last]'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$[$[0] ? (last > 0)]'::jsonpath; - jsonpath --------------------- @@ -193555,7 +192472,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$[$[0] ? (last > 0)]'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select 'null.type()'::jsonpath; - jsonpath -------------- @@ -193567,7 +192484,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select 'null.type()'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '1.type()'::jsonpath; -ERROR: trailing junk after numeric literal at or near "1.t" of jsonpath input -LINE 1: select '1.type()'::jsonpath; @@ -193577,7 +192494,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '1.type()'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '(1).type()'::jsonpath; - jsonpath ------------- @@ -193589,7 +192506,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '(1).type()'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '1.2.type()'::jsonpath; - jsonpath --------------- @@ -193601,7 +192518,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '1.2.type()'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '"aaa".type()'::jsonpath; - jsonpath --------------- @@ -193613,7 +192530,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '"aaa".type()'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select 'true.type()'::jsonpath; - jsonpath -------------- @@ -193625,7 +192542,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select 'true.type()'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$.double().floor().ceiling().abs()'::jsonpath; - jsonpath ------------------------------------- @@ -193637,7 +192554,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$.double().floor().ceiling().abs()'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$.keyvalue().key'::jsonpath; - jsonpath --------------------- @@ -193649,7 +192566,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$.keyvalue().key'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$.datetime()'::jsonpath; - jsonpath --------------- @@ -193661,7 +192578,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$.datetime()'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$.datetime("datetime template")'::jsonpath; - jsonpath ---------------------------------- @@ -193673,7 +192590,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$.datetime("datetime template")'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$ ? (@ starts with "abc")'::jsonpath; - jsonpath -------------------------- @@ -193685,7 +192602,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$ ? (@ starts with "abc")'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$ ? (@ starts with $var)'::jsonpath; - jsonpath --------------------------- @@ -193697,7 +192614,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$ ? (@ starts with $var)'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$ ? (@ like_regex "(invalid pattern")'::jsonpath; -ERROR: invalid regular expression: parentheses () not balanced -LINE 1: select '$ ? (@ like_regex "(invalid pattern")'::jsonpath; @@ -193707,7 +192624,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$ ? (@ like_regex "(invalid pattern")'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$ ? (@ like_regex "pattern")'::jsonpath; - jsonpath ----------------------------- @@ -193719,7 +192636,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$ ? (@ like_regex "pattern")'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$ ? 
(@ like_regex "pattern" flag "")'::jsonpath; - jsonpath ----------------------------- @@ -193731,7 +192648,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$ ? (@ like_regex "pattern" flag "")'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$ ? (@ like_regex "pattern" flag "i")'::jsonpath; - jsonpath -------------------------------------- @@ -193743,7 +192660,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$ ? (@ like_regex "pattern" flag "i")'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$ ? (@ like_regex "pattern" flag "is")'::jsonpath; - jsonpath --------------------------------------- @@ -193755,7 +192672,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$ ? (@ like_regex "pattern" flag "is")'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$ ? (@ like_regex "pattern" flag "isim")'::jsonpath; - jsonpath ---------------------------------------- @@ -193767,7 +192684,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$ ? (@ like_regex "pattern" flag "isim")'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$ ? (@ like_regex "pattern" flag "xsms")'::jsonpath; -ERROR: XQuery "x" flag (expanded regular expressions) is not implemented -LINE 1: select '$ ? (@ like_regex "pattern" flag "xsms")'::jsonpath; @@ -193777,7 +192694,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$ ? (@ like_regex "pattern" flag "xsms")'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$ ? (@ like_regex "pattern" flag "q")'::jsonpath; - jsonpath -------------------------------------- @@ -193789,7 +192706,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$ ? (@ like_regex "pattern" flag "q")'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$ ? (@ like_regex "pattern" flag "iq")'::jsonpath; - jsonpath --------------------------------------- @@ -193801,7 +192718,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$ ? (@ like_regex "pattern" flag "iq")'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$ ? (@ like_regex "pattern" flag "smixq")'::jsonpath; - jsonpath ------------------------------------------ @@ -193813,7 +192730,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$ ? (@ like_regex "pattern" flag "smixq")'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$ ? (@ like_regex "pattern" flag "a")'::jsonpath; -ERROR: invalid input syntax for type jsonpath -LINE 1: select '$ ? (@ like_regex "pattern" flag "a")'::jsonpath; @@ -193824,7 +192741,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$ ? (@ like_regex "pattern" flag "a")'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$ < 1'::jsonpath; - jsonpath ----------- @@ -193836,7 +192753,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$ < 1'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '($ < 1) || $.a.b <= $x'::jsonpath; - jsonpath ------------------------------- @@ -193848,7 +192765,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '($ < 1) || $.a.b <= $x'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '@ + 1'::jsonpath; -ERROR: @ is not allowed in root expressions -LINE 1: select '@ + 1'::jsonpath; @@ -193858,7 +192775,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '@ + 1'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '($).a.b'::jsonpath; - jsonpath ------------ @@ -193870,7 +192787,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '($).a.b'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '($.a.b).c.d'::jsonpath; - jsonpath -------------------- @@ -193882,7 +192799,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '($.a.b).c.d'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '($.a.b + -$.x.y).c.d'::jsonpath; - jsonpath ----------------------------------- @@ -193894,7 +192811,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '($.a.b + -$.x.y).c.d'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '(-+$.a.b).c.d'::jsonpath; - jsonpath -------------------------- @@ -193906,7 +192823,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '(-+$.a.b).c.d'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '1 + ($.a.b + 2).c.d'::jsonpath; - jsonpath -------------------------------- @@ -193918,7 +192835,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '1 + ($.a.b + 2).c.d'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '1 + ($.a.b > 2).c.d'::jsonpath; - jsonpath -------------------------------- @@ -193930,7 +192847,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '1 + ($.a.b > 2).c.d'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '($)'::jsonpath; - jsonpath ----------- @@ -193942,7 +192859,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '($)'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '(($))'::jsonpath; - jsonpath ----------- @@ -193954,7 +192871,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '(($))'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '((($ + 1)).a + ((2)).b ? ((((@ > 1)) || (exists(@.c)))))'::jsonpath; - jsonpath ---------------------------------------------------- @@ -193966,7 +192883,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '((($ + 1)).a + ((2)).b ? ((((@ > 1)) || (exists(@.c)))))'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$ ? (@.a < 1)'::jsonpath; - jsonpath ---------------- @@ -193978,7 +192895,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$ ? (@.a < 1)'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$ ? (@.a < -1)'::jsonpath; - jsonpath ----------------- @@ -193990,7 +192907,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$ ? (@.a < -1)'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$ ? (@.a < +1)'::jsonpath; - jsonpath ---------------- @@ -194002,7 +192919,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$ ? (@.a < +1)'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$ ? (@.a < .1)'::jsonpath; - jsonpath ------------------ @@ -194014,7 +192931,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$ ? (@.a < .1)'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$ ? (@.a < -.1)'::jsonpath; - jsonpath ------------------- @@ -194026,7 +192943,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$ ? (@.a < -.1)'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$ ? (@.a < +.1)'::jsonpath; - jsonpath ------------------ @@ -194038,7 +192955,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$ ? (@.a < +.1)'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$ ? (@.a < 0.1)'::jsonpath; - jsonpath ------------------ @@ -194050,7 +192967,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$ ? (@.a < 0.1)'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$ ? (@.a < -0.1)'::jsonpath; - jsonpath ------------------- @@ -194062,7 +192979,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$ ? (@.a < -0.1)'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$ ? (@.a < +0.1)'::jsonpath; - jsonpath ------------------ @@ -194074,7 +192991,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$ ? (@.a < +0.1)'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$ ? (@.a < 10.1)'::jsonpath; - jsonpath ------------------- @@ -194086,7 +193003,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$ ? (@.a < 10.1)'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$ ? (@.a < -10.1)'::jsonpath; - jsonpath -------------------- @@ -194098,7 +193015,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$ ? (@.a < -10.1)'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$ ? (@.a < +10.1)'::jsonpath; - jsonpath ------------------- @@ -194110,7 +193027,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$ ? (@.a < +10.1)'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$ ? (@.a < 1e1)'::jsonpath; - jsonpath ----------------- @@ -194122,7 +193039,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$ ? (@.a < 1e1)'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$ ? 
(@.a < -1e1)'::jsonpath; - jsonpath ------------------ @@ -194134,7 +193051,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$ ? (@.a < -1e1)'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$ ? (@.a < +1e1)'::jsonpath; - jsonpath ----------------- @@ -194146,7 +193063,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$ ? (@.a < +1e1)'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$ ? (@.a < .1e1)'::jsonpath; - jsonpath ---------------- @@ -194158,7 +193075,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$ ? (@.a < .1e1)'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$ ? (@.a < -.1e1)'::jsonpath; - jsonpath ----------------- @@ -194170,7 +193087,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$ ? (@.a < -.1e1)'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$ ? (@.a < +.1e1)'::jsonpath; - jsonpath ---------------- @@ -194182,7 +193099,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$ ? (@.a < +.1e1)'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$ ? (@.a < 0.1e1)'::jsonpath; - jsonpath ---------------- @@ -194194,7 +193111,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$ ? (@.a < 0.1e1)'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$ ? (@.a < -0.1e1)'::jsonpath; - jsonpath ----------------- @@ -194206,7 +193123,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$ ? (@.a < -0.1e1)'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$ ? (@.a < +0.1e1)'::jsonpath; - jsonpath ---------------- @@ -194218,7 +193135,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$ ? (@.a < +0.1e1)'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$ ? (@.a < 10.1e1)'::jsonpath; - jsonpath ------------------ @@ -194230,7 +193147,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$ ? (@.a < 10.1e1)'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$ ? 
(@.a < -10.1e1)'::jsonpath; - jsonpath ------------------- @@ -194242,7 +193159,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$ ? (@.a < -10.1e1)'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$ ? (@.a < +10.1e1)'::jsonpath; - jsonpath ------------------ @@ -194254,7 +193171,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$ ? (@.a < +10.1e1)'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$ ? (@.a < 1e-1)'::jsonpath; - jsonpath ------------------ @@ -194266,7 +193183,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$ ? (@.a < 1e-1)'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$ ? (@.a < -1e-1)'::jsonpath; - jsonpath ------------------- @@ -194278,7 +193195,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$ ? (@.a < -1e-1)'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$ ? (@.a < +1e-1)'::jsonpath; - jsonpath ------------------ @@ -194290,7 +193207,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$ ? (@.a < +1e-1)'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$ ? (@.a < .1e-1)'::jsonpath; - jsonpath ------------------- @@ -194302,7 +193219,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$ ? (@.a < .1e-1)'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$ ? (@.a < -.1e-1)'::jsonpath; - jsonpath -------------------- @@ -194314,7 +193231,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$ ? (@.a < -.1e-1)'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$ ? (@.a < +.1e-1)'::jsonpath; - jsonpath ------------------- @@ -194326,7 +193243,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$ ? (@.a < +.1e-1)'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$ ? (@.a < 0.1e-1)'::jsonpath; - jsonpath ------------------- @@ -194338,7 +193255,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$ ? (@.a < 0.1e-1)'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$ ? 
(@.a < -0.1e-1)'::jsonpath; - jsonpath -------------------- @@ -194350,7 +193267,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$ ? (@.a < -0.1e-1)'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$ ? (@.a < +0.1e-1)'::jsonpath; - jsonpath ------------------- @@ -194362,7 +193279,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$ ? (@.a < +0.1e-1)'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$ ? (@.a < 10.1e-1)'::jsonpath; - jsonpath ------------------- @@ -194374,7 +193291,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$ ? (@.a < 10.1e-1)'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$ ? (@.a < -10.1e-1)'::jsonpath; - jsonpath -------------------- @@ -194386,7 +193303,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$ ? (@.a < -10.1e-1)'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$ ? (@.a < +10.1e-1)'::jsonpath; - jsonpath ------------------- @@ -194398,7 +193315,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$ ? (@.a < +10.1e-1)'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$ ? (@.a < 1e+1)'::jsonpath; - jsonpath ----------------- @@ -194410,7 +193327,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$ ? (@.a < 1e+1)'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$ ? (@.a < -1e+1)'::jsonpath; - jsonpath ------------------ @@ -194422,7 +193339,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$ ? (@.a < -1e+1)'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$ ? (@.a < +1e+1)'::jsonpath; - jsonpath ----------------- @@ -194434,7 +193351,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$ ? (@.a < +1e+1)'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$ ? (@.a < .1e+1)'::jsonpath; - jsonpath ---------------- @@ -194446,7 +193363,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$ ? (@.a < .1e+1)'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$ ? 
(@.a < -.1e+1)'::jsonpath; - jsonpath ----------------- @@ -194458,7 +193375,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$ ? (@.a < -.1e+1)'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$ ? (@.a < +.1e+1)'::jsonpath; - jsonpath ---------------- @@ -194470,7 +193387,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$ ? (@.a < +.1e+1)'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$ ? (@.a < 0.1e+1)'::jsonpath; - jsonpath ---------------- @@ -194482,7 +193399,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$ ? (@.a < 0.1e+1)'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$ ? (@.a < -0.1e+1)'::jsonpath; - jsonpath ----------------- @@ -194494,7 +193411,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$ ? (@.a < -0.1e+1)'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$ ? (@.a < +0.1e+1)'::jsonpath; - jsonpath ---------------- @@ -194506,7 +193423,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$ ? (@.a < +0.1e+1)'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$ ? (@.a < 10.1e+1)'::jsonpath; - jsonpath ------------------ @@ -194518,7 +193435,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$ ? (@.a < 10.1e+1)'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$ ? (@.a < -10.1e+1)'::jsonpath; - jsonpath ------------------- @@ -194530,7 +193447,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$ ? (@.a < -10.1e+1)'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '$ ? (@.a < +10.1e+1)'::jsonpath; - jsonpath ------------------ @@ -194542,7 +193459,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '$ ? (@.a < +10.1e+1)'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev -- numeric literals select '0'::jsonpath; - jsonpath @@ -194555,7 +193472,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '0'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '00'::jsonpath; -ERROR: trailing junk after numeric literal at or near "00" of jsonpath input -LINE 1: select '00'::jsonpath; @@ -194565,7 +193482,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '00'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '0755'::jsonpath; -ERROR: syntax error at end of jsonpath input -LINE 1: select '0755'::jsonpath; @@ -194575,7 +193492,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '0755'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '0.0'::jsonpath; - jsonpath ----------- @@ -194587,7 +193504,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '0.0'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '0.000'::jsonpath; - jsonpath ----------- @@ -194599,7 +193516,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '0.000'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '0.000e1'::jsonpath; - jsonpath ----------- @@ -194611,7 +193528,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '0.000e1'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '0.000e2'::jsonpath; - jsonpath ----------- @@ -194623,7 +193540,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '0.000e2'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '0.000e3'::jsonpath; - jsonpath ----------- @@ -194635,7 +193552,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '0.000e3'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '0.0010'::jsonpath; - jsonpath ----------- @@ -194647,7 +193564,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '0.0010'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '0.0010e-1'::jsonpath; - jsonpath ----------- @@ -194659,7 +193576,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '0.0010e-1'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '0.0010e+1'::jsonpath; - jsonpath ----------- @@ -194671,7 +193588,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '0.0010e+1'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '0.0010e+2'::jsonpath; - jsonpath ----------- @@ -194683,7 +193600,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '0.0010e+2'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '.001'::jsonpath; - jsonpath ----------- @@ -194695,7 +193612,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '.001'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '.001e1'::jsonpath; - jsonpath ----------- @@ -194707,7 +193624,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '.001e1'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '1.'::jsonpath; - jsonpath ----------- @@ -194719,7 +193636,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '1.'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '1.e1'::jsonpath; - jsonpath ----------- @@ -194731,7 +193648,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '1.e1'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '1a'::jsonpath; -ERROR: trailing junk after numeric literal at or near "1a" of jsonpath input -LINE 1: select '1a'::jsonpath; @@ -194741,7 +193658,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '1a'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '1e'::jsonpath; -ERROR: trailing junk after numeric literal at or near "1e" of jsonpath input -LINE 1: select '1e'::jsonpath; @@ -194751,7 +193668,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '1e'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '1.e'::jsonpath; -ERROR: trailing junk after numeric literal at or near "1.e" of jsonpath input -LINE 1: select '1.e'::jsonpath; @@ -194761,7 +193678,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '1.e'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '1.2a'::jsonpath; -ERROR: trailing junk after numeric literal at or near "1.2a" of jsonpath input -LINE 1: select '1.2a'::jsonpath; @@ -194771,7 +193688,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '1.2a'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '1.2e'::jsonpath; -ERROR: trailing junk after numeric literal at or near "1.2e" of jsonpath input -LINE 1: select '1.2e'::jsonpath; @@ -194781,7 +193698,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '1.2e'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '1.2.e'::jsonpath; - jsonpath ------------ @@ -194793,7 +193710,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '1.2.e'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '(1.2).e'::jsonpath; - jsonpath ------------ @@ -194805,7 +193722,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '(1.2).e'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '1e3'::jsonpath; - jsonpath ----------- @@ -194817,7 +193734,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '1e3'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '1.e3'::jsonpath; - jsonpath ----------- @@ -194829,7 +193746,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '1.e3'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '1.e3.e'::jsonpath; - jsonpath ------------- @@ -194841,7 +193758,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '1.e3.e'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '1.e3.e4'::jsonpath; - jsonpath -------------- @@ -194853,7 +193770,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '1.e3.e4'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '1.2e3'::jsonpath; - jsonpath ----------- @@ -194865,7 +193782,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '1.2e3'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '1.2e3a'::jsonpath; -ERROR: trailing junk after numeric literal at or near "1.2e3a" of jsonpath input -LINE 1: select '1.2e3a'::jsonpath; @@ -194875,7 +193792,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '1.2e3a'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '1.2.e3'::jsonpath; - jsonpath ------------- @@ -194887,7 +193804,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '1.2.e3'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '(1.2).e3'::jsonpath; - jsonpath ------------- @@ -194899,7 +193816,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '(1.2).e3'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '1..e'::jsonpath; - jsonpath ----------- @@ -194911,7 +193828,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '1..e'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '1..e3'::jsonpath; - jsonpath ----------- @@ -194923,7 +193840,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '1..e3'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '(1.).e'::jsonpath; - jsonpath ----------- @@ -194935,7 +193852,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '(1.).e'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '(1.).e3'::jsonpath; - jsonpath ----------- @@ -194947,7 +193864,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '(1.).e3'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '1?(2>3)'::jsonpath; - jsonpath -------------- @@ -194959,7 +193876,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '1?(2>3)'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev -- nondecimal select '0b100101'::jsonpath; - jsonpath @@ -194972,7 +193889,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '0b100101'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '0o273'::jsonpath; - jsonpath ----------- @@ -194984,7 +193901,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '0o273'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '0x42F'::jsonpath; - jsonpath ----------- @@ -194996,7 +193913,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '0x42F'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev -- error cases select '0b'::jsonpath; -ERROR: trailing junk after numeric literal at or near "0b" of jsonpath input @@ -195007,7 +193924,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '0b'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '1b'::jsonpath; -ERROR: trailing junk after numeric literal at or near "1b" of jsonpath input -LINE 1: select '1b'::jsonpath; @@ -195017,7 +193934,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '1b'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '0b0x'::jsonpath; -ERROR: syntax error at end of jsonpath input -LINE 1: select '0b0x'::jsonpath; @@ -195027,7 +193944,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '0b0x'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '0o'::jsonpath; -ERROR: trailing junk after numeric literal at or near "0o" of jsonpath input -LINE 1: select '0o'::jsonpath; @@ -195037,7 +193954,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '0o'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '1o'::jsonpath; -ERROR: trailing junk after numeric literal at or near "1o" of jsonpath input -LINE 1: select '1o'::jsonpath; @@ -195047,7 +193964,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '1o'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '0o0x'::jsonpath; -ERROR: syntax error at end of jsonpath input -LINE 1: select '0o0x'::jsonpath; @@ -195057,7 +193974,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '0o0x'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '0x'::jsonpath; -ERROR: trailing junk after numeric literal at or near "0x" of jsonpath input -LINE 1: select '0x'::jsonpath; @@ -195067,7 +193984,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '0x'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '1x'::jsonpath; -ERROR: trailing junk after numeric literal at or near "1x" of jsonpath input -LINE 1: select '1x'::jsonpath; @@ -195077,7 +193994,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '1x'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '0x0y'::jsonpath; -ERROR: syntax error at end of jsonpath input -LINE 1: select '0x0y'::jsonpath; @@ -195087,7 +194004,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '0x0y'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev -- underscores select '1_000_000'::jsonpath; - jsonpath @@ -195100,7 +194017,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '1_000_000'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '1_2_3'::jsonpath; - jsonpath ----------- @@ -195112,7 +194029,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '1_2_3'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '0x1EEE_FFFF'::jsonpath; - jsonpath ------------ @@ -195124,7 +194041,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '0x1EEE_FFFF'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '0o2_73'::jsonpath; - jsonpath ----------- @@ -195136,7 +194053,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '0o2_73'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '0b10_0101'::jsonpath; - jsonpath ----------- @@ -195148,7 +194065,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '0b10_0101'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '1_000.000_005'::jsonpath; - jsonpath -------------- @@ -195160,7 +194077,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '1_000.000_005'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '1_000.'::jsonpath; - jsonpath ----------- @@ -195172,7 +194089,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '1_000.'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '.000_005'::jsonpath; - jsonpath ----------- @@ -195184,7 +194101,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '.000_005'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '1_000.5e0_1'::jsonpath; - jsonpath ----------- @@ -195196,7 +194113,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '1_000.5e0_1'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev -- error cases select '_100'::jsonpath; -ERROR: syntax error at end of jsonpath input @@ -195207,7 +194124,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '_100'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '100_'::jsonpath; -ERROR: trailing junk after numeric literal at or near "100_" of jsonpath input -LINE 1: select '100_'::jsonpath; @@ -195217,7 +194134,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '100_'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '100__000'::jsonpath; -ERROR: syntax error at end of jsonpath input -LINE 1: select '100__000'::jsonpath; @@ -195227,7 +194144,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '100__000'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '_1_000.5'::jsonpath; -ERROR: syntax error at end of jsonpath input -LINE 1: select '_1_000.5'::jsonpath; @@ -195237,7 +194154,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '_1_000.5'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '1_000_.5'::jsonpath; -ERROR: trailing junk after numeric literal at or near "1_000_" of jsonpath input -LINE 1: select '1_000_.5'::jsonpath; @@ -195247,7 +194164,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '1_000_.5'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '1_000._5'::jsonpath; -ERROR: trailing junk after numeric literal at or near "1_000._" of jsonpath input -LINE 1: select '1_000._5'::jsonpath; @@ -195257,7 +194174,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '1_000._5'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '1_000.5_'::jsonpath; -ERROR: trailing junk after numeric literal at or near "1_000.5_" of jsonpath input -LINE 1: select '1_000.5_'::jsonpath; @@ -195267,7 +194184,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '1_000.5_'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '1_000.5e_1'::jsonpath; -ERROR: trailing junk after numeric literal at or near "1_000.5e" of jsonpath input -LINE 1: select '1_000.5e_1'::jsonpath; @@ -195277,7 +194194,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '1_000.5e_1'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev -- underscore after prefix not allowed in JavaScript (but allowed in SQL) select '0b_10_0101'::jsonpath; -ERROR: syntax error at end of jsonpath input @@ -195288,7 +194205,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '0b_10_0101'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '0o_273'::jsonpath; -ERROR: syntax error at end of jsonpath input -LINE 1: select '0o_273'::jsonpath; @@ -195298,7 +194215,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '0o_273'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select '0x_42F'::jsonpath; -ERROR: syntax error at end of jsonpath input -LINE 1: select '0x_42F'::jsonpath; @@ -195308,7 +194225,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonpath.out --la +select '0x_42F'::jsonpath + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev -- test non-error-throwing API SELECT str as jsonpath, pg_input_is_valid(str,'jsonpath') as ok, @@ -197379,7 +196296,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/json.out --label= - +ERROR: array_to_json(): could not parse "{{1,5},{99,100}}" as type int[]: unimplemented: nested arrays not supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/32552/v24.2 ++See: https://go.crdb.dev/issue-v/32552/dev -- row_to_json SELECT row_to_json(row(1,'foo')); - row_to_json @@ -197484,7 +196401,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/json.out --label= - "infinity" + to_json +-------------------------------- -+ "294276-12-31T23:59:59.999999" ++ "294277-01-01T23:59:59.999999" (1 row) select to_json(timestamp '-Infinity'); @@ -197493,7 +196410,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/json.out --label= - "-infinity" + to_json +------------------------ -+ "-4713-11-24T00:00:00" ++ "-4714-11-23T00:00:00" (1 row) select to_json(timestamptz 'Infinity'); @@ -197502,7 +196419,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/json.out --label= - "infinity" + to_json +-------------------------------------- -+ "294276-12-31T15:59:59.999999-08:00" ++ "294277-01-01T15:59:59.999999-08:00" (1 row) select to_json(timestamptz '-Infinity'); @@ -197511,7 +196428,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/json.out --label= - "-infinity" + to_json +------------------------------ -+ "-4713-11-23T16:00:00-08:00" ++ "-4714-11-22T16:00:00-08:00" (1 row) --json_agg @@ -197916,21 +196833,21 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/json.out --label= +CREATE DOMAIN js_int_not_null AS int NOT NULL + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev CREATE DOMAIN js_int_array_1d AS int[] CHECK(array_length(VALUE, 1) = 3); +ERROR: at or near "as": syntax error: unimplemented: this syntax +DETAIL: source SQL: +CREATE DOMAIN js_int_array_1d AS int[] CHECK(array_length(VALUE, 1) = 3) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev CREATE DOMAIN js_int_array_2d AS int[][] CHECK(array_length(VALUE, 2) = 3); +ERROR: at or near "as": syntax error: unimplemented: this syntax +DETAIL: source SQL: +CREATE DOMAIN js_int_array_2d AS int[][] CHECK(array_length(VALUE, 2) = 3) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev create type j_unordered_pair as (x int, y int); create domain j_ordered_pair as j_unordered_pair check((value).x <= (value).y); +ERROR: at or near "as": syntax error: unimplemented: this syntax @@ -197938,7 +196855,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/json.out --label= +create domain j_ordered_pair as j_unordered_pair check((value).x <= (value).y) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev CREATE TYPE jsrec AS ( i int, ia _int4, @@ -198709,28 +197626,28 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/json.out --label= +DROP DOMAIN js_int_not_null + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev DROP DOMAIN js_int_array_1d; +ERROR: at or near "js_int_array_1d": syntax error: unimplemented: this syntax +DETAIL: source SQL: +DROP DOMAIN js_int_array_1d + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev DROP DOMAIN js_int_array_2d; +ERROR: at or near "js_int_array_2d": syntax error: unimplemented: this syntax +DETAIL: source SQL: +DROP DOMAIN js_int_array_2d + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev DROP DOMAIN j_ordered_pair; +ERROR: at or near "j_ordered_pair": syntax error: unimplemented: this syntax +DETAIL: source SQL: +DROP DOMAIN j_ordered_pair + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev DROP TYPE j_unordered_pair; --json_typeof() function select value, json_typeof(value) @@ -198911,7 +197828,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/json.out --label= +SELECT json_build_array(VARIADIC '{{1,4},{2,5},{3,6}}'::int[][]) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/32552/v24.2 ++See: https://go.crdb.dev/issue-v/32552/dev SELECT json_build_object('a',1,'b',1.2,'c',true,'d',null,'e',json '{"x": 3, "y": [1,2,3]}'); - json_build_object ----------------------------------------------------------------------------- @@ -199100,7 +198017,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/json.out --label= +SELECT json_build_object(VARIADIC '{{1,4},{2,5},{3,6}}'::int[][]) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/32552/v24.2 ++See: https://go.crdb.dev/issue-v/32552/dev -- empty objects/arrays SELECT json_build_array(); json_build_array @@ -199254,7 +198171,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/json.out --label= + as t(a int, b json, c text, x int, ca char(5)[], ia int[][], r jpop) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/32552/v24.2 ++See: https://go.crdb.dev/issue-v/32552/dev select *, c is null as c_is_null from json_to_recordset('[{"a":1, "b":{"c":16, "d":2}, "x":8}]'::json) as t(a int, b json, c text, x int); @@ -199304,7 +198221,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/json.out --label= +select * from json_to_record('{"ia2": [1, 2, 3]}') as x(ia2 int[][]) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/32552/v24.2 ++See: https://go.crdb.dev/issue-v/32552/dev select * from json_to_record('{"ia2": [[1, 2], [3, 4]]}') as x(ia2 int4[][]); - ia2 ---------------- @@ -199316,7 +198233,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/json.out --label= +select * from json_to_record('{"ia2": [[1, 2], [3, 4]]}') as x(ia2 int4[][]) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/32552/v24.2 ++See: https://go.crdb.dev/issue-v/32552/dev select * from json_to_record('{"ia2": [[[1], [2], [3]]]}') as x(ia2 int4[][]); - ia2 ------------------ @@ -199328,7 +198245,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/json.out --label= +select * from json_to_record('{"ia2": [[[1], [2], [3]]]}') as x(ia2 int4[][]) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/32552/v24.2 ++See: https://go.crdb.dev/issue-v/32552/dev select * from json_to_record('{"out": {"key": 1}}') as x(out json); out ------------ @@ -199443,7 +198360,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/json.out --label= - +ERROR: json_to_tsvector(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"key"'); - json_to_tsvector --------------------------------- @@ -199452,7 +198369,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/json.out --label= - +ERROR: json_to_tsvector(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"string"'); - json_to_tsvector ------------------- @@ -199461,7 +198378,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/json.out --label= - +ERROR: json_to_tsvector(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"numeric"'); - json_to_tsvector ------------------- @@ -199470,7 +198387,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/json.out --label= - +ERROR: json_to_tsvector(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"boolean"'); - json_to_tsvector -------------------- @@ -199479,7 +198396,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/json.out --label= - +ERROR: json_to_tsvector(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '["string", "numeric"]'); - json_to_tsvector ---------------------------------- @@ -199488,7 +198405,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/json.out --label= - +ERROR: json_to_tsvector(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"all"'); - json_to_tsvector ----------------------------------------------------------------------------------------- @@ -199497,7 +198414,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/json.out --label= - +ERROR: json_to_tsvector(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"key"'); - json_to_tsvector --------------------------------- @@ -199506,7 +198423,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/json.out --label= - +ERROR: json_to_tsvector(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"string"'); - json_to_tsvector ------------------- @@ -199515,7 +198432,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/json.out --label= - +ERROR: json_to_tsvector(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"numeric"'); - json_to_tsvector ------------------- @@ -199524,7 +198441,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/json.out --label= - +ERROR: json_to_tsvector(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"boolean"'); - json_to_tsvector -------------------- @@ -199533,7 +198450,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/json.out --label= - +ERROR: json_to_tsvector(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '["string", "numeric"]'); - json_to_tsvector ---------------------------------- @@ -199542,7 +198459,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/json.out --label= - +ERROR: json_to_tsvector(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev -- to_tsvector corner cases select to_tsvector('""'::json); - to_tsvector @@ -199585,7 +198502,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/json.out --label= - +ERROR: json_to_tsvector(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select json_to_tsvector('{}'::json, '"all"'); - json_to_tsvector ------------------- @@ -199594,7 +198511,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/json.out --label= - +ERROR: json_to_tsvector(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select json_to_tsvector('[]'::json, '"all"'); - json_to_tsvector ------------------- @@ -199603,7 +198520,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/json.out --label= - +ERROR: json_to_tsvector(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select json_to_tsvector('null'::json, '"all"'); - json_to_tsvector ------------------- @@ -199612,18 +198529,18 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/json.out --label= - +ERROR: json_to_tsvector(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '""'); -ERROR: wrong flag in flag array: "" -HINT: Possible values are: "string", "numeric", "boolean", "key", and "all". +ERROR: json_to_tsvector(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '{}'); -ERROR: wrong flag type, only arrays and scalars are allowed +ERROR: json_to_tsvector(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '[]'); - json_to_tsvector ------------------- @@ -199632,19 +198549,19 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/json.out --label= - +ERROR: json_to_tsvector(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, 'null'); -ERROR: flag array element is not a string -HINT: Possible values are: "string", "numeric", "boolean", "key", and "all". 
+ERROR: json_to_tsvector(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '["all", null]'); -ERROR: flag array element is not a string -HINT: Possible values are: "string", "numeric", "boolean", "key", and "all". +ERROR: json_to_tsvector(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev -- ts_headline for json select ts_headline('{"a": "aaa bbb", "b": {"c": "ccc ddd fff", "c1": "ccc1 ddd1"}, "d": ["ggg hhh", "iii jjj"]}'::json, tsquery('bbb & ddd & hhh')); - ts_headline @@ -199654,7 +198571,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/json.out --label= - +ERROR: ts_headline(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select ts_headline('english', '{"a": "aaa bbb", "b": {"c": "ccc ddd fff"}, "d": ["ggg hhh", "iii jjj"]}'::json, tsquery('bbb & ddd & hhh')); - ts_headline ----------------------------------------------------------------------------------------- @@ -199663,7 +198580,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/json.out --label= - +ERROR: ts_headline(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select ts_headline('{"a": "aaa bbb", "b": {"c": "ccc ddd fff", "c1": "ccc1 ddd1"}, "d": ["ggg hhh", "iii jjj"]}'::json, tsquery('bbb & ddd & hhh'), 'StartSel = <, StopSel = >'); - ts_headline ------------------------------------------------------------------------------------------- @@ -199672,7 +198589,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/json.out --label= - +ERROR: ts_headline(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select ts_headline('english', '{"a": "aaa bbb", "b": {"c": "ccc ddd fff", "c1": "ccc1 ddd1"}, "d": ["ggg hhh", "iii jjj"]}'::json, tsquery('bbb & ddd & hhh'), 'StartSel = <, StopSel = >'); - ts_headline ------------------------------------------------------------------------------------------- @@ -199681,7 +198598,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/json.out --label= - +ERROR: ts_headline(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev -- corner cases for ts_headline with json select ts_headline('null'::json, tsquery('aaa & bbb')); - ts_headline @@ -199691,7 +198608,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/json.out --label= - +ERROR: ts_headline(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select ts_headline('{}'::json, tsquery('aaa & bbb')); - ts_headline -------------- @@ -199700,7 +198617,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/json.out --label= - +ERROR: ts_headline(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select ts_headline('[]'::json, tsquery('aaa & bbb')); - ts_headline -------------- @@ -199709,7 +198626,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/json.out --label= - +ERROR: ts_headline(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.out --label=/mnt/data1/postgres/src/test/regress/results/jsonb_jsonpath.out /mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.out /mnt/data1/postgres/src/test/regress/results/jsonb_jsonpath.out --- /mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.out +++ /mnt/data1/postgres/src/test/regress/results/jsonb_jsonpath.out @@ -199918,7 +198835,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou -ERROR: jsonpath array subscript is out of bounds +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[1]', 'strict $[1]', silent => true); - jsonb_path_query ------------------- @@ -199953,12 +198870,12 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou -ERROR: jsonpath array subscript is out of integer range +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[1]', 'strict $[10000000000000000]'); -ERROR: jsonpath array subscript is out of integer range +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb '[1]' @? '$[0]'; - ?column? ----------- @@ -200146,17 +199063,17 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('1', 'strict $.a'); -ERROR: jsonpath member accessor can only be applied to an object +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('1', 'strict $.*'); -ERROR: jsonpath wildcard member accessor can only be applied to an object +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('1', 'strict $.a', silent => true); - jsonb_path_query ------------------- @@ -200184,12 +199101,12 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[]', 'strict $.a'); -ERROR: jsonpath member accessor can only be applied to an object +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[]', 'strict $.a', silent => true); - jsonb_path_query ------------------- @@ -200207,12 +199124,12 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('{}', 'strict $.a'); -ERROR: JSON object does not contain key "a" +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('{}', 'strict $.a', silent => true); - jsonb_path_query ------------------- @@ -200227,22 +199144,22 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou -ERROR: jsonpath array accessor can only be applied to an array +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('1', 'strict $[*]'); -ERROR: jsonpath wildcard array accessor can only be applied to an array +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[]', 'strict $[1]'); -ERROR: jsonpath array subscript is out of bounds +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[]', 'strict $["a"]'); -ERROR: jsonpath array subscript is not a single numeric value +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('1', 'strict $[1]', silent => true); - jsonb_path_query ------------------- @@ -200291,7 +199208,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('{"a": 12, "b": {"a": 13}}', '$.b'); - jsonb_path_query ------------------- @@ -200300,7 +199217,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('{"a": 12, "b": {"a": 13}}', '$.*'); - jsonb_path_query ------------------- @@ -200310,7 +199227,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('{"a": 12, "b": {"a": 13}}', 'lax $.*.a'); - jsonb_path_query ------------------- @@ -200319,7 +199236,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[12, {"a": 13}, {"b": 14}]', 'lax $[*].a'); - jsonb_path_query ------------------- @@ -200328,7 +199245,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[12, {"a": 13}, {"b": 14}]', 'lax $[*].*'); - jsonb_path_query ------------------- @@ -200338,7 +199255,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[12, {"a": 13}, {"b": 14}]', 'lax $[0].a'); - jsonb_path_query ------------------- @@ -200346,7 +199263,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[12, {"a": 13}, {"b": 14}]', 'lax $[1].a'); - jsonb_path_query ------------------- @@ -200355,7 +199272,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[12, {"a": 13}, {"b": 14}]', 'lax $[2].a'); - jsonb_path_query ------------------- @@ -200363,7 +199280,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[12, {"a": 13}, {"b": 14}]', 'lax $[0,1].a'); - jsonb_path_query ------------------- @@ -200372,7 +199289,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[12, {"a": 13}, {"b": 14}]', 'lax $[0 to 10].a'); - jsonb_path_query ------------------- @@ -200381,12 +199298,12 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[12, {"a": 13}, {"b": 14}]', 'lax $[0 to 10 / 0].a'); -ERROR: division by zero +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[12, {"a": 13}, {"b": 14}, "ccc", true]', '$[2.5 - 1 to $.size() - 2]'); - jsonb_path_query ------------------- @@ -200397,7 +199314,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('1', 'lax $[0]'); - jsonb_path_query ------------------- @@ -200406,7 +199323,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('1', 'lax $[*]'); - jsonb_path_query ------------------- @@ -200415,7 +199332,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[1]', 'lax $[0]'); - jsonb_path_query ------------------- @@ -200424,7 +199341,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[1]', 'lax $[*]'); - jsonb_path_query ------------------- @@ -200433,7 +199350,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[1,2,3]', 'lax $[*]'); - jsonb_path_query ------------------- @@ -200444,12 +199361,12 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[1,2,3]', 'strict $[*].a'); -ERROR: jsonpath member accessor can only be applied to an object +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[1,2,3]', 'strict $[*].a', silent => true); - jsonb_path_query ------------------- @@ -200467,7 +199384,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[]', '$[last ? (exists(last))]'); - jsonb_path_query ------------------- @@ -200475,12 +199392,12 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[]', 'strict $[last]'); -ERROR: jsonpath array subscript is out of bounds +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[]', 'strict $[last]', silent => true); - jsonb_path_query ------------------- @@ -200499,7 +199416,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[1,2,3]', '$[last]'); - jsonb_path_query ------------------- @@ -200508,7 +199425,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[1,2,3]', '$[last - 1]'); - jsonb_path_query ------------------- @@ -200517,7 +199434,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[1,2,3]', '$[last ? (@.type() == "number")]'); - jsonb_path_query ------------------- @@ -200526,12 +199443,12 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[1,2,3]', '$[last ? (@.type() == "string")]'); -ERROR: jsonpath array subscript is not a single numeric value +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[1,2,3]', '$[last ? (@.type() == "string")]', silent => true); - jsonb_path_query ------------------- @@ -200550,24 +199467,24 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select * from jsonb_path_query('{"a": 10}', '$ ? (@.a < $value)'); -ERROR: could not find jsonpath variable "value" +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select * from jsonb_path_query('{"a": 10}', '$ ? (@.a < $value)', '1'); -ERROR: "vars" argument is not an object -DETAIL: Jsonpath parameters should be encoded as key-value pairs of "vars" object. +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select * from jsonb_path_query('{"a": 10}', '$ ? (@.a < $value)', '[{"value" : 13}]'); -ERROR: "vars" argument is not an object -DETAIL: Jsonpath parameters should be encoded as key-value pairs of "vars" object. +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select * from jsonb_path_query('{"a": 10}', '$ ? (@.a < $value)', '{"value" : 13}'); - jsonb_path_query ------------------- @@ -200576,7 +199493,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select * from jsonb_path_query('{"a": 10}', '$ ? (@.a < $value)', '{"value" : 8}'); - jsonb_path_query ------------------- @@ -200584,7 +199501,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select * from jsonb_path_query('{"a": 10}', '$.a ? (@ < $value)', '{"value" : 13}'); - jsonb_path_query ------------------- @@ -200593,7 +199510,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select * from jsonb_path_query('[10,11,12,13,14,15]', '$[*] ? (@ < $value)', '{"value" : 13}'); - jsonb_path_query ------------------- @@ -200604,7 +199521,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select * from jsonb_path_query('[10,11,12,13,14,15]', '$[0,1] ? (@ < $x.value)', '{"x": {"value" : 13}}'); - jsonb_path_query ------------------- @@ -200614,7 +199531,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select * from jsonb_path_query('[10,11,12,13,14,15]', '$[0 to 2] ? (@ < $value)', '{"value" : 15}'); - jsonb_path_query ------------------- @@ -200625,7 +199542,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select * from jsonb_path_query('[1,"1",2,"2",null]', '$[*] ? (@ == "1")'); - jsonb_path_query ------------------- @@ -200634,7 +199551,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select * from jsonb_path_query('[1,"1",2,"2",null]', '$[*] ? 
(@ == $value)', '{"value" : "1"}'); - jsonb_path_query ------------------- @@ -200643,7 +199560,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select * from jsonb_path_query('[1,"1",2,"2",null]', '$[*] ? (@ == $value)', '{"value" : null}'); - jsonb_path_query ------------------- @@ -200652,7 +199569,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select * from jsonb_path_query('[1, "2", null]', '$[*] ? (@ != null)'); - jsonb_path_query ------------------- @@ -200662,7 +199579,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select * from jsonb_path_query('[1, "2", null]', '$[*] ? (@ == null)'); - jsonb_path_query ------------------- @@ -200671,7 +199588,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select * from jsonb_path_query('{}', '$ ? (@ == @)'); - jsonb_path_query ------------------- @@ -200679,7 +199596,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select * from jsonb_path_query('[]', 'strict $ ? (@ == @)'); - jsonb_path_query ------------------- @@ -200687,7 +199604,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**'); - jsonb_path_query ------------------- @@ -200698,7 +199615,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{0}'); - jsonb_path_query ------------------- @@ -200707,7 +199624,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{0 to last}'); - jsonb_path_query ------------------- @@ -200718,7 +199635,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{1}'); - jsonb_path_query ------------------- @@ -200727,7 +199644,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{1 to last}'); - jsonb_path_query ------------------- @@ -200737,7 +199654,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{2}'); - jsonb_path_query ------------------- @@ -200746,7 +199663,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{2 to last}'); - jsonb_path_query ------------------- @@ -200755,7 +199672,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{3 to last}'); - jsonb_path_query ------------------- @@ -200763,7 +199680,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{last}'); - jsonb_path_query ------------------- @@ -200772,7 +199689,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**.b ? 
(@ > 0)'); - jsonb_path_query ------------------- @@ -200781,7 +199698,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{0}.b ? (@ > 0)'); - jsonb_path_query ------------------- @@ -200789,7 +199706,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{1}.b ? (@ > 0)'); - jsonb_path_query ------------------- @@ -200798,7 +199715,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{0 to last}.b ? (@ > 0)'); - jsonb_path_query ------------------- @@ -200807,7 +199724,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{1 to last}.b ? (@ > 0)'); - jsonb_path_query ------------------- @@ -200816,7 +199733,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{1 to 2}.b ? (@ > 0)'); - jsonb_path_query ------------------- @@ -200825,7 +199742,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('{"a": {"c": {"b": 1}}}', 'lax $.**.b ? (@ > 0)'); - jsonb_path_query ------------------- @@ -200834,7 +199751,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('{"a": {"c": {"b": 1}}}', 'lax $.**{0}.b ? 
(@ > 0)'); - jsonb_path_query ------------------- @@ -200842,7 +199759,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('{"a": {"c": {"b": 1}}}', 'lax $.**{1}.b ? (@ > 0)'); - jsonb_path_query ------------------- @@ -200850,7 +199767,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('{"a": {"c": {"b": 1}}}', 'lax $.**{0 to last}.b ? (@ > 0)'); - jsonb_path_query ------------------- @@ -200859,7 +199776,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('{"a": {"c": {"b": 1}}}', 'lax $.**{1 to last}.b ? (@ > 0)'); - jsonb_path_query ------------------- @@ -200868,7 +199785,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('{"a": {"c": {"b": 1}}}', 'lax $.**{1 to 2}.b ? (@ > 0)'); - jsonb_path_query ------------------- @@ -200877,7 +199794,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('{"a": {"c": {"b": 1}}}', 'lax $.**{2 to 3}.b ? (@ > 0)'); - jsonb_path_query ------------------- @@ -200886,7 +199803,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb '{"a": {"b": 1}}' @? '$.**.b ? ( @ > 0)'; - ?column? ----------- @@ -201025,7 +199942,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('{"g": {"x": 2}}', '$.g ? 
(exists (@.y))'); - jsonb_path_query ------------------- @@ -201033,7 +199950,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('{"g": {"x": 2}}', '$.g ? (exists (@.x ? (@ >= 2) ))'); - jsonb_path_query ------------------- @@ -201042,7 +199959,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('{"g": [{"x": 2}, {"y": 3}]}', 'lax $.g ? (exists (@.x))'); - jsonb_path_query ------------------- @@ -201051,7 +199968,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('{"g": [{"x": 2}, {"y": 3}]}', 'lax $.g ? (exists (@.x + "3"))'); - jsonb_path_query ------------------- @@ -201059,7 +199976,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('{"g": [{"x": 2}, {"y": 3}]}', 'lax $.g ? ((exists (@.x + "3")) is unknown)'); - jsonb_path_query ------------------- @@ -201069,7 +199986,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('{"g": [{"x": 2}, {"y": 3}]}', 'strict $.g[*] ? (exists (@.x))'); - jsonb_path_query ------------------- @@ -201078,7 +199995,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('{"g": [{"x": 2}, {"y": 3}]}', 'strict $.g[*] ? ((exists (@.x)) is unknown)'); - jsonb_path_query ------------------- @@ -201087,7 +200004,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('{"g": [{"x": 2}, {"y": 3}]}', 'strict $.g ? 
(exists (@[*].x))'); - jsonb_path_query ------------------- @@ -201095,7 +200012,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('{"g": [{"x": 2}, {"y": 3}]}', 'strict $.g ? ((exists (@[*].x)) is unknown)'); - jsonb_path_query ----------------------- @@ -201104,7 +200021,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev --test ternary logic select x, y, @@ -201225,7 +200142,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('{"c": {"a": 2, "b":1}}', '$.** ? (@.a == (1 + 1))'); - jsonb_path_query ------------------- @@ -201234,7 +200151,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('{"c": {"a": 2, "b":1}}', '$.** ? (@.a == @.b + 1)'); - jsonb_path_query ------------------- @@ -201243,7 +200160,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('{"c": {"a": 2, "b":1}}', '$.** ? (@.a == (@.b + 1))'); - jsonb_path_query ------------------- @@ -201252,7 +200169,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb '{"c": {"a": -1, "b":1}}' @? '$.** ? (@.a == - 1)'; - ?column? ----------- @@ -201384,7 +200301,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[1,2,0,3]', '$[*] ? ((2 / @ > 0) is unknown)'); - jsonb_path_query ------------------- @@ -201393,42 +200310,42 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('0', '1 / $'); -ERROR: division by zero +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('0', '1 / $ + 2'); -ERROR: division by zero +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('0', '-(3 + 1 % $)'); -ERROR: division by zero +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('1', '$ + "2"'); -ERROR: right operand of jsonpath operator + is not a single numeric value +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[1, 2]', '3 * $'); -ERROR: right operand of jsonpath operator * is not a single numeric value +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('"a"', '-$'); -ERROR: operand of unary jsonpath operator - is not a numeric value +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[1,"2",3]', '+$'); -ERROR: operand of unary jsonpath operator + is not a numeric value +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('1', '$ + "2"', silent => true); - jsonb_path_query ------------------- @@ -201519,7 +200436,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('{"a": [2]}', 'lax $.a + 3'); - jsonb_path_query ------------------- @@ -201528,7 +200445,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('{"a": [2, 3, 4]}', 'lax -$.a'); - jsonb_path_query ------------------- @@ -201539,13 +200456,13 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev -- should fail select jsonb_path_query('{"a": [1, 2]}', 'lax $.a * 3'); -ERROR: left operand of jsonpath operator * is not a single numeric value +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('{"a": [1, 2]}', 'lax $.a * 3', silent => true); - jsonb_path_query ------------------- @@ -201565,7 +200482,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('2', '$ <= 1'); - jsonb_path_query ------------------- @@ -201574,7 +200491,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('2', '$ == "2"'); - jsonb_path_query ------------------- @@ -201583,7 +200500,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb '2' @? '$ == "2"'; - ?column? ----------- @@ -201658,7 +200575,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_match(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_match('[[1, true], [2, false]]', 'strict $[*] ? (@[0] < $x) [1]', '{"x": 2}'); - jsonb_path_match ------------------- @@ -201667,7 +200584,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_match(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_match('[{"a": 1}, {"a": 2}, 3]', 'lax exists($[*].a)', silent => false); - jsonb_path_match ------------------- @@ -201720,7 +200637,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[null,1,true,"a",[],{}]', 'lax $.type()'); - jsonb_path_query ------------------- @@ -201729,7 +200646,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[null,1,true,"a",[],{}]', '$[*].type()'); - jsonb_path_query ------------------- @@ -201743,7 +200660,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('null', 'null.type()'); - jsonb_path_query ------------------- @@ -201752,7 +200669,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('null', 'true.type()'); - jsonb_path_query ------------------- @@ -201761,7 +200678,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('null', '(123).type()'); - jsonb_path_query ------------------- @@ -201770,7 +200687,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('null', '"123".type()'); - jsonb_path_query ------------------- @@ -201779,7 +200696,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('{"a": 2}', '($.a - 5).abs() + 10'); - jsonb_path_query ------------------- @@ -201788,7 +200705,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('{"a": 2.5}', '-($.a * $.a).floor() % 4.3'); - jsonb_path_query ------------------- @@ -201797,7 +200714,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[1, 2, 3]', '($[*] > 2) ? (@ == true)'); - jsonb_path_query ------------------- @@ -201806,7 +200723,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[1, 2, 3]', '($[*] > 3).type()'); - jsonb_path_query ------------------- @@ -201815,7 +200732,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[1, 2, 3]', '($[*].a > 3).type()'); - jsonb_path_query ------------------- @@ -201824,7 +200741,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[1, 2, 3]', 'strict ($[*].a > 3).type()'); - jsonb_path_query ------------------- @@ -201833,12 +200750,12 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[1,null,true,"11",[],[1],[1,2,3],{},{"a":1,"b":2}]', 'strict $[*].size()'); -ERROR: jsonpath item method .size() can only be applied to an array +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[1,null,true,"11",[],[1],[1,2,3],{},{"a":1,"b":2}]', 'strict $[*].size()', silent => true); - jsonb_path_query ------------------- @@ -201865,7 +200782,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[0, 1, -2, -3.4, 5.6]', '$[*].abs()'); - jsonb_path_query ------------------- @@ -201878,7 +200795,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[0, 1, -2, -3.4, 5.6]', '$[*].floor()'); - jsonb_path_query ------------------- @@ -201891,7 +200808,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[0, 1, -2, -3.4, 5.6]', '$[*].ceiling()'); - jsonb_path_query ------------------- @@ -201904,7 +200821,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[0, 1, -2, -3.4, 5.6]', '$[*].ceiling().abs()'); - jsonb_path_query ------------------- @@ -201917,7 +200834,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[0, 1, -2, -3.4, 5.6]', '$[*].ceiling().abs().type()'); - jsonb_path_query ------------------- @@ -201930,12 +200847,12 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[{},1]', '$[*].keyvalue()'); -ERROR: jsonpath item method .keyvalue() can only be applied to an object +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[{},1]', '$[*].keyvalue()', silent => true); - jsonb_path_query ------------------- @@ -201953,7 +200870,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('{"a": 1, "b": [1, 2], "c": {"a": "bbb"}}', '$.keyvalue()'); - jsonb_path_query ----------------------------------------------- @@ -201964,7 +200881,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[{"a": 1, "b": [1, 2]}, {"c": {"a": "bbb"}}]', '$[*].keyvalue()'); - jsonb_path_query ------------------------------------------------ @@ -201975,12 +200892,12 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[{"a": 1, "b": [1, 2]}, {"c": {"a": "bbb"}}]', 'strict $.keyvalue()'); -ERROR: jsonpath item method .keyvalue() can only be applied to an object +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[{"a": 1, "b": [1, 2]}, {"c": {"a": "bbb"}}]', 'lax $.keyvalue()'); - jsonb_path_query ------------------------------------------------ @@ -201991,12 +200908,12 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[{"a": 1, "b": [1, 2]}, {"c": {"a": "bbb"}}]', 'strict $.keyvalue().a'); -ERROR: jsonpath item method .keyvalue() can only be applied to an object +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb '{"a": 1, "b": [1, 2]}' @? 'lax $.keyvalue()'; - ?column? ----------- @@ -202021,12 +200938,12 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou -ERROR: jsonpath item method .double() can only be applied to a string or numeric value +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('true', '$.double()'); -ERROR: jsonpath item method .double() can only be applied to a string or numeric value +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('null', '$.double()', silent => true); - jsonb_path_query ------------------- @@ -202054,17 +200971,17 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[]', 'strict $.double()'); -ERROR: jsonpath item method .double() can only be applied to a string or numeric value +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('{}', '$.double()'); -ERROR: jsonpath item method .double() can only be applied to a string or numeric value +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[]', 'strict $.double()', silent => true); - jsonb_path_query ------------------- @@ -202093,7 +201010,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('"1.23"', '$.double()'); - jsonb_path_query ------------------- @@ -202102,37 +201019,37 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('"1.23aaa"', '$.double()'); -ERROR: string argument of jsonpath item method .double() is not a valid representation of a double precision number +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('1e1000', '$.double()'); -ERROR: numeric argument of jsonpath item method .double() is out of range for type double precision +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('"nan"', '$.double()'); -ERROR: string argument of jsonpath item method .double() is not a valid representation of a double precision number +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('"NaN"', '$.double()'); -ERROR: string argument of jsonpath item method .double() is not a valid representation of a double precision number +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('"inf"', '$.double()'); -ERROR: string argument of jsonpath item method .double() is not a valid representation of a double precision number +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('"-inf"', '$.double()'); -ERROR: string argument of jsonpath item method .double() is not a valid representation of a double precision number +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('"inf"', '$.double()', silent => true); - jsonb_path_query ------------------- @@ -202157,17 +201074,17 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou -ERROR: jsonpath item method .abs() can only be applied to a numeric value +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('true', '$.floor()'); -ERROR: jsonpath item method .floor() can only be applied to a numeric value +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('"1.2"', '$.ceiling()'); -ERROR: jsonpath item method .ceiling() can only be applied to a numeric value +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('{}', '$.abs()', silent => true); - jsonb_path_query ------------------- @@ -202207,7 +201124,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('["", "a", "abc", "abcabc"]', 'strict $ ? (@[*] starts with "abc")'); - jsonb_path_query ----------------------------- @@ -202216,7 +201133,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('["", "a", "abd", "abdabc"]', 'strict $ ? (@[*] starts with "abc")'); - jsonb_path_query ------------------- @@ -202224,7 +201141,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('["abc", "abcabc", null, 1]', 'strict $ ? (@[*] starts with "abc")'); - jsonb_path_query ------------------- @@ -202232,7 +201149,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('["abc", "abcabc", null, 1]', 'strict $ ? ((@[*] starts with "abc") is unknown)'); - jsonb_path_query ----------------------------- @@ -202241,7 +201158,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[[null, 1, "abc", "abcabc"]]', 'lax $ ? (@[*] starts with "abc")'); - jsonb_path_query ----------------------------- @@ -202250,7 +201167,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[[null, 1, "abd", "abdabc"]]', 'lax $ ? ((@[*] starts with "abc") is unknown)'); - jsonb_path_query ----------------------------- @@ -202259,7 +201176,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[null, 1, "abd", "abdabc"]', 'lax $[*] ? ((@ starts with "abc") is unknown)'); - jsonb_path_query ------------------- @@ -202269,7 +201186,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[null, 1, "abc", "abd", "aBdC", "abdacb", "babc", "adc\nabc", "ab\nadc"]', 'lax $[*] ? (@ like_regex "^ab.*c")'); - jsonb_path_query ------------------- @@ -202279,7 +201196,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[null, 1, "abc", "abd", "aBdC", "abdacb", "babc", "adc\nabc", "ab\nadc"]', 'lax $[*] ? (@ like_regex "^ab.*c" flag "i")'); - jsonb_path_query ------------------- @@ -202290,7 +201207,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[null, 1, "abc", "abd", "aBdC", "abdacb", "babc", "adc\nabc", "ab\nadc"]', 'lax $[*] ? (@ like_regex "^ab.*c" flag "m")'); - jsonb_path_query ------------------- @@ -202301,7 +201218,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[null, 1, "abc", "abd", "aBdC", "abdacb", "babc", "adc\nabc", "ab\nadc"]', 'lax $[*] ? (@ like_regex "^ab.*c" flag "s")'); - jsonb_path_query ------------------- @@ -202312,7 +201229,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[null, 1, "a\b", "a\\b", "^a\\b$"]', 'lax $[*] ? (@ like_regex "a\\b" flag "q")'); - jsonb_path_query ------------------- @@ -202322,7 +201239,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[null, 1, "a\b", "a\\b", "^a\\b$"]', 'lax $[*] ? (@ like_regex "a\\b" flag "")'); - jsonb_path_query ------------------- @@ -202331,7 +201248,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[null, 1, "a\b", "a\\b", "^a\\b$"]', 'lax $[*] ? (@ like_regex "^a\\b$" flag "q")'); - jsonb_path_query ------------------- @@ -202340,7 +201257,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[null, 1, "a\b", "a\\b", "^a\\b$"]', 'lax $[*] ? 
(@ like_regex "^a\\B$" flag "q")'); - jsonb_path_query ------------------- @@ -202348,7 +201265,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[null, 1, "a\b", "a\\b", "^a\\b$"]', 'lax $[*] ? (@ like_regex "^a\\B$" flag "iq")'); - jsonb_path_query ------------------- @@ -202357,7 +201274,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[null, 1, "a\b", "a\\b", "^a\\b$"]', 'lax $[*] ? (@ like_regex "^a\\b$" flag "")'); - jsonb_path_query ------------------- @@ -202366,22 +201283,22 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('null', '$.datetime()'); -ERROR: jsonpath item method .datetime() can only be applied to a string +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('true', '$.datetime()'); -ERROR: jsonpath item method .datetime() can only be applied to a string +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('1', '$.datetime()'); -ERROR: jsonpath item method .datetime() can only be applied to a string +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[]', '$.datetime()'); - jsonb_path_query ------------------- @@ -202389,34 +201306,34 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('[]', 'strict $.datetime()'); -ERROR: jsonpath item method .datetime() can only be applied to a string +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('{}', '$.datetime()'); -ERROR: jsonpath item method .datetime() can only be applied to a string +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('"bogus"', '$.datetime()'); -ERROR: datetime format is not recognized: "bogus" -HINT: Use a datetime template argument to specify the input data format. +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('"12:34"', '$.datetime("aaa")'); -ERROR: invalid datetime format separator: "a" +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('"aaaa"', '$.datetime("HH24")'); -ERROR: invalid value "aa" for "HH24" -DETAIL: Value must be an integer. +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb '"10-03-2017"' @? '$.datetime("dd-mm-yyyy")'; - ?column? ----------- @@ -202435,7 +201352,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('"10-03-2017"', '$.datetime("dd-mm-yyyy").type()'); - jsonb_path_query ------------------- @@ -202444,17 +201361,17 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('"10-03-2017 12:34"', '$.datetime("dd-mm-yyyy")'); -ERROR: trailing characters remain in input string after datetime format +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('"10-03-2017 12:34"', '$.datetime("dd-mm-yyyy").type()'); -ERROR: trailing characters remain in input string after datetime format +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('"10-03-2017 12:34"', ' $.datetime("dd-mm-yyyy HH24:MI").type()'); - jsonb_path_query -------------------------------- @@ -202463,7 +201380,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('"10-03-2017 12:34 +05:20"', '$.datetime("dd-mm-yyyy HH24:MI TZH:TZM").type()'); - jsonb_path_query ----------------------------- @@ -202472,7 +201389,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('"12:34:56"', '$.datetime("HH24:MI:SS").type()'); - jsonb_path_query --------------------------- @@ -202481,7 +201398,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('"12:34:56 +05:20"', '$.datetime("HH24:MI:SS TZH:TZM").type()'); - jsonb_path_query ------------------------ @@ -202490,7 +201407,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('"10-03-2017T12:34:56"', '$.datetime("dd-mm-yyyy\"T\"HH24:MI:SS")'); - jsonb_path_query ------------------------ @@ -202499,17 +201416,17 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('"10-03-2017t12:34:56"', '$.datetime("dd-mm-yyyy\"T\"HH24:MI:SS")'); -ERROR: unmatched format character "T" +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('"10-03-2017 12:34:56"', '$.datetime("dd-mm-yyyy\"T\"HH24:MI:SS")'); -ERROR: unmatched format character "T" +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev set time zone '+00'; select jsonb_path_query('"10-03-2017 12:34"', '$.datetime("dd-mm-yyyy HH24:MI")'); - jsonb_path_query @@ -202519,12 +201436,12 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('"10-03-2017 12:34"', '$.datetime("dd-mm-yyyy HH24:MI TZH")'); -ERROR: input string is too short for datetime format +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('"10-03-2017 12:34 +05"', '$.datetime("dd-mm-yyyy HH24:MI TZH")'); - jsonb_path_query ------------------------------ @@ -202533,7 +201450,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('"10-03-2017 12:34 -05"', '$.datetime("dd-mm-yyyy HH24:MI TZH")'); - jsonb_path_query ------------------------------ @@ -202542,7 +201459,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('"10-03-2017 12:34 +05:20"', '$.datetime("dd-mm-yyyy HH24:MI TZH:TZM")'); - jsonb_path_query ------------------------------ @@ -202551,7 +201468,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('"10-03-2017 12:34 -05:20"', '$.datetime("dd-mm-yyyy HH24:MI TZH:TZM")'); - jsonb_path_query ------------------------------ @@ -202560,7 +201477,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('"12:34"', '$.datetime("HH24:MI")'); - jsonb_path_query ------------------- @@ -202569,12 +201486,12 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('"12:34"', '$.datetime("HH24:MI TZH")'); -ERROR: input string is too short for datetime format +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('"12:34 +05"', '$.datetime("HH24:MI TZH")'); - jsonb_path_query ------------------- @@ -202583,7 +201500,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('"12:34 -05"', '$.datetime("HH24:MI TZH")'); - jsonb_path_query ------------------- @@ -202592,7 +201509,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('"12:34 +05:20"', '$.datetime("HH24:MI TZH:TZM")'); - jsonb_path_query ------------------- @@ -202601,7 +201518,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('"12:34 -05:20"', '$.datetime("HH24:MI TZH:TZM")'); - jsonb_path_query ------------------- @@ -202610,7 +201527,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev set time zone '+10'; select jsonb_path_query('"10-03-2017 12:34"', '$.datetime("dd-mm-yyyy HH24:MI")'); - jsonb_path_query @@ -202620,12 +201537,12 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('"10-03-2017 12:34"', '$.datetime("dd-mm-yyyy HH24:MI TZH")'); -ERROR: input string is too short for datetime format +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('"10-03-2017 12:34 +05"', '$.datetime("dd-mm-yyyy HH24:MI TZH")'); - jsonb_path_query ------------------------------ @@ -202634,7 +201551,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('"10-03-2017 12:34 -05"', '$.datetime("dd-mm-yyyy HH24:MI TZH")'); - jsonb_path_query ------------------------------ @@ -202643,7 +201560,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('"10-03-2017 12:34 +05:20"', '$.datetime("dd-mm-yyyy HH24:MI TZH:TZM")'); - jsonb_path_query ------------------------------ @@ -202652,7 +201569,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('"10-03-2017 12:34 -05:20"', '$.datetime("dd-mm-yyyy HH24:MI TZH:TZM")'); - jsonb_path_query ------------------------------ @@ -202661,7 +201578,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('"12:34"', '$.datetime("HH24:MI")'); - jsonb_path_query ------------------- @@ -202670,12 +201587,12 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('"12:34"', '$.datetime("HH24:MI TZH")'); -ERROR: input string is too short for datetime format +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('"12:34 +05"', '$.datetime("HH24:MI TZH")'); - jsonb_path_query ------------------- @@ -202684,7 +201601,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('"12:34 -05"', '$.datetime("HH24:MI TZH")'); - jsonb_path_query ------------------- @@ -202693,7 +201610,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('"12:34 +05:20"', '$.datetime("HH24:MI TZH:TZM")'); - jsonb_path_query ------------------- @@ -202702,7 +201619,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('"12:34 -05:20"', '$.datetime("HH24:MI TZH:TZM")'); - jsonb_path_query ------------------- @@ -202711,7 +201628,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev set time zone default; select jsonb_path_query('"2017-03-10"', '$.datetime().type()'); - jsonb_path_query @@ -202721,7 +201638,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('"2017-03-10"', '$.datetime()'); - jsonb_path_query ------------------- @@ -202730,7 +201647,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('"2017-03-10 12:34:56"', '$.datetime().type()'); - jsonb_path_query -------------------------------- @@ -202739,7 +201656,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('"2017-03-10 12:34:56"', '$.datetime()'); - jsonb_path_query ------------------------ @@ -202748,7 +201665,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('"2017-03-10 12:34:56+3"', '$.datetime().type()'); - jsonb_path_query ----------------------------- @@ -202757,7 +201674,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('"2017-03-10 12:34:56+3"', '$.datetime()'); - jsonb_path_query ------------------------------ @@ -202766,7 +201683,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('"2017-03-10 12:34:56+3:10"', '$.datetime().type()'); - jsonb_path_query ----------------------------- @@ -202775,7 +201692,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('"2017-03-10 12:34:56+3:10"', '$.datetime()'); - jsonb_path_query ------------------------------ @@ -202784,7 +201701,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('"2017-03-10T12:34:56+3:10"', '$.datetime()'); - jsonb_path_query ------------------------------ @@ -202793,13 +201710,13 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('"2017-03-10t12:34:56+3:10"', '$.datetime()'); -ERROR: datetime format is not recognized: "2017-03-10t12:34:56+3:10" -HINT: Use a datetime template argument to specify the input data format. +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('"2017-03-10 12:34:56.789+3:10"', '$.datetime()'); - jsonb_path_query ---------------------------------- @@ -202808,7 +201725,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('"2017-03-10T12:34:56.789+3:10"', '$.datetime()'); - jsonb_path_query ---------------------------------- @@ -202817,13 +201734,13 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('"2017-03-10t12:34:56.789+3:10"', '$.datetime()'); -ERROR: datetime format is not recognized: "2017-03-10t12:34:56.789+3:10" -HINT: Use a datetime template argument to specify the input data format. +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('"12:34:56"', '$.datetime().type()'); - jsonb_path_query --------------------------- @@ -202832,7 +201749,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('"12:34:56"', '$.datetime()'); - jsonb_path_query ------------------- @@ -202841,7 +201758,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('"12:34:56+3"', '$.datetime().type()'); - jsonb_path_query ------------------------ @@ -202850,7 +201767,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('"12:34:56+3"', '$.datetime()'); - jsonb_path_query ------------------- @@ -202859,7 +201776,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('"12:34:56+3:10"', '$.datetime().type()'); - jsonb_path_query ------------------------ @@ -202868,7 +201785,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query('"12:34:56+3:10"', '$.datetime()'); - jsonb_path_query ------------------- @@ -202877,7 +201794,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev set time zone '+00'; -- date comparison select jsonb_path_query( @@ -202887,7 +201804,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou -HINT: Use *_tz() function for time zone support. +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query( '["2017-03-10", "2017-03-11", "2017-03-09", "12:34:56", "01:02:03+04", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]', '$[*].datetime() ? (@ >= "10.03.2017".datetime("dd.mm.yyyy"))'); @@ -202895,7 +201812,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou -HINT: Use *_tz() function for time zone support. +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query( '["2017-03-10", "2017-03-11", "2017-03-09", "12:34:56", "01:02:03+04", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]', '$[*].datetime() ? (@ < "10.03.2017".datetime("dd.mm.yyyy"))'); @@ -202903,7 +201820,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou -HINT: Use *_tz() function for time zone support. +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query_tz( '["2017-03-10", "2017-03-11", "2017-03-09", "12:34:56", "01:02:03+04", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]', '$[*].datetime() ? (@ == "10.03.2017".datetime("dd.mm.yyyy"))'); @@ -202946,7 +201863,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou -HINT: Use *_tz() function for time zone support. +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query( '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]', '$[*].datetime() ? (@ >= "12:35".datetime("HH24:MI"))'); @@ -202954,7 +201871,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou -HINT: Use *_tz() function for time zone support. +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query( '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]', '$[*].datetime() ? (@ < "12:35".datetime("HH24:MI"))'); @@ -202962,7 +201879,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou -HINT: Use *_tz() function for time zone support. +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query_tz( '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]', '$[*].datetime() ? (@ == "12:35".datetime("HH24:MI"))'); @@ -203003,7 +201920,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou -HINT: Use *_tz() function for time zone support. +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query( '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +1"]', '$[*].datetime() ? (@ >= "12:35 +1".datetime("HH24:MI TZH"))'); @@ -203011,7 +201928,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou -HINT: Use *_tz() function for time zone support. +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query( '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +1"]', '$[*].datetime() ? (@ < "12:35 +1".datetime("HH24:MI TZH"))'); @@ -203019,7 +201936,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou -HINT: Use *_tz() function for time zone support. +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query_tz( '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +1"]', '$[*].datetime() ? (@ == "12:35 +1".datetime("HH24:MI TZH"))'); @@ -203061,7 +201978,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou -HINT: Use *_tz() function for time zone support. +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query( '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', '$[*].datetime() ? (@ >= "10.03.2017 12:35".datetime("dd.mm.yyyy HH24:MI"))'); @@ -203069,7 +201986,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou -HINT: Use *_tz() function for time zone support. +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query( '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', '$[*].datetime() ? (@ < "10.03.2017 12:35".datetime("dd.mm.yyyy HH24:MI"))'); @@ -203077,7 +201994,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou -HINT: Use *_tz() function for time zone support. +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query_tz( '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', '$[*].datetime() ? (@ == "10.03.2017 12:35".datetime("dd.mm.yyyy HH24:MI"))'); @@ -203120,7 +202037,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou -HINT: Use *_tz() function for time zone support. +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query( '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', '$[*].datetime() ? (@ >= "10.03.2017 12:35 +1".datetime("dd.mm.yyyy HH24:MI TZH"))'); @@ -203128,7 +202045,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou -HINT: Use *_tz() function for time zone support. +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query( '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', '$[*].datetime() ? (@ < "10.03.2017 12:35 +1".datetime("dd.mm.yyyy HH24:MI TZH"))'); @@ -203136,7 +202053,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou -HINT: Use *_tz() function for time zone support. 
+ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev select jsonb_path_query_tz( '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', '$[*].datetime() ? (@ == "10.03.2017 12:35 +1".datetime("dd.mm.yyyy HH24:MI TZH"))'); @@ -203185,7 +202102,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou +select jsonb_path_query('"1000000-01-01"', '$.datetime() > "2020-01-01 12:00:00".datetime()'::jsonpath) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev set time zone default; -- jsonpath operators SELECT jsonb_path_query('[{"a": 1}, {"a": 2}]', '$[*]'); @@ -203197,7 +202114,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev SELECT jsonb_path_query('[{"a": 1}, {"a": 2}]', '$[*] ? (@.a > 10)'); - jsonb_path_query ------------------- @@ -203205,12 +202122,12 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev SELECT jsonb_path_query('[{"a": 1}]', '$undefined_var'); -ERROR: could not find jsonpath variable "undefined_var" +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev SELECT jsonb_path_query('[{"a": 1}]', 'false'); - jsonb_path_query ------------------- @@ -203219,12 +202136,12 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev SELECT jsonb_path_query_array('[{"a": 1}, {"a": 2}, {}]', 'strict $[*].a'); -ERROR: JSON object does not contain key "a" +ERROR: jsonb_path_query_array(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev SELECT jsonb_path_query_array('[{"a": 1}, {"a": 2}]', '$[*].a'); - jsonb_path_query_array ------------------------- @@ -203233,7 +202150,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query_array(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev SELECT jsonb_path_query_array('[{"a": 1}, {"a": 2}]', '$[*].a ? (@ == 1)'); - jsonb_path_query_array ------------------------- @@ -203242,7 +202159,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query_array(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev SELECT jsonb_path_query_array('[{"a": 1}, {"a": 2}]', '$[*].a ? (@ > 10)'); - jsonb_path_query_array ------------------------- @@ -203251,7 +202168,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query_array(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev SELECT jsonb_path_query_array('[{"a": 1}, {"a": 2}, {"a": 3}, {"a": 5}]', '$[*].a ? (@ > $min && @ < $max)', vars => '{"min": 1, "max": 4}'); - jsonb_path_query_array ------------------------- @@ -203278,7 +202195,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou -ERROR: JSON object does not contain key "a" +ERROR: jsonb_path_query_first(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev SELECT jsonb_path_query_first('[{"a": 1}, {"a": 2}, {}]', 'strict $[*].a', silent => true); - jsonb_path_query_first ------------------------- @@ -203298,7 +202215,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query_first(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev SELECT jsonb_path_query_first('[{"a": 1}, {"a": 2}]', '$[*].a ? (@ == 1)'); - jsonb_path_query_first ------------------------- @@ -203307,7 +202224,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query_first(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev SELECT jsonb_path_query_first('[{"a": 1}, {"a": 2}]', '$[*].a ? (@ > 10)'); - jsonb_path_query_first ------------------------- @@ -203316,7 +202233,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query_first(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev SELECT jsonb_path_query_first('[{"a": 1}, {"a": 2}, {"a": 3}, {"a": 5}]', '$[*].a ? 
(@ > $min && @ < $max)', vars => '{"min": 1, "max": 4}'); - jsonb_path_query_first ------------------------- @@ -203343,7 +202260,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou -ERROR: could not find jsonpath variable "undefined_var" +ERROR: jsonb_path_query_first(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev SELECT jsonb_path_query_first('[{"a": 1}]', 'false'); - jsonb_path_query_first ------------------------- @@ -203352,7 +202269,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_query_first(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev SELECT jsonb '[{"a": 1}, {"a": 2}]' @? '$[*].a ? (@ > 1)'; - ?column? ----------- @@ -203381,7 +202298,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_exists(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev SELECT jsonb_path_exists('[{"a": 1}, {"a": 2}, {"a": 3}, {"a": 5}]', '$[*] ? (@.a > $min && @.a < $max)', vars => '{"min": 1, "max": 4}'); - jsonb_path_exists -------------------- @@ -203408,7 +202325,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou -ERROR: could not find jsonpath variable "undefined_var" +ERROR: jsonb_path_exists(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev SELECT jsonb_path_exists('[{"a": 1}]', 'false'); - jsonb_path_exists -------------------- @@ -203417,7 +202334,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_exists(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev SELECT jsonb_path_match('true', '$', silent => false); - jsonb_path_match ------------------- @@ -203544,12 +202461,12 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_match(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev SELECT jsonb_path_match('[{"a": 1}]', '$undefined_var'); -ERROR: could not find jsonpath variable "undefined_var" +ERROR: jsonb_path_match(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev SELECT jsonb_path_match('[{"a": 1}]', 'false'); - jsonb_path_match ------------------- @@ -203558,7 +202475,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb_jsonpath.ou - +ERROR: jsonb_path_match(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev -- test string comparison (Unicode codepoint collation) WITH str(j, num) AS ( @@ -204188,7 +203105,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb.out --label - "infinity" + to_jsonb +-------------------------------- -+ "294276-12-31T23:59:59.999999" ++ "294277-01-01T23:59:59.999999" (1 row) select to_jsonb(timestamp '-Infinity'); @@ -204197,7 +203114,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb.out --label - "-infinity" + to_jsonb +------------------------ -+ "-4713-11-24T00:00:00" ++ "-4714-11-23T00:00:00" (1 row) select to_jsonb(timestamptz 'Infinity'); @@ -204206,7 +203123,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb.out --label - "infinity" + to_jsonb +-------------------------------------- -+ "294276-12-31T15:59:59.999999-08:00" ++ "294277-01-01T15:59:59.999999-08:00" (1 row) select to_jsonb(timestamptz '-Infinity'); @@ -204215,7 +203132,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb.out --label - "-infinity" + to_jsonb +------------------------------ -+ "-4713-11-23T16:00:00-08:00" ++ "-4714-11-22T16:00:00-08:00" (1 row) --jsonb_agg @@ -204593,7 +203510,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb.out --label +SELECT jsonb_build_array(VARIADIC '{{1,4},{2,5},{3,6}}'::int[][]) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/32552/v24.2 ++See: https://go.crdb.dev/issue-v/32552/dev SELECT jsonb_build_object('a',1,'b',1.2,'c',true,'d',null,'e',json '{"x": 3, "y": [1,2,3]}'); jsonb_build_object ------------------------------------------------------------------------- @@ -204766,7 +203683,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb.out --label +SELECT jsonb_build_object(VARIADIC '{{1,4},{2,5},{3,6}}'::int[][]) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/32552/v24.2 ++See: https://go.crdb.dev/issue-v/32552/dev -- empty objects/arrays SELECT jsonb_build_array(); jsonb_build_array @@ -204905,21 +203822,21 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb.out --label +CREATE DOMAIN jsb_int_not_null AS int NOT NULL + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev CREATE DOMAIN jsb_int_array_1d AS int[] CHECK(array_length(VALUE, 1) = 3); +ERROR: at or near "as": syntax error: unimplemented: this syntax +DETAIL: source SQL: +CREATE DOMAIN jsb_int_array_1d AS int[] CHECK(array_length(VALUE, 1) = 3) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev CREATE DOMAIN jsb_int_array_2d AS int[][] CHECK(array_length(VALUE, 2) = 3); +ERROR: at or near "as": syntax error: unimplemented: this syntax +DETAIL: source SQL: +CREATE DOMAIN jsb_int_array_2d AS int[][] CHECK(array_length(VALUE, 2) = 3) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev create type jb_unordered_pair as (x int, y int); create domain jb_ordered_pair as jb_unordered_pair check((value).x <= (value).y); +ERROR: at or near "as": syntax error: unimplemented: this syntax @@ -204927,7 +203844,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb.out --label +create domain jb_ordered_pair as jb_unordered_pair check((value).x <= (value).y) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev CREATE TYPE jsbrec AS ( i int, ia _int4, @@ -205675,7 +204592,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb.out --label + as t(a int, b jsonb, c text, x int, ca char(5)[], ia int[][], r jbpop) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/32552/v24.2 ++See: https://go.crdb.dev/issue-v/32552/dev select *, c is null as c_is_null from jsonb_to_recordset('[{"a":1, "b":{"c":16, "d":2}, "x":8}]'::jsonb) as t(a int, b jsonb, c text, x int); @@ -205716,7 +204633,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb.out --label +select * from jsonb_to_record('{"ia2": [1, 2, 3]}') as x(ia2 int[][]) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/32552/v24.2 ++See: https://go.crdb.dev/issue-v/32552/dev select * from jsonb_to_record('{"ia2": [[1, 2], [3, 4]]}') as x(ia2 int4[][]); - ia2 ---------------- @@ -205728,7 +204645,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb.out --label +select * from jsonb_to_record('{"ia2": [[1, 2], [3, 4]]}') as x(ia2 int4[][]) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/32552/v24.2 ++See: https://go.crdb.dev/issue-v/32552/dev select * from jsonb_to_record('{"ia2": [[[1], [2], [3]]]}') as x(ia2 int4[][]); - ia2 ------------------ @@ -205740,7 +204657,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb.out --label +select * from jsonb_to_record('{"ia2": [[[1], [2], [3]]]}') as x(ia2 int4[][]) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/32552/v24.2 ++See: https://go.crdb.dev/issue-v/32552/dev select * from jsonb_to_record('{"out": {"key": 1}}') as x(out json); out ------------ @@ -205792,28 +204709,28 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb.out --label +DROP DOMAIN jsb_int_not_null + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev DROP DOMAIN jsb_int_array_1d; +ERROR: at or near "jsb_int_array_1d": syntax error: unimplemented: this syntax +DETAIL: source SQL: +DROP DOMAIN jsb_int_array_1d + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev DROP DOMAIN jsb_int_array_2d; +ERROR: at or near "jsb_int_array_2d": syntax error: unimplemented: this syntax +DETAIL: source SQL: +DROP DOMAIN jsb_int_array_2d + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev DROP DOMAIN jb_ordered_pair; +ERROR: at or near "jb_ordered_pair": syntax error: unimplemented: this syntax +DETAIL: source SQL: +DROP DOMAIN jb_ordered_pair + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev DROP TYPE jb_unordered_pair; -- indexing SELECT count(*) FROM testjsonb WHERE j @> '{"wait":null}'; @@ -206564,7 +205481,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb.out --label CREATE INDEX jidx ON testjsonb USING gin (j jsonb_path_ops); +ERROR: unimplemented: operator class "jsonb_path_ops" is not supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/81115/v24.2 ++See: https://go.crdb.dev/issue-v/81115/dev SET enable_seqscan = off; +WARNING: setting session var "enable_seqscan" is a no-op SELECT count(*) FROM testjsonb WHERE j @> '{"wait":null}'; @@ -206840,7 +205757,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb.out --label create index on nestjsonb using gin(j jsonb_path_ops); +ERROR: unimplemented: operator class "jsonb_path_ops" is not supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/81115/v24.2 ++See: https://go.crdb.dev/issue-v/81115/dev set enable_seqscan = on; +WARNING: setting session var "enable_seqscan" is a no-op set enable_bitmapscan = off; @@ -208004,7 +206921,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb.out --label - +ERROR: jsonb_to_tsvector(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"key"'); - jsonb_to_tsvector --------------------------------- @@ -208013,7 +206930,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb.out --label - +ERROR: jsonb_to_tsvector(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"string"'); - jsonb_to_tsvector -------------------- @@ -208022,7 +206939,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb.out --label - +ERROR: jsonb_to_tsvector(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"numeric"'); - jsonb_to_tsvector -------------------- @@ -208031,7 +206948,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb.out --label - +ERROR: jsonb_to_tsvector(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"boolean"'); - jsonb_to_tsvector -------------------- @@ -208040,7 +206957,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb.out --label - +ERROR: jsonb_to_tsvector(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '["string", "numeric"]'); - jsonb_to_tsvector ---------------------------------- @@ -208049,7 +206966,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb.out --label - +ERROR: jsonb_to_tsvector(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"all"'); - jsonb_to_tsvector ----------------------------------------------------------------------------------------- @@ -208058,7 +206975,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb.out --label - +ERROR: jsonb_to_tsvector(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"key"'); - jsonb_to_tsvector --------------------------------- @@ -208067,7 +206984,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb.out --label - +ERROR: jsonb_to_tsvector(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"string"'); - jsonb_to_tsvector -------------------- @@ -208076,7 +206993,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb.out --label - +ERROR: jsonb_to_tsvector(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"numeric"'); - jsonb_to_tsvector -------------------- @@ -208085,7 +207002,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb.out --label - +ERROR: jsonb_to_tsvector(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"boolean"'); - jsonb_to_tsvector -------------------- @@ -208094,7 +207011,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb.out --label - +ERROR: jsonb_to_tsvector(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '["string", "numeric"]'); - jsonb_to_tsvector ---------------------------------- @@ -208103,7 +207020,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb.out --label - +ERROR: jsonb_to_tsvector(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev -- to_tsvector corner cases select to_tsvector('""'::jsonb); - to_tsvector @@ -208146,7 +207063,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb.out --label - +ERROR: jsonb_to_tsvector(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select jsonb_to_tsvector('{}'::jsonb, '"all"'); - jsonb_to_tsvector -------------------- @@ -208155,7 +207072,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb.out --label - +ERROR: jsonb_to_tsvector(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select jsonb_to_tsvector('[]'::jsonb, '"all"'); - jsonb_to_tsvector -------------------- @@ -208164,7 +207081,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb.out --label - +ERROR: jsonb_to_tsvector(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select jsonb_to_tsvector('null'::jsonb, '"all"'); - jsonb_to_tsvector -------------------- @@ -208173,18 +207090,18 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb.out --label - +ERROR: jsonb_to_tsvector(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '""'); -ERROR: wrong flag in flag array: "" -HINT: Possible values are: "string", "numeric", "boolean", "key", and "all". +ERROR: jsonb_to_tsvector(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '{}'); -ERROR: wrong flag type, only arrays and scalars are allowed +ERROR: jsonb_to_tsvector(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '[]'); - jsonb_to_tsvector -------------------- @@ -208193,19 +207110,19 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb.out --label - +ERROR: jsonb_to_tsvector(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, 'null'); -ERROR: flag array element is not a string -HINT: Possible values are: "string", "numeric", "boolean", "key", and "all". +ERROR: jsonb_to_tsvector(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '["all", null]'); -ERROR: flag array element is not a string -HINT: Possible values are: "string", "numeric", "boolean", "key", and "all". +ERROR: jsonb_to_tsvector(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev -- ts_headline for jsonb select ts_headline('{"a": "aaa bbb", "b": {"c": "ccc ddd fff", "c1": "ccc1 ddd1"}, "d": ["ggg hhh", "iii jjj"]}'::jsonb, tsquery('bbb & ddd & hhh')); - ts_headline @@ -208215,7 +207132,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb.out --label - +ERROR: ts_headline(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select ts_headline('english', '{"a": "aaa bbb", "b": {"c": "ccc ddd fff"}, "d": ["ggg hhh", "iii jjj"]}'::jsonb, tsquery('bbb & ddd & hhh')); - ts_headline ------------------------------------------------------------------------------------------------ @@ -208224,7 +207141,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb.out --label - +ERROR: ts_headline(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select ts_headline('{"a": "aaa bbb", "b": {"c": "ccc ddd fff", "c1": "ccc1 ddd1"}, "d": ["ggg hhh", "iii jjj"]}'::jsonb, tsquery('bbb & ddd & hhh'), 'StartSel = <, StopSel = >'); - ts_headline ---------------------------------------------------------------------------------------------------- @@ -208233,7 +207150,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb.out --label - +ERROR: ts_headline(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select ts_headline('english', '{"a": "aaa bbb", "b": {"c": "ccc ddd fff", "c1": "ccc1 ddd1"}, "d": ["ggg hhh", "iii jjj"]}'::jsonb, tsquery('bbb & ddd & hhh'), 'StartSel = <, StopSel = >'); - ts_headline ---------------------------------------------------------------------------------------------------- @@ -208242,7 +207159,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb.out --label - +ERROR: ts_headline(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev -- corner cases for ts_headline with jsonb select ts_headline('null'::jsonb, tsquery('aaa & bbb')); - ts_headline @@ -208252,7 +207169,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb.out --label - +ERROR: ts_headline(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select ts_headline('{}'::jsonb, tsquery('aaa & bbb')); - ts_headline -------------- @@ -208261,7 +207178,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb.out --label - +ERROR: ts_headline(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev select ts_headline('[]'::jsonb, tsquery('aaa & bbb')); - ts_headline -------------- @@ -208270,7 +207187,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/jsonb.out --label - +ERROR: ts_headline(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev -- casts select 'true'::jsonb::bool; bool @@ -209141,7 +208058,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/returning.out --l +CREATE TEMP TABLE foochild (fc int) INHERITS (foo) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev INSERT INTO foochild VALUES(123,'child',999,-123); +ERROR: relation "foochild" does not exist ALTER TABLE foo ADD COLUMN f4 int8 DEFAULT 99; @@ -209784,7 +208701,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plancache.out --l -- Test plan_cache_mode create table test_mode (a int); insert into test_mode select 1 from generate_series(1,1000) union all select 2; -@@ -286,47 +320,34 @@ +@@ -286,45 +320,29 @@ prepare test_mode_pp (int) as select count(*) from test_mode where a = $1; select name, generic_plans, custom_plans from pg_prepared_statements where name = 'test_mode_pp'; @@ -209796,7 +208713,6 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plancache.out --l +ERROR: column "generic_plans" does not exist -- up to 5 executions, custom plan is used set plan_cache_mode to auto; -+ERROR: unrecognized configuration parameter "plan_cache_mode" explain (costs off) execute test_mode_pp(2); - QUERY PLAN ----------------------------------------------------------- @@ -209820,7 +208736,6 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plancache.out --l +ERROR: column "generic_plans" does not exist -- force generic plan set plan_cache_mode to force_generic_plan; -+ERROR: unrecognized configuration parameter "plan_cache_mode" explain (costs off) execute test_mode_pp(2); - QUERY PLAN ------------------------------ @@ -209844,11 +208759,8 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plancache.out --l +ERROR: column "generic_plans" does not exist -- get to generic plan by 5 executions set plan_cache_mode to auto; -+ERROR: unrecognized configuration parameter "plan_cache_mode" execute test_mode_pp(1); -- 1x - count - ------- -@@ -353,11 +374,7 @@ +@@ -353,11 +371,7 @@ select name, generic_plans, custom_plans from pg_prepared_statements where name = 'test_mode_pp'; @@ -209861,7 +208773,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plancache.out --l execute test_mode_pp(1); -- 5x count ------- -@@ -366,35 +383,24 @@ +@@ -366,35 +380,23 @@ select name, generic_plans, custom_plans from pg_prepared_statements where name = 'test_mode_pp'; @@ -209887,7 +208799,6 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plancache.out --l +HINT: try \h -- but we can force a custom plan set plan_cache_mode to force_custom_plan; -+ERROR: unrecognized configuration parameter "plan_cache_mode" explain (costs off) execute test_mode_pp(2); - QUERY PLAN ----------------------------------------------------------- @@ -209926,7 +208837,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/conversion.out -- LANGUAGE C STRICT; +ERROR: unimplemented: C is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/102201/v24.2 ++See: https://go.crdb.dev/issue-v/102201/dev CREATE USER regress_conversion_user WITH NOCREATEDB NOCREATEROLE; SET SESSION AUTHORIZATION regress_conversion_user; +ERROR: at or near "regress_conversion_user": syntax error: unimplemented: this syntax @@ -209934,7 +208845,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/conversion.out -- +SET SESSION AUTHORIZATION regress_conversion_user + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE CONVERSION myconv FOR 'LATIN1' TO 'UTF8' FROM iso8859_1_to_utf8; +ERROR: at or near "myconv": syntax error: unimplemented: this syntax +DETAIL: source SQL: @@ -210648,7 +209559,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/xml_1.out --label +) +^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/43355/v24.2 ++See: https://go.crdb.dev/issue-v/43355/dev INSERT INTO xmltest VALUES (1, 'one'); -ERROR: unsupported XML feature -LINE 1: INSERT INTO xmltest VALUES (1, 'one'); @@ -211071,7 +209982,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/xml_1.out --label +SELECT xmlroot(xml '', version no value, standalone no value) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/43355/v24.2 ++See: https://go.crdb.dev/issue-v/43355/dev SELECT xmlroot(xml '', version '2.0'); -ERROR: unsupported XML feature -LINE 1: SELECT xmlroot(xml '', version '2.0'); @@ -211082,7 +209993,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/xml_1.out --label +SELECT xmlroot(xml '', version '2.0') + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/43355/v24.2 ++See: https://go.crdb.dev/issue-v/43355/dev SELECT xmlroot(xml '', version no value, standalone yes); -ERROR: unsupported XML feature -LINE 1: SELECT xmlroot(xml '', version no value, standalone ye... @@ -211093,7 +210004,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/xml_1.out --label +SELECT xmlroot(xml '', version no value, standalone yes) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/43355/v24.2 ++See: https://go.crdb.dev/issue-v/43355/dev SELECT xmlroot(xml '', version no value, standalone yes); -ERROR: unsupported XML feature -LINE 1: SELECT xmlroot(xml '', version no... @@ -211104,7 +210015,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/xml_1.out --label +SELECT xmlroot(xml '', version no value, standalone yes) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/43355/v24.2 ++See: https://go.crdb.dev/issue-v/43355/dev SELECT xmlroot(xmlroot(xml '', version '1.0'), version '1.1', standalone no); -ERROR: unsupported XML feature -LINE 1: SELECT xmlroot(xmlroot(xml '', version '1.0'), version... @@ -211115,7 +210026,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/xml_1.out --label +SELECT xmlroot(xmlroot(xml '', version '1.0'), version '1.1', standalone no) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/43355/v24.2 ++See: https://go.crdb.dev/issue-v/43355/dev SELECT xmlroot('', version no value, standalone no); -ERROR: unsupported XML feature -LINE 1: SELECT xmlroot('... @@ -211425,7 +210336,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/xml_1.out --label +SELECT xml 'bar' IS DOCUMENT + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/43355/v24.2 ++See: https://go.crdb.dev/issue-v/43355/dev SELECT xml 'barfoo' IS DOCUMENT; -ERROR: unsupported XML feature -LINE 1: SELECT xml 'barfoo' IS DOCUMENT; @@ -211436,7 +210347,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/xml_1.out --label +SELECT xml 'barfoo' IS DOCUMENT + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/43355/v24.2 ++See: https://go.crdb.dev/issue-v/43355/dev SELECT xml '' IS NOT DOCUMENT; -ERROR: unsupported XML feature -LINE 1: SELECT xml '' IS NOT DOCUMENT; @@ -211447,7 +210358,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/xml_1.out --label +SELECT xml '' IS NOT DOCUMENT + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/43355/v24.2 ++See: https://go.crdb.dev/issue-v/43355/dev SELECT xml 'abc' IS NOT DOCUMENT; -ERROR: unsupported XML feature -LINE 1: SELECT xml 'abc' IS NOT DOCUMENT; @@ -211458,7 +210369,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/xml_1.out --label +SELECT xml 'abc' IS NOT DOCUMENT + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/43355/v24.2 ++See: https://go.crdb.dev/issue-v/43355/dev SELECT '<>' IS NOT DOCUMENT; -ERROR: unsupported XML feature -LINE 1: SELECT '<>' IS NOT DOCUMENT; @@ -211514,7 +210425,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/xml_1.out --label +PREPARE foo (xml) AS SELECT xmlconcat('', $1) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/43355/v24.2 ++See: https://go.crdb.dev/issue-v/43355/dev SET XML OPTION DOCUMENT; +ERROR: at or near "option": syntax error +DETAIL: source SQL: @@ -211535,7 +210446,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/xml_1.out --label +SELECT xml '' + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/43355/v24.2 ++See: https://go.crdb.dev/issue-v/43355/dev SET XML OPTION CONTENT; +ERROR: at or near "option": syntax error +DETAIL: source SQL: @@ -211556,7 +210467,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/xml_1.out --label +SELECT xml ' ' + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/43355/v24.2 ++See: https://go.crdb.dev/issue-v/43355/dev SELECT xml ' '; -ERROR: unsupported XML feature -LINE 1: SELECT xml ' ' + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/43355/v24.2 ++See: https://go.crdb.dev/issue-v/43355/dev SELECT xml ''; -ERROR: unsupported XML feature -LINE 1: SELECT xml ''; @@ -211578,7 +210489,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/xml_1.out --label +SELECT xml '' + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/43355/v24.2 ++See: https://go.crdb.dev/issue-v/43355/dev SELECT xml ' oops '; -ERROR: unsupported XML feature -LINE 1: SELECT xml ' oops '; @@ -211589,7 +210500,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/xml_1.out --label +SELECT xml ' oops ' + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/43355/v24.2 ++See: https://go.crdb.dev/issue-v/43355/dev SELECT xml ' '; -ERROR: unsupported XML feature -LINE 1: SELECT xml ' '; @@ -211600,7 +210511,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/xml_1.out --label +SELECT xml ' ' + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/43355/v24.2 ++See: https://go.crdb.dev/issue-v/43355/dev SELECT xml ''; -ERROR: unsupported XML feature -LINE 1: SELECT xml ''; @@ -211611,7 +210522,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/xml_1.out --label +SELECT xml '' + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/43355/v24.2 ++See: https://go.crdb.dev/issue-v/43355/dev -- Test backwards parsing CREATE VIEW xmlview1 AS SELECT xmlcomment('test'); +ERROR: unknown function: xmlcomment() @@ -211657,7 +210568,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/xml_1.out --label +CREATE VIEW xmlview7 AS SELECT xmlroot(xml '', version no value, standalone yes) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/43355/v24.2 ++See: https://go.crdb.dev/issue-v/43355/dev CREATE VIEW xmlview8 AS SELECT xmlserialize(content 'good' as char(10)); -ERROR: unsupported XML feature -LINE 1: ...EATE VIEW xmlview8 AS SELECT xmlserialize(content 'good' as ... @@ -211839,7 +210750,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/xml_1.out --label +SELECT xpath_exists('//town[text() = ''Toronto'']','Bidford-on-AvonCwmbranBristol'::xml) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/43355/v24.2 ++See: https://go.crdb.dev/issue-v/43355/dev SELECT xpath_exists('//town[text() = ''Cwmbran'']','Bidford-on-AvonCwmbranBristol'::xml); -ERROR: unsupported XML feature -LINE 1: ...ELECT xpath_exists('//town[text() = ''Cwmbran'']','Bidford-on-AvonCwmbranBristol'::xml) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/43355/v24.2 ++See: https://go.crdb.dev/issue-v/43355/dev SELECT xpath_exists('count(/nosuchtag)', ''::xml); -ERROR: unsupported XML feature -LINE 1: SELECT xpath_exists('count(/nosuchtag)', ''::xml); @@ -211861,7 +210772,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/xml_1.out --label +SELECT xpath_exists('count(/nosuchtag)', ''::xml) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/43355/v24.2 ++See: https://go.crdb.dev/issue-v/43355/dev INSERT INTO xmltest VALUES (4, 'BudvarfreeCarlinglots'::xml); -ERROR: unsupported XML feature -LINE 1: INSERT INTO xmltest VALUES (4, 'BudvarBudvarfreeCarlinglots'::xml) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/43355/v24.2 ++See: https://go.crdb.dev/issue-v/43355/dev INSERT INTO xmltest VALUES (5, 'MolsonfreeCarlinglots'::xml); -ERROR: unsupported XML feature -LINE 1: INSERT INTO xmltest VALUES (5, 'MolsonMolsonfreeCarlinglots'::xml) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/43355/v24.2 ++See: https://go.crdb.dev/issue-v/43355/dev INSERT INTO xmltest VALUES (6, 'BudvarfreeCarlinglots'::xml); -ERROR: unsupported XML feature -LINE 1: INSERT INTO xmltest VALUES (6, 'BudvarfreeCarlinglots'::xml) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/43355/v24.2 ++See: https://go.crdb.dev/issue-v/43355/dev INSERT INTO xmltest VALUES (7, 'MolsonfreeCarlinglots'::xml); -ERROR: unsupported XML feature -LINE 1: INSERT INTO xmltest VALUES (7, 'MolsonfreeCarlinglots'::xml) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/43355/v24.2 ++See: https://go.crdb.dev/issue-v/43355/dev SELECT COUNT(id) FROM xmltest WHERE xmlexists('/menu/beer' PASSING data); - count -------- @@ -212128,7 +211039,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/xml_1.out --label +CREATE TABLE xmldata(data xml) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/43355/v24.2 ++See: https://go.crdb.dev/issue-v/43355/dev INSERT INTO xmldata VALUES(' AU @@ -212610,7 +211521,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/xml_1.out --label +CREATE TABLE xmltest2(x xml, _path text) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/43355/v24.2 ++See: https://go.crdb.dev/issue-v/43355/dev INSERT INTO xmltest2 VALUES('1', 'A'); -ERROR: unsupported XML feature -LINE 1: INSERT INTO xmltest2 VALUES('1', 'A')... @@ -212723,7 +211634,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/temp.out --label= +CREATE TEMP TABLE temptest(col int) ON COMMIT DELETE ROWS + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46556/v24.2 ++See: https://go.crdb.dev/issue-v/46556/dev -- while we're here, verify successful truncation of index with SQL function CREATE INDEX ON temptest(bit_length('')); +ERROR: relation "temptest" does not exist @@ -212780,7 +211691,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/temp.out --label= +CREATE TEMP TABLE temptest(col int) ON COMMIT DROP + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46556/v24.2 ++See: https://go.crdb.dev/issue-v/46556/dev INSERT INTO temptest VALUES (1); +ERROR: current transaction is aborted, commands ignored until end of transaction block INSERT INTO temptest VALUES (2); @@ -212825,7 +211736,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/temp.out --label= +CREATE TABLE temptest(col int) ON COMMIT DELETE ROWS + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46556/v24.2 ++See: https://go.crdb.dev/issue-v/46556/dev CREATE TABLE temptest(col) ON COMMIT DELETE ROWS AS SELECT 1; -ERROR: ON COMMIT can only be used on temporary tables +ERROR: at or near "on": syntax error @@ -212844,7 +211755,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/temp.out --label= + ON COMMIT DELETE ROWS + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/46556/v24.2 ++See: https://go.crdb.dev/issue-v/46556/dev INSERT INTO temptest1 VALUES (1); +ERROR: current transaction is aborted, commands ignored until end of transaction block INSERT INTO temptest2 VALUES (1); @@ -212870,7 +211781,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/temp.out --label= +CREATE TEMP TABLE temptest3(col int PRIMARY KEY) ON COMMIT DELETE ROWS + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46556/v24.2 ++See: https://go.crdb.dev/issue-v/46556/dev CREATE TEMP TABLE temptest4(col int REFERENCES temptest3); +ERROR: current transaction is aborted, commands ignored until end of transaction block COMMIT; @@ -212885,7 +211796,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/temp.out --label= as $$select 'temp'::text$$ language sql; +ERROR: unimplemented: cannot create UDFs under a temporary schema +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/104687/v24.2 ++See: https://go.crdb.dev/issue-v/104687/dev -- default should have pg_temp implicitly first, but only for tables select * from whereami; f1 @@ -212908,7 +211819,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/temp.out --label= +create domain pg_temp.nonempty as text check (value <> '') + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev -- function-syntax invocation of types matches rules for functions select nonempty(''); -ERROR: function nonempty(unknown) does not exist @@ -213054,7 +211965,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/temp.out --label= +create temp table temp_inh_oncommit_test (a int) on commit drop + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46556/v24.2 ++See: https://go.crdb.dev/issue-v/46556/dev create temp table temp_inh_oncommit_test1 () inherits(temp_inh_oncommit_test) on commit delete rows; +ERROR: at or near "(": syntax error: unimplemented: this syntax @@ -213063,7 +211974,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/temp.out --label= + inherits(temp_inh_oncommit_test) on commit delete rows + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev insert into temp_inh_oncommit_test1 values (1); +ERROR: current transaction is aborted, commands ignored until end of transaction block commit; @@ -213078,7 +211989,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/temp.out --label= +create temp table temp_inh_oncommit_test (a int) on commit delete rows + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46556/v24.2 ++See: https://go.crdb.dev/issue-v/46556/dev create temp table temp_inh_oncommit_test1 () inherits(temp_inh_oncommit_test) on commit drop; +ERROR: at or near "(": syntax error: unimplemented: this syntax @@ -213087,7 +211998,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/temp.out --label= + inherits(temp_inh_oncommit_test) on commit drop + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev insert into temp_inh_oncommit_test1 values (1); +ERROR: current transaction is aborted, commands ignored until end of transaction block insert into temp_inh_oncommit_test values (1); @@ -213120,7 +212031,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/temp.out --label= $$ select '2pc_func'::text $$ language sql; +ERROR: unimplemented: cannot create UDFs under a temporary schema +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/104687/v24.2 ++See: https://go.crdb.dev/issue-v/104687/dev prepare transaction 'twophase_func'; -ERROR: cannot PREPARE a transaction that has operated on temporary objects +ERROR: at or near "twophase_func": syntax error @@ -213152,7 +212063,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/temp.out --label= +create operator pg_temp.@@ (leftarg = int4, rightarg = int4, procedure = int4mi) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev prepare transaction 'twophase_operator'; -ERROR: cannot PREPARE a transaction that has operated on temporary objects +ERROR: at or near "twophase_operator": syntax error @@ -213404,7 +212315,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rangefuncs.out -- declare rf_cur scroll cursor for select * from rows from(generate_series(1,5),generate_series(1,2)) with ordinality as g(i,j,o); +ERROR: unimplemented: DECLARE SCROLL CURSOR +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/77102/v24.2 ++See: https://go.crdb.dev/issue-v/77102/dev fetch all from rf_cur; - i | j | o ----+---+--- @@ -213957,7 +212868,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rangefuncs.out -- CREATE FUNCTION rngfunc_mat(int,int) RETURNS setof rngfunc_rescan_t AS 'begin for i in $1..$2 loop return next (i, nextval(''rngfunc_rescan_seq2'')); end loop; end;' LANGUAGE plpgsql; +ERROR: unimplemented: set-returning PL/pgSQL functions are not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/105240/v24.2 ++See: https://go.crdb.dev/issue-v/105240/dev --invokes ExecReScanFunctionScan - all these cases should materialize the function only once -- LEFT JOIN on a condition that the planner can't prove to be true is used to ensure the function -- is on the inner path of a nestloop join @@ -214740,7 +213651,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rangefuncs.out -- +RETURNS TABLE(a int) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/100226/v24.2 ++See: https://go.crdb.dev/issue-v/100226/dev SELECT * FROM rngfunc(); - a ---- @@ -214765,7 +213676,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rangefuncs.out -- +RETURNS TABLE(a int, b int) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/100226/v24.2 ++See: https://go.crdb.dev/issue-v/100226/dev SELECT * FROM rngfunc(3); - a | b ----+--- @@ -214793,7 +213704,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rangefuncs.out -- +RETURNS TABLE(a varchar(5)) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/100226/v24.2 ++See: https://go.crdb.dev/issue-v/100226/dev SELECT * FROM rngfunc() GROUP BY 1; - a -------- @@ -214934,19 +213845,18 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rangefuncs.out -- -- triggers will fire, too create function noticetrigger() returns trigger as $$ -@@ -1762,72 +1314,60 @@ +@@ -1762,72 +1314,59 @@ raise notice 'noticetrigger % %', new.f1, new.data; return null; end $$ language plpgsql; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev create trigger tnoticetrigger after insert on tt for each row execute procedure noticetrigger(); -+ERROR: at or near "tnoticetrigger": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger tnoticetrigger after insert on tt for each row -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev select insert_tt2('foolme','barme') limit 1; -NOTICE: noticetrigger 11 foolme -NOTICE: noticetrigger 12 barme @@ -214995,11 +213905,11 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rangefuncs.out -- +create rule insert_tt_rule as on insert to tt do also + ^ +HINT: You have attempted to use a feature that is not yet implemented. - ++ +Please check the public issue tracker to check whether this problem is +already tracked. If you cannot find it there, please report the error +with details by creating a new issue. -+ + +If you would rather not post publicly, please contact us directly +using the support form. + @@ -215046,7 +213956,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rangefuncs.out -- -- test case for a whole-row-variable bug create function rngfunc1(n integer, out a text, out b text) -@@ -1835,6 +1375,18 @@ +@@ -1835,6 +1374,18 @@ language sql as $$ select 'foo ' || i, 'bar ' || i from generate_series(1,$1) i $$; set work_mem='64kB'; @@ -215065,7 +213975,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rangefuncs.out -- select t.a, t, t.a from rngfunc1(10000) t limit 1; a | t | a -------+-------------------+------- -@@ -1842,6 +1394,18 @@ +@@ -1842,6 +1393,18 @@ (1 row) reset work_mem; @@ -215084,7 +213994,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rangefuncs.out -- select t.a, t, t.a from rngfunc1(10000) t limit 1; a | t | a -------+-------------------+------- -@@ -1871,31 +1435,24 @@ +@@ -1871,31 +1434,24 @@ select * from array_to_set(array['one', 'two']); -- fail ERROR: a column definition list is required for functions returning "record" @@ -215109,7 +214019,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rangefuncs.out -- +select * from array_to_set(array['one', 'two']) as t(f1 point,f2 text) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev -- with "strict", this function can't be inlined in FROM explain (verbose, costs off) select * from array_to_set(array['one', 'two']) as t(f1 numeric(4,2),f2 text); @@ -215128,7 +214038,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rangefuncs.out -- -- but without, it can be: create or replace function array_to_set(anyarray) returns setof record as $$ select i AS "index", $1[i] AS "value" from generate_subscripts($1, 1) i -@@ -1914,26 +1471,21 @@ +@@ -1914,26 +1470,21 @@ 2 | two (2 rows) @@ -215149,7 +214059,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rangefuncs.out -- +select * from array_to_set(array['one', 'two']) as t(f1 point,f2 text) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev explain (verbose, costs off) select * from array_to_set(array['one', 'two']) as t(f1 numeric(4,2),f2 text); - QUERY PLAN @@ -215167,7 +214077,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rangefuncs.out -- create temp table rngfunc(f1 int8, f2 int8); create function testrngfunc() returns record as $$ insert into rngfunc values (1,2) returning *; -@@ -1952,8 +1504,6 @@ +@@ -1952,8 +1503,6 @@ select * from testrngfunc(); -- fail ERROR: a column definition list is required for functions returning "record" @@ -215176,7 +214086,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rangefuncs.out -- drop function testrngfunc(); create function testrngfunc() returns setof record as $$ insert into rngfunc values (1,2), (3,4) returning *; -@@ -1974,8 +1524,6 @@ +@@ -1974,8 +1523,6 @@ select * from testrngfunc(); -- fail ERROR: a column definition list is required for functions returning "record" @@ -215185,7 +214095,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rangefuncs.out -- drop function testrngfunc(); -- Check that typmod imposed by a composite type is honored create type rngfunc_type as (f1 numeric(35,6), f2 numeric(35,2)); -@@ -1984,12 +1532,11 @@ +@@ -1984,12 +1531,11 @@ $$ language sql immutable; explain (verbose, costs off) select testrngfunc(); @@ -215203,7 +214113,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rangefuncs.out -- select testrngfunc(); testrngfunc ----------------- -@@ -1998,13 +1545,11 @@ +@@ -1998,13 +1544,11 @@ explain (verbose, costs off) select * from testrngfunc(); @@ -215222,7 +214132,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rangefuncs.out -- select * from testrngfunc(); f1 | f2 ----------+------ -@@ -2016,12 +1561,11 @@ +@@ -2016,12 +1560,11 @@ $$ language sql volatile; explain (verbose, costs off) select testrngfunc(); @@ -215240,7 +214150,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rangefuncs.out -- select testrngfunc(); testrngfunc ----------------- -@@ -2030,13 +1574,11 @@ +@@ -2030,13 +1573,11 @@ explain (verbose, costs off) select * from testrngfunc(); @@ -215259,7 +214169,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rangefuncs.out -- select * from testrngfunc(); f1 | f2 ----------+------ -@@ -2049,13 +1591,11 @@ +@@ -2049,13 +1590,11 @@ $$ language sql immutable; explain (verbose, costs off) select testrngfunc(); @@ -215278,7 +214188,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rangefuncs.out -- select testrngfunc(); testrngfunc ----------------- 
-@@ -2064,12 +1604,11 @@ +@@ -2064,12 +1603,11 @@ explain (verbose, costs off) select * from testrngfunc(); @@ -215296,7 +214206,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rangefuncs.out -- select * from testrngfunc(); f1 | f2 ----------+------ -@@ -2081,13 +1620,11 @@ +@@ -2081,13 +1619,11 @@ $$ language sql volatile; explain (verbose, costs off) select testrngfunc(); @@ -215315,7 +214225,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rangefuncs.out -- select testrngfunc(); testrngfunc ----------------- -@@ -2096,13 +1633,11 @@ +@@ -2096,13 +1632,11 @@ explain (verbose, costs off) select * from testrngfunc(); @@ -215334,7 +214244,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rangefuncs.out -- select * from testrngfunc(); f1 | f2 ----------+------ -@@ -2112,62 +1647,45 @@ +@@ -2112,62 +1646,45 @@ create or replace function testrngfunc() returns setof rngfunc_type as $$ select 1, 2 union select 3, 4 order by 1; $$ language sql immutable; @@ -215414,11 +214324,11 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rangefuncs.out -- -NOTICE: drop cascades to function testrngfunc() +ERROR: unimplemented: DROP TYPE CASCADE is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/51480/v24.2 ++See: https://go.crdb.dev/issue-v/51480/dev -- -- Check some cases involving added/dropped columns in a rowtype result -- -@@ -2178,79 +1696,37 @@ +@@ -2178,79 +1695,37 @@ create or replace function get_first_user() returns users as $$ SELECT * FROM users ORDER BY userid LIMIT 1; $$ language sql stable; @@ -215511,7 +214421,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rangefuncs.out -- -- We used to have a bug that would allow the above to succeed, posing -- hazards for later execution of the view. 
Check that the internal -- defenses for those hazards haven't bit-rotted, in case some other -@@ -2264,18 +1740,17 @@ +@@ -2264,18 +1739,17 @@ returning pg_describe_object(classid, objid, objsubid) as obj, pg_describe_object(refclassid, refobjid, refobjsubid) as ref, deptype; @@ -215531,13 +214441,13 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rangefuncs.out -- -ERROR: cannot alter type of a column used by a view or rule -DETAIL: rule _RETURN on view usersview depends on column "seq" +ERROR: ALTER COLUMN TYPE from int to decimal is only supported experimentally -+HINT: See: https://go.crdb.dev/issue-v/49329/v24.2 ++HINT: See: https://go.crdb.dev/issue-v/49329/dev +-- +you can enable alter column type general support by running `SET enable_experimental_alter_column_type_general = true` -- likewise, check we don't crash if the dependency goes wrong begin; -- destroy the dependency entry that prevents the ALTER: -@@ -2286,19 +1761,18 @@ +@@ -2286,19 +1760,18 @@ returning pg_describe_object(classid, objid, objsubid) as obj, pg_describe_object(refclassid, refobjid, refobjsubid) as ref, deptype; @@ -215563,7 +214473,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rangefuncs.out -- drop table users; -- check behavior with type coercion required for a set-op create or replace function rngfuncbar() returns setof text as -@@ -2320,17 +1794,11 @@ +@@ -2320,17 +1793,11 @@ -- this function is now inlinable, too: explain (verbose, costs off) select * from rngfuncbar(); @@ -215586,7 +214496,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rangefuncs.out -- drop function rngfuncbar(); -- check handling of a SQL function with multiple OUT params (bug #5777) create or replace function rngfuncbar(out integer, out numeric) as -@@ -2344,115 +1812,100 @@ +@@ -2344,115 +1811,100 @@ create or replace function rngfuncbar(out integer, out numeric) as $$ select (1, 2) $$ language sql; select * from rngfuncbar(); -- fail @@ -215652,7 +214562,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rangefuncs.out -- +create function extractq2_2(t int8_tbl) returns table(ret1 int8) as $$ + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/100226/v24.2 ++See: https://go.crdb.dev/issue-v/100226/dev explain (verbose, costs off) select x from int8_tbl, extractq2_2(int8_tbl) f(x); - QUERY PLAN @@ -215690,7 +214600,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rangefuncs.out -- +create function extractq2_2_opt(t int8_tbl) returns table(ret1 int8) as $$ + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/100226/v24.2 ++See: https://go.crdb.dev/issue-v/100226/dev explain (verbose, costs off) select x from int8_tbl, extractq2_2_opt(int8_tbl) f(x); - QUERY PLAN @@ -215760,7 +214670,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rangefuncs.out -- (0 rows) drop type rngfunc2; -@@ -2463,25 +1916,16 @@ +@@ -2463,25 +1915,16 @@ from unnest(array['{"lectures": [{"id": "1"}]}'::jsonb]) as unnested_modules(module)) as ss, jsonb_to_recordset(ss.lecture) as j (id text); @@ -215793,7 +214703,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rangefuncs.out -- - +ERROR: jsonb_path_query_array(): unimplemented: this function is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22513/v24.2 ++See: https://go.crdb.dev/issue-v/22513/dev diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/sequence.out --label=/mnt/data1/postgres/src/test/regress/results/sequence.out /mnt/data1/postgres/src/test/regress/expected/sequence.out /mnt/data1/postgres/src/test/regress/results/sequence.out --- /mnt/data1/postgres/src/test/regress/expected/sequence.out +++ /mnt/data1/postgres/src/test/regress/results/sequence.out @@ -215895,9 +214805,9 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/sequence.out --la CREATE TABLE serialTest2 (f1 text, f2 serial, f3 smallserial, f4 serial2, f5 bigserial, f6 serial8); +NOTICE: upgrading the column f3 to INT8 to utilize the session serial_normalization setting -+HINT: change the serial_normalization to sql_sequence or sql_sequence_cached if you wish to use a smaller sized serial column at the cost of performance. See https://www.cockroachlabs.com/docs/v24.2/serial.html ++HINT: change the serial_normalization to sql_sequence or sql_sequence_cached if you wish to use a smaller sized serial column at the cost of performance. See https://www.cockroachlabs.com/docs/dev/serial.html +NOTICE: upgrading the column f4 to INT8 to utilize the session serial_normalization setting -+HINT: change the serial_normalization to sql_sequence or sql_sequence_cached if you wish to use a smaller sized serial column at the cost of performance. See https://www.cockroachlabs.com/docs/v24.2/serial.html ++HINT: change the serial_normalization to sql_sequence or sql_sequence_cached if you wish to use a smaller sized serial column at the cost of performance. See https://www.cockroachlabs.com/docs/dev/serial.html INSERT INTO serialTest2 (f1) VALUES ('test_defaults'); INSERT INTO serialTest2 (f1, f2, f3, f4, f5, f6) @@ -216093,7 +215003,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/sequence.out --la INCREMENT BY 4 MAXVALUE 36 MINVALUE 5 CYCLE; +ERROR: unimplemented: CYCLE option is not supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/20961/v24.2 ++See: https://go.crdb.dev/issue-v/20961/dev SELECT nextval('sequence_test2'); nextval --------- @@ -216158,7 +215068,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/sequence.out --la INCREMENT BY -4 MINVALUE -36 MAXVALUE -5 CYCLE; +ERROR: unimplemented: CYCLE option is not supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/20961/v24.2 ++See: https://go.crdb.dev/issue-v/20961/dev SELECT nextval('sequence_test2'); nextval --------- @@ -216237,7 +215147,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/sequence.out --la INCREMENT BY 4 MAXVALUE 36 MINVALUE 5 CYCLE; +ERROR: unimplemented: CYCLE option is not supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/20961/v24.2 ++See: https://go.crdb.dev/issue-v/20961/dev SELECT setval('sequence_test2', -100); -- error -ERROR: setval: value -100 is out of bounds for sequence "sequence_test2" (5..36) +ERROR: setval(): value -100 is out of bounds for sequence "sequence_test2" (1..9223372036854775807) @@ -216760,7 +215670,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/truncate.out --la -- Test foreign-key checks CREATE TABLE trunc_b (a int REFERENCES truncate_a); CREATE TABLE trunc_c (a serial PRIMARY KEY); -+NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. See https://www.cockroachlabs.com/docs/v24.2/serial.html ++NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. See https://www.cockroachlabs.com/docs/dev/serial.html CREATE TABLE trunc_d (a int REFERENCES trunc_c); CREATE TABLE trunc_e (a int REFERENCES truncate_a, b int REFERENCES trunc_c); TRUNCATE TABLE truncate_a; -- fail @@ -216853,7 +215763,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/truncate.out --la +CREATE TABLE trunc_fa (col2a text) INHERITS (trunc_f) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev INSERT INTO trunc_fa VALUES (3, 'three'); +ERROR: relation "trunc_fa" does not exist CREATE TABLE trunc_fb (col2b int) INHERITS (trunc_f); @@ -216862,7 +215772,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/truncate.out --la +CREATE TABLE trunc_fb (col2b int) INHERITS (trunc_f) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev INSERT INTO trunc_fb VALUES (4, 444); +ERROR: relation "trunc_fb" does not exist CREATE TABLE trunc_faa (col3 text) INHERITS (trunc_fa); @@ -216871,7 +215781,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/truncate.out --la +CREATE TABLE trunc_faa (col3 text) INHERITS (trunc_fa) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev INSERT INTO trunc_faa VALUES (5, 'five', 'FIVE'); +ERROR: relation "trunc_faa" does not exist BEGIN; @@ -217019,27 +215929,26 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/truncate.out --la -- Test ON TRUNCATE triggers CREATE TABLE trunc_trigger_test (f1 int, f2 text, f3 text); CREATE TABLE trunc_trigger_log (tgop text, tglevel text, tgwhen text, -@@ -294,12 +228,19 @@ +@@ -294,12 +228,18 @@ return null; end; $$ LANGUAGE plpgsql; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev -- basic before trigger INSERT INTO trunc_trigger_test VALUES(1, 'foo', 'bar'), (2, 'baz', 'quux'); CREATE TRIGGER t BEFORE TRUNCATE ON trunc_trigger_test FOR EACH STATEMENT EXECUTE PROCEDURE trunctrigger('before trigger truncate'); -+ERROR: at or near "t": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER t -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev SELECT count(*) as "Row count in test table" FROM trunc_trigger_test; Row count in test table ------------------------- -@@ -319,12 +260,17 @@ +@@ -319,12 +259,14 @@ (1 row) SELECT * FROM trunc_trigger_log; @@ -217052,29 +215961,23 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/truncate.out --la +(0 rows) DROP TRIGGER t ON trunc_trigger_test; -+ERROR: at or near "t": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+DROP TRIGGER t ON trunc_trigger_test -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev truncate trunc_trigger_log; -- same test with an after trigger INSERT INTO trunc_trigger_test VALUES(1, 'foo', 'bar'), (2, 'baz', 'quux'); -@@ -332,6 +278,12 @@ +@@ -332,6 +274,9 @@ AFTER TRUNCATE ON trunc_trigger_test FOR EACH STATEMENT EXECUTE PROCEDURE trunctrigger('after trigger truncate'); -+ERROR: at or near "tt": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER tt -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev SELECT count(*) as "Row count in test table" FROM trunc_trigger_test; Row count in test table ------------------------- -@@ -351,17 +303,18 @@ +@@ -351,17 +296,18 @@ (1 row) SELECT * FROM trunc_trigger_log; @@ -217098,7 +216001,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/truncate.out --la id1 integer default nextval('truncate_a_id1')); ALTER SEQUENCE truncate_a_id1 OWNED BY truncate_a.id1; INSERT INTO truncate_a DEFAULT VALUES; -@@ -384,14 +337,20 @@ +@@ -384,14 +330,20 @@ (2 rows) TRUNCATE truncate_a RESTART IDENTITY; @@ -217122,7 +216025,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/truncate.out --la CREATE TABLE truncate_b (id int GENERATED ALWAYS AS IDENTITY (START WITH 44)); INSERT INTO truncate_b DEFAULT VALUES; -@@ -414,56 +373,74 @@ +@@ -414,56 +366,74 @@ (2 rows) TRUNCATE truncate_b RESTART IDENTITY; @@ -217215,7 +216118,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/truncate.out --la -- foreign key on partitioned table: partition key is referencing column. -- Make sure truncate did execute on all tables CREATE FUNCTION tp_ins_data() RETURNS void LANGUAGE plpgsql AS $$ -@@ -472,6 +449,7 @@ +@@ -472,6 +442,7 @@ INSERT INTO truncpart VALUES (1), (100), (150); END $$; @@ -217223,7 +216126,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/truncate.out --la CREATE FUNCTION tp_chk_data(OUT pktb regclass, OUT pkval int, OUT fktb regclass, OUT fkval int) RETURNS SETOF record LANGUAGE plpgsql AS $$ BEGIN -@@ -481,114 +459,173 @@ +@@ -481,114 +452,173 @@ ORDER BY 2, 4; END $$; @@ -217474,7 +216377,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +ALTER TABLE attmp ADD COLUMN g polygon + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev ALTER TABLE attmp ADD COLUMN i char; ALTER TABLE attmp ADD COLUMN k int4; ALTER TABLE attmp ADD COLUMN l tid; @@ -217491,28 +216394,28 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +ALTER TABLE attmp ADD COLUMN q point + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev ALTER TABLE attmp ADD COLUMN r lseg; +ERROR: at or near "EOF": syntax error: unimplemented: this syntax +DETAIL: source SQL: +ALTER TABLE attmp ADD COLUMN r lseg + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev ALTER TABLE attmp ADD COLUMN s path; +ERROR: at or near "EOF": syntax error: unimplemented: this syntax +DETAIL: source SQL: +ALTER TABLE attmp ADD COLUMN s path + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev ALTER TABLE attmp ADD COLUMN t box; +ERROR: at or near "EOF": syntax error: unimplemented: this syntax +DETAIL: source SQL: +ALTER TABLE attmp ADD COLUMN t box + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev ALTER TABLE attmp ADD COLUMN v timestamp; ALTER TABLE attmp ADD COLUMN w interval; ALTER TABLE attmp ADD COLUMN x float8[]; @@ -217541,7 +216444,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +ALTER TABLE attmp ADD COLUMN g polygon + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev ALTER TABLE attmp ADD COLUMN i char; ALTER TABLE attmp ADD COLUMN k int4; ALTER TABLE attmp ADD COLUMN l tid; @@ -217558,28 +216461,28 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +ALTER TABLE attmp ADD COLUMN q point + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev ALTER TABLE attmp ADD COLUMN r lseg; +ERROR: at or near "EOF": syntax error: unimplemented: this syntax +DETAIL: source SQL: +ALTER TABLE attmp ADD COLUMN r lseg + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev ALTER TABLE attmp ADD COLUMN s path; +ERROR: at or near "EOF": syntax error: unimplemented: this syntax +DETAIL: source SQL: +ALTER TABLE attmp ADD COLUMN s path + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev ALTER TABLE attmp ADD COLUMN t box; +ERROR: at or near "EOF": syntax error: unimplemented: this syntax +DETAIL: source SQL: +ALTER TABLE attmp ADD COLUMN t box + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev ALTER TABLE attmp ADD COLUMN v timestamp; ALTER TABLE attmp ADD COLUMN w interval; ALTER TABLE attmp ADD COLUMN x float8[]; @@ -217921,7 +216824,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - ALTER TABLE onek DROP CONSTRAINT onek_unique1_constraint_foo; +ERROR: unimplemented: cannot drop UNIQUE constraint "onek_unique1_constraint_foo" using ALTER TABLE DROP CONSTRAINT, use DROP INDEX CASCADE instead +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/42840/v24.2 ++See: https://go.crdb.dev/issue-v/42840/dev -- renaming constraint ALTER TABLE onek ADD CONSTRAINT onek_check_constraint CHECK (unique1 >= 0); ALTER TABLE onek RENAME CONSTRAINT onek_check_constraint TO onek_check_constraint_foo; @@ -217971,7 +216874,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +CREATE TABLE constraint_rename_test2 (a int CONSTRAINT con1 CHECK (a > 0), d int) INHERITS (constraint_rename_test) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev \d constraint_rename_test2 - Table "public.constraint_rename_test2" - Column | Type | Collation | Nullable | Default @@ -218226,14 +217129,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +CREATE TABLE attmp6 () INHERITS (attmp3) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev CREATE TABLE attmp7 () INHERITS (attmp3); +ERROR: at or near "(": syntax error: unimplemented: this syntax +DETAIL: source SQL: +CREATE TABLE attmp7 () INHERITS (attmp3) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev INSERT INTO attmp6 VALUES (6, 30), (7, 16); +ERROR: relation "attmp6" does not exist ALTER TABLE attmp3 ADD CONSTRAINT b_le_20 CHECK (b <= 20) NOT VALID; @@ -218265,7 +217168,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +create table child_noinh_convalid () inherits (parent_noinh_convalid) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev insert into parent_noinh_convalid values (1); insert into child_noinh_convalid values (1); +ERROR: relation "child_noinh_convalid" does not exist @@ -218350,14 +217253,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +create table nv_child_2010 () inherits (nv_parent) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev create table nv_child_2011 () inherits (nv_parent); +ERROR: at or near "(": syntax error: unimplemented: this syntax +DETAIL: source SQL: +create table nv_child_2011 () inherits (nv_parent) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev alter table nv_child_2010 add check (d between '2010-01-01'::date and '2010-12-31'::date) not valid; +ERROR: relation "nv_child_2010" does not exist alter table nv_child_2011 add check (d between '2011-01-01'::date and '2011-12-31'::date) not valid; @@ -218385,7 +217288,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +create table nv_child_2009 (check (d between '2009-01-01'::date and '2009-12-31'::date)) inherits (nv_parent) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev explain (costs off) select * from nv_parent where d between '2011-08-01'::date and '2011-08-31'::date; - QUERY PLAN ---------------------------------------------------------------------------- @@ -218523,7 +217426,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +CREATE TEMP TABLE FKTABLE (ftest1 cidr, ftest2 timestamp) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/18846/v24.2 ++See: https://go.crdb.dev/issue-v/18846/dev ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1, ftest2) references pktable; -ERROR: foreign key constraint "fktable_ftest1_ftest2_fkey" cannot be implemented -DETAIL: Key columns "ftest1" and "ptest1" are of incompatible types: cidr and integer. @@ -218537,7 +217440,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +CREATE TEMP TABLE FKTABLE (ftest1 cidr, ftest2 timestamp) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/18846/v24.2 ++See: https://go.crdb.dev/issue-v/18846/dev ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1, ftest2) references pktable(ptest1, ptest2); -ERROR: foreign key constraint "fktable_ftest1_ftest2_fkey" cannot be implemented @@ -218579,7 +217482,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - + ON DELETE CASCADE ON UPDATE NO ACTION DEFERRABLE INITIALLY DEFERRED + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/31632/v24.2 ++See: https://go.crdb.dev/issue-v/31632/dev ALTER TABLE FKTABLE ADD CONSTRAINT fkdi FOREIGN KEY(ftest1) REFERENCES pktable ON DELETE CASCADE ON UPDATE NO ACTION DEFERRABLE INITIALLY IMMEDIATE; +ERROR: at or near "immediate": syntax error: unimplemented: this syntax @@ -218588,7 +217491,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - + ON DELETE CASCADE ON UPDATE NO ACTION DEFERRABLE INITIALLY IMMEDIATE + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/31632/v24.2 ++See: https://go.crdb.dev/issue-v/31632/dev ALTER TABLE FKTABLE ADD CONSTRAINT fknd2 FOREIGN KEY(ftest1) REFERENCES pktable ON DELETE CASCADE ON UPDATE NO ACTION DEFERRABLE INITIALLY DEFERRED; +ERROR: at or near "deferred": syntax error: unimplemented: this syntax @@ -218597,14 +217500,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - + ON DELETE CASCADE ON UPDATE NO ACTION DEFERRABLE INITIALLY DEFERRED + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/31632/v24.2 ++See: https://go.crdb.dev/issue-v/31632/dev ALTER TABLE FKTABLE ALTER CONSTRAINT fknd2 NOT DEFERRABLE; +ERROR: at or near "not": syntax error: unimplemented: this syntax +DETAIL: source SQL: +ALTER TABLE FKTABLE ALTER CONSTRAINT fknd2 NOT DEFERRABLE + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/31632/v24.2 ++See: https://go.crdb.dev/issue-v/31632/dev ALTER TABLE FKTABLE ADD CONSTRAINT fkdd2 FOREIGN KEY(ftest1) REFERENCES pktable ON DELETE CASCADE ON UPDATE NO ACTION NOT DEFERRABLE; +ERROR: at or near "deferrable": syntax error @@ -218619,7 +217522,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +ALTER TABLE FKTABLE ALTER CONSTRAINT fkdd2 DEFERRABLE INITIALLY DEFERRED + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/31632/v24.2 ++See: https://go.crdb.dev/issue-v/31632/dev ALTER TABLE FKTABLE ADD CONSTRAINT fkdi2 FOREIGN KEY(ftest1) REFERENCES pktable ON DELETE CASCADE ON UPDATE NO ACTION NOT DEFERRABLE; +ERROR: at or near "deferrable": syntax error @@ -218634,7 +217537,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +ALTER TABLE FKTABLE ALTER CONSTRAINT fkdi2 DEFERRABLE INITIALLY IMMEDIATE + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/31632/v24.2 ++See: https://go.crdb.dev/issue-v/31632/dev SELECT conname, tgfoid::regproc, tgtype, tgdeferrable, tginitdeferred FROM pg_trigger JOIN pg_constraint con ON con.oid = tgconstraint WHERE tgrelid = 'pktable'::regclass @@ -218738,7 +217641,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +create table atacc3 (test3 int) inherits (atacc1, atacc2) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev alter table atacc2 add constraint foo check (test2>0); -- fail and then succeed on atacc2 insert into atacc2 (test2) values (-3); @@ -218766,7 +217669,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +create table atacc3 (test3 int) inherits (atacc1, atacc2) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev alter table atacc3 no inherit atacc2; +ERROR: at or near "inherit": syntax error +DETAIL: source SQL: @@ -218875,7 +217778,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +create table atacc2 (test2 int) inherits (atacc1) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev -- ok: alter table atacc1 add constraint foo check (test>0) no inherit; +ERROR: at or near "no": syntax error @@ -218909,7 +217812,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - -ERROR: could not create unique index "atacc_test1" -DETAIL: Key (test)=(0) is duplicated. 
+ERROR: ALTER COLUMN TYPE from int to int is only supported experimentally -+HINT: See: https://go.crdb.dev/issue-v/49329/v24.2 ++HINT: See: https://go.crdb.dev/issue-v/49329/dev +-- +you can enable alter column type general support by running `SET enable_experimental_alter_column_type_general = true` drop table atacc1; @@ -219073,9 +217976,9 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - -ERROR: column "test" is in a primary key +ERROR: column "test" is in a primary index alter table atacc1 drop constraint "atacc1_pkey"; -+ERROR: relation "atacc1" (1890): unimplemented: primary key dropped without subsequent addition of new primary key in same transaction ++ERROR: relation "atacc1" (1893): unimplemented: primary key dropped without subsequent addition of new primary key in same transaction +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/48026/v24.2 ++See: https://go.crdb.dev/issue-v/48026/dev alter table atacc1 alter column test drop not null; +ERROR: column "test" is in a primary index insert into atacc1 values (null); @@ -219145,7 +218048,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +create table child (b varchar(255)) inherits (parent) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev alter table parent alter a set not null; insert into parent values (NULL); -ERROR: null value in column "a" of relation "parent" violates not-null constraint @@ -219605,7 +218508,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - drop table atacc1; -- test constraint error reporting in presence of dropped columns create table atacc1 (id serial primary key, value int check (value < 10)); -+NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. See https://www.cockroachlabs.com/docs/v24.2/serial.html ++NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. See https://www.cockroachlabs.com/docs/dev/serial.html insert into atacc1(value) values (100); -ERROR: new row for relation "atacc1" violates check constraint "atacc1_value_check" -DETAIL: Failing row contains (1, 100). @@ -219631,7 +218534,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +create table child (d varchar(255)) inherits (parent) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev insert into child values (12, 13, 'testing'); +ERROR: relation "child" does not exist select * from parent; @@ -219689,7 +218592,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +create table child (a float4) inherits (parent) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev create table child (b decimal(10,7)) inherits (parent); -- fail -NOTICE: moving and merging column "b" with inherited definition -DETAIL: User-specified column moved to the position of the inherited column. 
@@ -219700,7 +218603,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +create table child (b decimal(10,7)) inherits (parent) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev create table child (c text collate "POSIX") inherits (parent); -- fail -NOTICE: moving and merging column "c" with inherited definition -DETAIL: User-specified column moved to the position of the inherited column. @@ -219718,7 +218621,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +create table child (a double precision, b decimal(10,4)) inherits (parent) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev drop table child; +ERROR: relation "child" does not exist drop table parent; @@ -219763,14 +218666,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +create table dropColumnChild (c int) inherits (dropColumn) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev create table dropColumnAnother (d int) inherits (dropColumnChild); +ERROR: at or near "(": syntax error: unimplemented: this syntax +DETAIL: source SQL: +create table dropColumnAnother (d int) inherits (dropColumnChild) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev -- these two should fail alter table dropColumnchild drop column a; -ERROR: cannot drop inherited column "a" @@ -219790,14 +218693,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +create table renameColumnChild (b int) inherits (renameColumn) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev create table renameColumnAnother (c int) inherits (renameColumnChild); +ERROR: at or near "(": syntax error: unimplemented: this syntax +DETAIL: source SQL: +create table renameColumnAnother (c int) inherits (renameColumnChild) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev -- these three should fail alter table renameColumnChild rename column a to d; -ERROR: cannot rename inherited column "a" @@ -219831,7 +218734,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +create table c1 (f1 int not null) inherits(p1) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev -- should be rejected since c1.f1 is inherited alter table c1 drop column f1; -ERROR: cannot drop inherited column "f1" @@ -219862,7 +218765,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +create table c1 () inherits(p1) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev -- should be rejected since c1.f1 is inherited alter table c1 drop column f1; -ERROR: cannot drop inherited column "f1" @@ -219884,7 +218787,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +create table c1 () inherits(p1) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev -- should be rejected since c1.f1 is inherited alter table c1 drop column f1; -ERROR: cannot drop inherited column "f1" @@ -219903,7 +218806,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +create table c1 (f1 int not null) inherits(p1) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev -- should be rejected since c1.f1 is inherited alter table c1 drop column f1; -ERROR: cannot drop inherited column "f1" @@ -219923,14 +218826,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +create table c1(age int) inherits(p1,p2) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev create table gc1() inherits (c1); +ERROR: at or near "(": syntax error: unimplemented: this syntax +DETAIL: source SQL: +create table gc1() inherits (c1) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev select relname, attname, attinhcount, attislocal from pg_class join pg_attribute on (pg_class.oid = pg_attribute.attrelid) where relname in ('p1','p2','c1','gc1') and attnum > 0 and not attisdropped @@ -219959,7 +218862,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - -- should work alter table only p1 drop column name; -@@ -1773,188 +2092,186 @@ +@@ -1773,188 +2092,178 @@ alter table p2 drop column name; -- should be rejected since its inherited alter table gc1 drop column name; @@ -220012,14 +218915,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +create table depth1(c text) inherits (depth0) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev create table depth2() inherits (depth1); +ERROR: at or near "(": syntax error: unimplemented: this syntax +DETAIL: source SQL: +create table depth2() inherits (depth1) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev alter table depth0 add c text; -NOTICE: merging definition of column "c" for child "depth1" select attrelid::regclass, attname, attinhcount, attislocal @@ -220042,7 +218945,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +create table c1 (f2 text, f3 int) inherits (p1) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev alter table p1 add column a1 int check (a1 > 0); alter table p1 add column f2 text; -NOTICE: merging definition of column "f2" for child "c1" @@ -220080,7 +218983,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +create domain mytype as text + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev create temp table foo (f1 text, f2 mytype, f3 text); +ERROR: type "mytype" does not exist insert into foo values('bb','cc','dd'); @@ -220099,7 +219002,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +drop domain mytype cascade + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev select * from foo; - f1 | f3 -----+---- @@ -220158,17 +219061,12 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - alter table anothertab alter column atcol1 type boolean; -- fails -ERROR: column "atcol1" cannot be cast automatically to type boolean -HINT: You might need to specify "USING atcol1::boolean". -+ERROR: ALTER COLUMN TYPE from int to bool is only supported experimentally -+HINT: See: https://go.crdb.dev/issue-v/49329/v24.2 -+-- -+you can enable alter column type general support by running `SET enable_experimental_alter_column_type_general = true` ++ERROR: column "atcol1" cannot be cast automatically to type BOOL ++HINT: You might need to specify "USING atcol1::BOOL". alter table anothertab alter column atcol1 type boolean using atcol1::int; -- fails -ERROR: result of USING clause for column "atcol1" cannot be cast automatically to type boolean -HINT: You might need to add an explicit cast. -+ERROR: ALTER COLUMN TYPE from int to bool is only supported experimentally -+HINT: See: https://go.crdb.dev/issue-v/49329/v24.2 -+-- -+you can enable alter column type general support by running `SET enable_experimental_alter_column_type_general = true` ++ERROR: default for column "atcol1" cannot be cast automatically to type BOOL alter table anothertab alter column atcol1 type integer; select * from anothertab; atcol1 | atcol2 @@ -220198,7 +219096,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - when atcol2 is false then 'IT WAS FALSE' else 'IT WAS NULL!' 
end; +ERROR: ALTER COLUMN TYPE from bool to string is only supported experimentally -+HINT: See: https://go.crdb.dev/issue-v/49329/v24.2 ++HINT: See: https://go.crdb.dev/issue-v/49329/dev +-- +you can enable alter column type general support by running `SET enable_experimental_alter_column_type_general = true` select * from anothertab; @@ -220215,28 +219113,25 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - alter table anothertab alter column atcol1 type boolean using case when atcol1 % 2 = 0 then true else false end; -- fails -ERROR: default for column "atcol1" cannot be cast automatically to type boolean -+ERROR: ALTER COLUMN TYPE from int to bool is only supported experimentally -+HINT: See: https://go.crdb.dev/issue-v/49329/v24.2 -+-- -+you can enable alter column type general support by running `SET enable_experimental_alter_column_type_general = true` ++ERROR: default for column "atcol1" cannot be cast automatically to type BOOL alter table anothertab alter column atcol1 drop default; alter table anothertab alter column atcol1 type boolean using case when atcol1 % 2 = 0 then true else false end; -- fails -ERROR: operator does not exist: boolean <= integer -HINT: No operator matches the given name and argument types. You might need to add explicit type casts. +ERROR: ALTER COLUMN TYPE from int to bool is only supported experimentally -+HINT: See: https://go.crdb.dev/issue-v/49329/v24.2 ++HINT: See: https://go.crdb.dev/issue-v/49329/dev +-- +you can enable alter column type general support by running `SET enable_experimental_alter_column_type_general = true` alter table anothertab drop constraint anothertab_chk; alter table anothertab drop constraint anothertab_chk; -- fails ERROR: constraint "anothertab_chk" of relation "anothertab" does not exist -@@ -1962,13 +2279,14 @@ +@@ -1962,13 +2271,14 @@ NOTICE: constraint "anothertab_chk" of relation "anothertab" does not exist, skipping alter table anothertab alter column atcol1 type boolean using case when atcol1 % 2 = 0 then true else false end; +ERROR: ALTER COLUMN TYPE from int to bool is only supported experimentally -+HINT: See: https://go.crdb.dev/issue-v/49329/v24.2 ++HINT: See: https://go.crdb.dev/issue-v/49329/dev +-- +you can enable alter column type general support by running `SET enable_experimental_alter_column_type_general = true` select * from anothertab; @@ -220252,7 +219147,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - drop table anothertab; -- Test index handling in alter table column type (cf. bugs #15835, #15865) -@@ -1976,33 +2294,45 @@ +@@ -1976,33 +2286,45 @@ f3 int, f4 int, f5 int); alter table anothertab add exclude using btree (f3 with =); @@ -220262,7 +219157,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - + add exclude using btree (f3 with =) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46657/v24.2 ++See: https://go.crdb.dev/issue-v/46657/dev alter table anothertab add exclude using btree (f4 with =) where (f4 is not null); +ERROR: at or near "btree": syntax error: unimplemented: this syntax @@ -220271,7 +219166,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - + add exclude using btree (f4 with =) where (f4 is not null) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/46657/v24.2 ++See: https://go.crdb.dev/issue-v/46657/dev alter table anothertab add exclude using btree (f4 with =) where (f5 > 0); +ERROR: at or near "btree": syntax error: unimplemented: this syntax @@ -220280,7 +219175,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - + add exclude using btree (f4 with =) where (f5 > 0) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/46657/v24.2 ++See: https://go.crdb.dev/issue-v/46657/dev alter table anothertab add unique(f1,f4); create index on anothertab(f2,f3); @@ -220316,7 +219211,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - alter table anothertab alter column f1 type bigint; alter table anothertab alter column f2 type bigint, -@@ -2010,24 +2340,15 @@ +@@ -2010,24 +2332,15 @@ alter column f4 type bigint; alter table anothertab alter column f5 type bigint; \d anothertab @@ -220350,12 +219245,12 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - drop table anothertab; -- test that USING expressions are parsed before column alter type / drop steps create table another (f1 int, f2 text, f3 text); -@@ -2046,12 +2367,16 @@ +@@ -2046,12 +2359,16 @@ alter f1 type text using f2 || ' and ' || f3 || ' more', alter f2 type bigint using f1 * 10, drop column f3; +ERROR: ALTER COLUMN TYPE from int to string is only supported experimentally -+HINT: See: https://go.crdb.dev/issue-v/49329/v24.2 ++HINT: See: https://go.crdb.dev/issue-v/49329/dev +-- +you can enable alter column type general support by running `SET enable_experimental_alter_column_type_general = true` select * from another; @@ -220372,13 +219267,13 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - (3 rows) drop table another; -@@ -2064,99 +2389,155 @@ +@@ -2064,99 +2381,155 @@ -- We disallow changing table's row type if it's used for storage create table at_tab1 (a int, b text); create table at_tab2 (x int, y at_tab1); +ERROR: unimplemented: cannot use table record type as table column +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/70099/v24.2 ++See: https://go.crdb.dev/issue-v/70099/dev alter table at_tab1 alter column b type varchar; -- fails -ERROR: cannot alter table "at_tab1" because column "at_tab2.y" uses its row type drop table at_tab2; @@ -220402,7 +219297,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - create table at_tab2 (x int, y at_tab1); +ERROR: unimplemented: cannot use table record type as table column +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/70099/v24.2 ++See: https://go.crdb.dev/issue-v/70099/dev alter table at_tab1 alter column b type varchar; -- fails -ERROR: cannot alter table "at_tab1" because column "at_tab2.y" uses its row type drop table at_tab1, at_tab2; @@ -220577,7 +219472,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - create temp table old_oids as select relname, oid as oldoid, relfilenode as oldfilenode from pg_class where relname like 'at_partitioned%'; -@@ -2172,27 +2553,19 @@ +@@ -2172,27 +2545,19 @@ from pg_class c left join old_oids using (relname) where relname like 'at_partitioned%' order by relname; @@ -220612,7 +219507,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - -- Note: these tests currently show the wrong behavior for comments :-( select relname, c.oid = oldoid as orig_oid, -@@ -2206,264 +2579,321 @@ +@@ -2206,264 +2571,321 @@ from pg_class c left join old_oids using (relname) where relname like 'at_partitioned%' order by relname; @@ -220651,26 +219546,26 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - -ERROR: composite type recur1 cannot be made a member of itself +ERROR: unimplemented: cannot use table record type as table column +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/70099/v24.2 ++See: https://go.crdb.dev/issue-v/70099/dev alter table recur1 add column f2 recur1[]; -- fails -ERROR: composite type recur1 cannot be made a member of itself +ERROR: unimplemented: cannot use table record type as table column +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/70099/v24.2 ++See: https://go.crdb.dev/issue-v/70099/dev create domain array_of_recur1 as recur1[]; +ERROR: at or near "as": syntax error: unimplemented: this syntax +DETAIL: source SQL: +create domain array_of_recur1 as recur1[] + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev alter table recur1 add column f2 array_of_recur1; -- fails -ERROR: composite type recur1 cannot be made a member of itself +ERROR: type "array_of_recur1" does not exist create temp table recur2 (f1 int, f2 recur1); +ERROR: unimplemented: cannot use table record type as table column +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/70099/v24.2 ++See: https://go.crdb.dev/issue-v/70099/dev alter table recur1 add column f2 recur2; -- fails -ERROR: composite type recur1 cannot be made a member of itself +ERROR: type "recur2" does not exist @@ -220785,7 +219680,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +CREATE TABLE test_inh_check_child() INHERITS(test_inh_check) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev \d test_inh_check - Table "public.test_inh_check" - Column | Type | Collation | Nullable | Default @@ -220840,7 +219735,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - ALTER TABLE test_inh_check ALTER COLUMN a TYPE numeric; +ERROR: ALTER COLUMN TYPE from float to decimal is only supported experimentally -+HINT: See: https://go.crdb.dev/issue-v/49329/v24.2 ++HINT: See: https://go.crdb.dev/issue-v/49329/dev +-- +you can enable alter column type general support by running `SET enable_experimental_alter_column_type_general = true` \d test_inh_check @@ -220972,7 +219867,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - ALTER TABLE test_inh_check ALTER COLUMN b TYPE numeric; -NOTICE: merging constraint "bmerged" with inherited definition +ERROR: ALTER COLUMN TYPE from float to decimal is only supported experimentally -+HINT: See: https://go.crdb.dev/issue-v/49329/v24.2 ++HINT: See: https://go.crdb.dev/issue-v/49329/dev +-- +you can enable alter column type general support by running `SET enable_experimental_alter_column_type_general = true` \d test_inh_check @@ -221046,13 +219941,13 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +CREATE TABLE test_type_diff_c (extra smallint) INHERITS (test_type_diff) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev ALTER TABLE test_type_diff ADD COLUMN f2 int; INSERT INTO test_type_diff_c VALUES (1, 2, 3); +ERROR: relation "test_type_diff_c" does not exist ALTER TABLE test_type_diff ALTER COLUMN f2 TYPE bigint USING f2::bigint; +ERROR: ALTER COLUMN TYPE from int to int is only supported experimentally -+HINT: See: https://go.crdb.dev/issue-v/49329/v24.2 ++HINT: See: https://go.crdb.dev/issue-v/49329/dev +-- +you can enable alter column type general support by running `SET enable_experimental_alter_column_type_general = true` CREATE TABLE test_type_diff2 (int_two int2, int_four int4, int_eight int8); @@ -221082,7 +219977,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - INSERT INTO test_type_diff2_c3 VALUES (7, 8, 9); ALTER TABLE test_type_diff2 ALTER COLUMN int_four TYPE int8 USING int_four::int8; +ERROR: ALTER COLUMN TYPE from int4 to int is only supported experimentally -+HINT: See: https://go.crdb.dev/issue-v/49329/v24.2 ++HINT: See: https://go.crdb.dev/issue-v/49329/dev +-- +you can enable alter column type general support by running `SET enable_experimental_alter_column_type_general = true` -- whole-row references are disallowed @@ -221090,13 +219985,13 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - -ERROR: cannot convert whole-row table reference -DETAIL: USING expression contains a whole-row table reference. 
+ERROR: ALTER COLUMN TYPE from int4 to int4 is only supported experimentally -+HINT: See: https://go.crdb.dev/issue-v/49329/v24.2 ++HINT: See: https://go.crdb.dev/issue-v/49329/dev +-- +you can enable alter column type general support by running `SET enable_experimental_alter_column_type_general = true` -- check for rollback of ANALYZE corrupting table property flags (bug #11638) CREATE TABLE check_fk_presence_1 (id int PRIMARY KEY, t text); CREATE TABLE check_fk_presence_2 (id int REFERENCES check_fk_presence_1, t text); -@@ -2472,14 +2902,15 @@ +@@ -2472,14 +2894,15 @@ ANALYZE check_fk_presence_2; ROLLBACK; \d check_fk_presence_2 @@ -221120,7 +220015,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - DROP TABLE check_fk_presence_1, check_fk_presence_2; -- check column addition within a view (bug #14876) create table at_base_table(id int, stuff text); -@@ -2487,80 +2918,68 @@ +@@ -2487,80 +2910,68 @@ create view at_view_1 as select * from at_base_table bt; create view at_view_2 as select *, to_json(v1) as j from at_view_1 v1; \d+ at_view_1 @@ -221253,7 +220148,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - (1 row) drop view at_view_2; -@@ -2573,27 +2992,36 @@ +@@ -2573,27 +2984,36 @@ create temp view v2 as select * from v1; create or replace temp view v1 with (security_barrier = true) as select * from t1; @@ -221305,7 +220200,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - rollback; -- check adding a column not itself requiring a rewrite, together with -- a column requiring a default (bug #16038) -@@ -2612,6 +3040,18 @@ +@@ -2612,6 +3032,18 @@ RETURN v_relfilenode <> (SELECT relfilenode FROM pg_class WHERE oid = p_tablename); END; $$; @@ -221324,7 +220219,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - CREATE TABLE rewrite_test(col text); INSERT INTO rewrite_test VALUES ('something'); INSERT INTO rewrite_test VALUES (NULL); -@@ -2621,42 +3061,26 @@ +@@ -2621,42 +3053,26 @@ ADD COLUMN empty1 text, ADD COLUMN notempty1_rewrite serial; $$); @@ -221371,7 +220266,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - -- then with rewrite SELECT check_ddl_rewrite('rewrite_test', $$ ALTER TABLE rewrite_test -@@ -2664,24 +3088,17 @@ +@@ -2664,24 +3080,17 @@ ADD COLUMN notempty5_norewrite int default 42, ADD COLUMN notempty5_rewrite serial; $$); @@ -221399,7 +220294,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - DROP TABLE rewrite_test; -- -- lock levels -@@ -2700,7 +3117,7 @@ +@@ -2700,7 +3109,7 @@ ,'AccessExclusiveLock' ); drop view my_locks; @@ -221408,7 +220303,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - create or replace view my_locks as select case when c.relname like 'pg_toast%' then 'pg_toast' else c.relname end, max(mode::lockmodes) as max_lockmode from pg_locks l join pg_class c on l.relation = c.oid -@@ -2712,104 +3129,90 @@ +@@ -2712,104 +3121,90 @@ and relnamespace != (select oid from pg_namespace where nspname = 'pg_catalog') and c.relname != 'my_locks' group by c.relname; @@ -221560,16 +220455,13 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - rollback; begin; create trigger ttdummy -@@ -2817,52 +3220,34 @@ +@@ -2817,52 +3212,31 @@ for each row execute procedure ttdummy (1, 1); -+ERROR: at or near "ttdummy": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger 
ttdummy -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev select * from my_locks order by 1; - relname | max_lockmode ------------+----------------------- @@ -221626,7 +220518,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - rollback; create or replace view my_locks as select case when c.relname like 'pg_toast%' then 'pg_toast' else c.relname end, max(mode::lockmodes) as max_lockmode -@@ -2875,40 +3260,57 @@ +@@ -2875,40 +3249,57 @@ and relnamespace != (select oid from pg_namespace where nspname = 'pg_catalog') and c.relname = 'my_locks' group by c.relname; @@ -221696,7 +220588,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - drop type lockmodes; -- -- alter function -@@ -2950,60 +3352,178 @@ +@@ -2950,60 +3341,178 @@ -- create schema alter1; create schema alter2; @@ -221704,7 +220596,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +create sequence f1_seq; +ERROR: relation "root.public.f1_seq" already exists +create table alter1.t1(f1 int primary key default nextval('f1_seq'), f2 int check (f2 > 0)); -+NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. See https://www.cockroachlabs.com/docs/v24.2/serial.html ++NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. See https://www.cockroachlabs.com/docs/dev/serial.html create view alter1.v1 as select * from alter1.t1; create function alter1.plus1(int) returns int as 'select $1+1' language sql; create domain alter1.posint integer check (value > 0); @@ -221713,7 +220605,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +create domain alter1.posint integer check (value > 0) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev create type alter1.ctype as (f1 int, f2 text); create function alter1.same(alter1.ctype, alter1.ctype) returns boolean language sql as 'select $1.f1 is not distinct from $2.f1 and $1.f2 is not distinct from $2.f2'; @@ -221727,7 +220619,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +create operator alter1.=(procedure = alter1.same, leftarg = alter1.ctype, rightarg = alter1.ctype) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev create operator class alter1.ctype_hash_ops default for type alter1.ctype using hash as operator 1 alter1.=(alter1.ctype, alter1.ctype); +ERROR: at or near "class": syntax error: unimplemented: this syntax @@ -221735,7 +220627,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +create operator class alter1.ctype_hash_ops default for type alter1.ctype using hash as + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev create conversion alter1.latin1_to_utf8 for 'latin1' to 'utf8' from iso8859_1_to_utf8; +ERROR: at or near "alter1": syntax error: unimplemented: this syntax +DETAIL: source SQL: @@ -221758,28 +220650,28 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +create text search parser alter1.prs(start = prsd_start, gettoken = prsd_nexttoken, end = prsd_end, lextypes = prsd_lextype) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev create text search configuration alter1.cfg(parser = alter1.prs); +ERROR: at or near "search": syntax error: unimplemented: this syntax +DETAIL: source SQL: +create text search configuration alter1.cfg(parser = alter1.prs) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev create text search template alter1.tmpl(init = dsimple_init, lexize = dsimple_lexize); +ERROR: at or near "search": syntax error: unimplemented: this syntax +DETAIL: source SQL: +create text search template alter1.tmpl(init = dsimple_init, lexize = dsimple_lexize) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev create text search dictionary alter1.dict(template = alter1.tmpl); +ERROR: at or near "search": syntax error: unimplemented: this syntax +DETAIL: source SQL: +create text search dictionary alter1.dict(template = alter1.tmpl) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/7821/v24.2 ++See: https://go.crdb.dev/issue-v/7821/dev insert into alter1.t1(f2) values(11); insert into alter1.t1(f2) values(12); alter table alter1.t1 set schema alter1; -- no-op, same schema @@ -221889,7 +220781,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - select alter2.plus1(41); plus1 -@@ -3013,225 +3533,389 @@ +@@ -3013,225 +3522,389 @@ -- clean up drop schema alter2 cascade; @@ -221933,14 +220825,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +ALTER TYPE nosuchtype ADD ATTRIBUTE b text + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/48701/v24.2 ++See: https://go.crdb.dev/issue-v/48701/dev ALTER TYPE test_type ADD ATTRIBUTE b text; +ERROR: at or near "EOF": syntax error: unimplemented: this syntax +DETAIL: source SQL: +ALTER TYPE test_type ADD ATTRIBUTE b text + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/48701/v24.2 ++See: https://go.crdb.dev/issue-v/48701/dev \d test_type - Composite type "public.test_type" - Column | Type | Collation | Nullable | Default @@ -221964,14 +220856,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +ALTER TYPE test_type ADD ATTRIBUTE b text + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/48701/v24.2 ++See: https://go.crdb.dev/issue-v/48701/dev ALTER TYPE test_type ALTER ATTRIBUTE b SET DATA TYPE varchar; +ERROR: at or near "EOF": syntax error: unimplemented: this syntax +DETAIL: source SQL: +ALTER TYPE test_type ALTER ATTRIBUTE b SET DATA TYPE varchar + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/48701/v24.2 ++See: https://go.crdb.dev/issue-v/48701/dev \d test_type - Composite type "public.test_type" - Column | Type | Collation | Nullable | Default @@ -221994,7 +220886,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +ALTER TYPE test_type ALTER ATTRIBUTE b SET DATA TYPE integer + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/48701/v24.2 ++See: https://go.crdb.dev/issue-v/48701/dev \d test_type - Composite type "public.test_type" - Column | Type | Collation | Nullable | Default @@ -222017,7 +220909,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +ALTER TYPE test_type DROP ATTRIBUTE b + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/48701/v24.2 ++See: https://go.crdb.dev/issue-v/48701/dev \d test_type - Composite type "public.test_type" - Column | Type | Collation | Nullable | Default @@ -222040,7 +220932,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +ALTER TYPE test_type DROP ATTRIBUTE c + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/48701/v24.2 ++See: https://go.crdb.dev/issue-v/48701/dev ALTER TYPE test_type DROP ATTRIBUTE IF EXISTS c; -NOTICE: column "c" of relation "test_type" does not exist, skipping +ERROR: at or near "EOF": syntax error: unimplemented: this syntax @@ -222048,14 +220940,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +ALTER TYPE test_type DROP ATTRIBUTE IF EXISTS c + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/48701/v24.2 ++See: https://go.crdb.dev/issue-v/48701/dev ALTER TYPE test_type DROP ATTRIBUTE a, ADD ATTRIBUTE d boolean; +ERROR: at or near "EOF": syntax error: unimplemented: this syntax +DETAIL: source SQL: +ALTER TYPE test_type DROP ATTRIBUTE a, ADD ATTRIBUTE d boolean + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/48701/v24.2 ++See: https://go.crdb.dev/issue-v/48701/dev \d test_type - Composite type "public.test_type" - Column | Type | Collation | Nullable | Default @@ -222078,14 +220970,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +ALTER TYPE test_type RENAME ATTRIBUTE a TO aa + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/48701/v24.2 ++See: https://go.crdb.dev/issue-v/48701/dev ALTER TYPE test_type RENAME ATTRIBUTE d TO dd; +ERROR: at or near "EOF": syntax error: unimplemented: this syntax +DETAIL: source SQL: +ALTER TYPE test_type RENAME ATTRIBUTE d TO dd + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/48701/v24.2 ++See: https://go.crdb.dev/issue-v/48701/dev \d test_type - Composite type "public.test_type" - Column | Type | Collation | Nullable | Default @@ -222111,7 +221003,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +ALTER TYPE test_type1 ALTER ATTRIBUTE b TYPE varchar + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/48701/v24.2 ++See: https://go.crdb.dev/issue-v/48701/dev DROP TABLE test_tbl1; CREATE TABLE test_tbl1 (x int, y text); CREATE INDEX test_tbl1_idx ON test_tbl1((row(x,y)::test_type1)); @@ -222123,7 +221015,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +ALTER TYPE test_type1 ALTER ATTRIBUTE b TYPE varchar + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/48701/v24.2 ++See: https://go.crdb.dev/issue-v/48701/dev DROP TABLE test_tbl1; DROP TYPE test_type1; CREATE TYPE test_type2 AS (a int, b text); @@ -222139,7 +221031,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +CREATE TABLE test_tbl2_subclass () INHERITS (test_tbl2) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev \d test_type2 - Composite type "public.test_type2" - Column | Type | Collation | Nullable | Default @@ -222182,14 +221074,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +ALTER TYPE test_type2 ADD ATTRIBUTE c text + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/48701/v24.2 ++See: https://go.crdb.dev/issue-v/48701/dev ALTER TYPE test_type2 ADD ATTRIBUTE c text CASCADE; +ERROR: at or near "EOF": syntax error: unimplemented: this syntax +DETAIL: source SQL: +ALTER TYPE test_type2 ADD ATTRIBUTE c text CASCADE + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/48701/v24.2 ++See: https://go.crdb.dev/issue-v/48701/dev \d test_type2 - Composite type "public.test_type2" - Column | Type | Collation | Nullable | Default @@ -222234,14 +221126,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +ALTER TYPE test_type2 ALTER ATTRIBUTE b TYPE varchar + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/48701/v24.2 ++See: https://go.crdb.dev/issue-v/48701/dev ALTER TYPE test_type2 ALTER ATTRIBUTE b TYPE varchar CASCADE; +ERROR: at or near "EOF": syntax error: unimplemented: this syntax +DETAIL: source SQL: +ALTER TYPE test_type2 ALTER ATTRIBUTE b TYPE varchar CASCADE + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/48701/v24.2 ++See: https://go.crdb.dev/issue-v/48701/dev \d test_type2 - Composite type "public.test_type2" - Column | Type | Collation | Nullable | Default @@ -222286,14 +221178,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +ALTER TYPE test_type2 DROP ATTRIBUTE b + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/48701/v24.2 ++See: https://go.crdb.dev/issue-v/48701/dev ALTER TYPE test_type2 DROP ATTRIBUTE b CASCADE; +ERROR: at or near "EOF": syntax error: unimplemented: this syntax +DETAIL: source SQL: +ALTER TYPE test_type2 DROP ATTRIBUTE b CASCADE + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/48701/v24.2 ++See: https://go.crdb.dev/issue-v/48701/dev \d test_type2 - Composite type "public.test_type2" - Column | Type | Collation | Nullable | Default @@ -222336,14 +221228,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +ALTER TYPE test_type2 RENAME ATTRIBUTE a TO aa + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/48701/v24.2 ++See: https://go.crdb.dev/issue-v/48701/dev ALTER TYPE test_type2 RENAME ATTRIBUTE a TO aa CASCADE; +ERROR: at or near "cascade": syntax error: unimplemented: this syntax +DETAIL: source SQL: +ALTER TYPE test_type2 RENAME ATTRIBUTE a TO aa CASCADE + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/48701/v24.2 ++See: https://go.crdb.dev/issue-v/48701/dev \d test_type2 - Composite type "public.test_type2" - Column | Type | Collation | Nullable | Default @@ -222409,7 +221301,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +ALTER TYPE test_typex DROP ATTRIBUTE a + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/48701/v24.2 ++See: https://go.crdb.dev/issue-v/48701/dev ALTER TYPE test_typex DROP ATTRIBUTE a CASCADE; -NOTICE: drop cascades to constraint test_tblx_y_check on table test_tblx +ERROR: at or near "EOF": syntax error: unimplemented: this syntax @@ -222417,7 +221309,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +ALTER TYPE test_typex DROP ATTRIBUTE a CASCADE + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/48701/v24.2 ++See: https://go.crdb.dev/issue-v/48701/dev \d test_tblx - Table "public.test_tblx" - Column | Type | Collation | Nullable | Default @@ -222437,7 +221329,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - DROP TABLE test_tblx; DROP TYPE test_typex; -- This test isn't that interesting on its own, but the purpose is to leave -@@ -3240,6 +3924,12 @@ +@@ -3240,6 +3913,12 @@ CREATE TYPE test_type3 AS (a int); CREATE TABLE test_tbl3 (c) AS SELECT '(1)'::test_type3; ALTER TYPE test_type3 DROP ATTRIBUTE a, ADD ATTRIBUTE b int; @@ -222446,11 +221338,11 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +ALTER TYPE test_type3 DROP ATTRIBUTE a, ADD ATTRIBUTE b int + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/48701/v24.2 ++See: https://go.crdb.dev/issue-v/48701/dev CREATE TYPE test_type_empty AS (); DROP TYPE test_type_empty; -- -@@ -3247,6 +3937,12 @@ +@@ -3247,6 +3926,12 @@ -- CREATE TYPE tt_t0 AS (z inet, x int, y numeric(8,2)); ALTER TYPE tt_t0 DROP ATTRIBUTE z; @@ -222459,11 +221351,11 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +ALTER TYPE tt_t0 DROP ATTRIBUTE z + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/48701/v24.2 ++See: https://go.crdb.dev/issue-v/48701/dev CREATE TABLE tt0 (x int NOT NULL, y numeric(8,2)); -- OK CREATE TABLE tt1 (x int, y bigint); -- wrong base type CREATE TABLE tt2 (x int, y numeric(9,2)); -- wrong typmod -@@ -3254,76 +3950,128 @@ +@@ -3254,76 +3939,128 @@ CREATE TABLE tt4 (x int); -- too few columns CREATE TABLE tt5 (x int, y numeric(8,2), z int); -- too few columns CREATE TABLE tt6 () INHERITS (tt0); -- can't have a parent @@ -222472,7 +221364,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +CREATE TABLE tt6 () INHERITS (tt0) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev CREATE TABLE tt7 (x int, q text, y numeric(8,2)); ALTER TABLE tt7 DROP q; -- OK ALTER TABLE tt0 OF tt_t0; @@ -222566,7 +221458,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +CREATE TABLE test_drop_constr_child () INHERITS (test_drop_constr_parent) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev ALTER TABLE ONLY test_drop_constr_parent DROP CONSTRAINT "test_drop_constr_parent_c_check"; +ERROR: constraint "test_drop_constr_parent_c_check" of relation "test_drop_constr_parent" does not exist -- should fail @@ -222623,7 +221515,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - DROP TABLE alter2.tt8; DROP SCHEMA alter2; -- -@@ -3334,32 +4082,28 @@ +@@ -3334,32 +4071,28 @@ ALTER TABLE tt9 ADD CHECK(c > 2); -- picks nonconflicting name ALTER TABLE tt9 ADD CONSTRAINT foo CHECK(c > 3); ALTER TABLE tt9 ADD CONSTRAINT foo CHECK(c > 4); -- fail, dup name @@ -222636,13 +221528,13 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +ERROR: index with name "tt9_c_key" already exists ALTER TABLE tt9 ADD CONSTRAINT foo UNIQUE(c); -- fail, dup name -ERROR: constraint "foo" for relation "tt9" already exists -+ERROR: error executing StatementPhase stage 1 of 1 with 7 MutationType ops: relation "tt9" (1984): duplicate constraint name: "foo" ++ERROR: error executing StatementPhase stage 1 of 1 with 7 MutationType ops: relation "tt9" (1987): duplicate constraint name: "foo" ALTER TABLE tt9 ADD CONSTRAINT tt9_c_key CHECK(c > 5); -- fail, dup name -ERROR: constraint "tt9_c_key" for relation "tt9" already exists -+ERROR: error executing StatementPhase stage 1 of 1 with 2 MutationType ops: relation "tt9" (1984): duplicate constraint name: "tt9_c_key" ++ERROR: error executing StatementPhase stage 1 of 1 with 2 MutationType ops: relation "tt9" (1987): duplicate constraint name: "tt9_c_key" ALTER TABLE tt9 ADD CONSTRAINT tt9_c_key2 CHECK(c > 6); ALTER TABLE tt9 ADD UNIQUE(c); -- picks nonconflicting name -+ERROR: error executing StatementPhase stage 1 of 1 with 7 MutationType ops: relation "tt9" (1984): duplicate constraint name: "tt9_c_key2" ++ERROR: error executing StatementPhase stage 1 of 1 with 7 MutationType ops: relation "tt9" (1987): duplicate constraint name: "tt9_c_key2" \d tt9 - Table "public.tt9" - Column | Type | Collation | Nullable | Default @@ -222670,7 +221562,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - DROP TABLE tt9; -- Check that comments on constraints and indexes are not lost at ALTER TABLE. 
CREATE TABLE comment_test ( -@@ -3371,6 +4115,7 @@ +@@ -3371,6 +4104,7 @@ COMMENT ON COLUMN comment_test.id IS 'Column ''id'' on comment_test'; COMMENT ON INDEX comment_test_index IS 'Simple index on comment_test'; COMMENT ON CONSTRAINT comment_test_positive_col_check ON comment_test IS 'CHECK constraint on comment_test.positive_col'; @@ -222678,7 +221570,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - COMMENT ON CONSTRAINT comment_test_pk ON comment_test IS 'PRIMARY KEY constraint of comment_test'; COMMENT ON INDEX comment_test_pk IS 'Index backing the PRIMARY KEY of comment_test'; SELECT col_description('comment_test'::regclass, 1) as comment; -@@ -3387,10 +4132,10 @@ +@@ -3387,10 +4121,10 @@ (2 rows) SELECT conname as constraint, obj_description(oid, 'pg_constraint') as comment FROM pg_constraint where conrelid = 'comment_test'::regclass ORDER BY 1, 2; @@ -222693,24 +221585,24 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - (2 rows) -- Change the datatype of all the columns. ALTER TABLE is optimized to not -@@ -3399,8 +4144,16 @@ +@@ -3399,8 +4133,16 @@ -- first, to test that no-op codepath, and another one that does. ALTER TABLE comment_test ALTER COLUMN indexed_col SET DATA TYPE int; ALTER TABLE comment_test ALTER COLUMN indexed_col SET DATA TYPE text; +ERROR: ALTER COLUMN TYPE from int to string is only supported experimentally -+HINT: See: https://go.crdb.dev/issue-v/49329/v24.2 ++HINT: See: https://go.crdb.dev/issue-v/49329/dev +-- +you can enable alter column type general support by running `SET enable_experimental_alter_column_type_general = true` ALTER TABLE comment_test ALTER COLUMN id SET DATA TYPE int; ALTER TABLE comment_test ALTER COLUMN id SET DATA TYPE text; +ERROR: ALTER COLUMN TYPE from int to string is only supported experimentally -+HINT: See: https://go.crdb.dev/issue-v/49329/v24.2 ++HINT: See: https://go.crdb.dev/issue-v/49329/dev +-- +you can enable alter column type general support by running `SET enable_experimental_alter_column_type_general = true` ALTER TABLE comment_test ALTER COLUMN positive_col SET DATA TYPE int; ALTER TABLE comment_test ALTER COLUMN positive_col SET DATA TYPE bigint; -- Check that the comments are intact. -@@ -3418,10 +4171,10 @@ +@@ -3418,10 +4160,10 @@ (2 rows) SELECT conname as constraint, obj_description(oid, 'pg_constraint') as comment FROM pg_constraint where conrelid = 'comment_test'::regclass ORDER BY 1, 2; @@ -222725,7 +221617,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - (2 rows) -- Check compatibility for foreign keys and comments. This is done -@@ -3429,34 +4182,33 @@ +@@ -3429,34 +4171,33 @@ -- to an error and would reduce the test scope. 
CREATE TABLE comment_test_child ( id text CONSTRAINT comment_test_child_fk REFERENCES comment_test); @@ -222741,14 +221633,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - -- Change column type of parent ALTER TABLE comment_test ALTER COLUMN id SET DATA TYPE text; +ERROR: ALTER COLUMN TYPE from int to string is only supported experimentally -+HINT: See: https://go.crdb.dev/issue-v/49329/v24.2 ++HINT: See: https://go.crdb.dev/issue-v/49329/dev +-- +you can enable alter column type general support by running `SET enable_experimental_alter_column_type_general = true` ALTER TABLE comment_test ALTER COLUMN id SET DATA TYPE int USING id::integer; -ERROR: foreign key constraint "comment_test_child_fk" cannot be implemented -DETAIL: Key columns "id" and "id" are of incompatible types: text and integer. +ERROR: ALTER COLUMN TYPE from int to int is only supported experimentally -+HINT: See: https://go.crdb.dev/issue-v/49329/v24.2 ++HINT: See: https://go.crdb.dev/issue-v/49329/dev +-- +you can enable alter column type general support by running `SET enable_experimental_alter_column_type_general = true` -- Comments should be intact @@ -222776,7 +221668,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - -- Check that we map relation oids to filenodes and back correctly. Only -- display bad mappings so the test output doesn't change all the time. A -- filenode function call can return NULL for a relation dropped concurrently -@@ -3468,31 +4220,28 @@ +@@ -3468,31 +4209,28 @@ FROM pg_class, pg_filenode_relation(reltablespace, pg_relation_filenode(oid)) AS mapped_oid WHERE relkind IN ('r', 'i', 'S', 't', 'm') AND mapped_oid IS DISTINCT FROM oid; @@ -222804,7 +221696,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +ERROR: schema cannot be modified: "pg_catalog" -- instead create in public first, move to catalog CREATE TABLE new_system_table(id serial primary key, othercol text); -+NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. See https://www.cockroachlabs.com/docs/v24.2/serial.html ++NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. See https://www.cockroachlabs.com/docs/dev/serial.html ALTER TABLE new_system_table SET SCHEMA pg_catalog; +ERROR: cannot move objects into or out of virtual schemas ALTER TABLE new_system_table SET SCHEMA public; @@ -222816,24 +221708,24 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - ALTER TABLE new_system_table RENAME TO old_system_table; CREATE INDEX old_system_table__othercol ON old_system_table (othercol); INSERT INTO old_system_table(othercol) VALUES ('somedata'), ('otherdata'); -@@ -3500,10 +4249,16 @@ +@@ -3500,10 +4238,16 @@ DELETE FROM old_system_table WHERE othercol = 'somedata'; TRUNCATE old_system_table; ALTER TABLE old_system_table DROP CONSTRAINT new_system_table_pkey; -+ERROR: relation "old_system_table" (1987): unimplemented: primary key dropped without subsequent addition of new primary key in same transaction ++ERROR: relation "old_system_table" (1990): unimplemented: primary key dropped without subsequent addition of new primary key in same transaction +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/48026/v24.2 ++See: https://go.crdb.dev/issue-v/48026/dev ALTER TABLE old_system_table DROP COLUMN othercol; +NOTICE: dropping index "old_system_table__othercol" which depends on column "othercol" DROP TABLE old_system_table; -- set logged CREATE UNLOGGED TABLE unlogged1(f1 SERIAL PRIMARY KEY, f2 TEXT); -- has sequence, toast -+NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. See https://www.cockroachlabs.com/docs/v24.2/serial.html ++NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. See https://www.cockroachlabs.com/docs/dev/serial.html +NOTICE: UNLOGGED TABLE will behave as a regular table in CockroachDB -- check relpersistence of an unlogged table SELECT relname, relkind, relpersistence FROM pg_class WHERE relname ~ '^unlogged1' UNION ALL -@@ -3511,21 +4266,36 @@ +@@ -3511,21 +4255,36 @@ UNION ALL SELECT r.relname || ' toast index', ri.relkind, ri.relpersistence FROM pg_class r join pg_class t ON t.oid = r.reltoastrelid JOIN pg_index i ON i.indrelid = t.oid JOIN pg_class ri ON ri.oid = i.indexrelid WHERE r.relname ~ '^unlogged1' ORDER BY relname; @@ -222852,10 +221744,10 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +(2 rows) CREATE UNLOGGED TABLE unlogged2(f1 SERIAL PRIMARY KEY, f2 INTEGER REFERENCES unlogged1); -- foreign key -+NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. See https://www.cockroachlabs.com/docs/v24.2/serial.html ++NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. See https://www.cockroachlabs.com/docs/dev/serial.html +NOTICE: UNLOGGED TABLE will behave as a regular table in CockroachDB CREATE UNLOGGED TABLE unlogged3(f1 SERIAL PRIMARY KEY, f2 INTEGER REFERENCES unlogged3); -- self-referencing foreign key -+NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. See https://www.cockroachlabs.com/docs/v24.2/serial.html ++NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. See https://www.cockroachlabs.com/docs/dev/serial.html +NOTICE: UNLOGGED TABLE will behave as a regular table in CockroachDB ALTER TABLE unlogged3 SET LOGGED; -- skip self-referencing foreign key +ERROR: at or near "logged": syntax error @@ -222879,7 +221771,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - -- check relpersistence of an unlogged table after changing to permanent SELECT relname, relkind, relpersistence FROM pg_class WHERE relname ~ '^unlogged1' UNION ALL -@@ -3533,21 +4303,24 @@ +@@ -3533,21 +4292,24 @@ UNION ALL SELECT r.relname || ' toast index', ri.relkind, ri.relpersistence FROM pg_class r join pg_class t ON t.oid = r.reltoastrelid JOIN pg_index i ON i.indrelid = t.oid JOIN pg_class ri ON ri.oid = i.indexrelid WHERE r.relname ~ '^unlogged1' ORDER BY relname; @@ -222908,11 +221800,11 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - DROP TABLE unlogged1; -- set unlogged CREATE TABLE logged1(f1 SERIAL PRIMARY KEY, f2 TEXT); -- has sequence, toast -+NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. See https://www.cockroachlabs.com/docs/v24.2/serial.html ++NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. 
See https://www.cockroachlabs.com/docs/dev/serial.html -- check relpersistence of a permanent table SELECT relname, relkind, relpersistence FROM pg_class WHERE relname ~ '^logged1' UNION ALL -@@ -3555,22 +4328,40 @@ +@@ -3555,22 +4317,40 @@ UNION ALL SELECT r.relname ||' toast index', ri.relkind, ri.relpersistence FROM pg_class r join pg_class t ON t.oid = r.reltoastrelid JOIN pg_index i ON i.indrelid = t.oid JOIN pg_class ri ON ri.oid = i.indexrelid WHERE r.relname ~ '^logged1' ORDER BY relname; @@ -222931,9 +221823,9 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +(2 rows) CREATE TABLE logged2(f1 SERIAL PRIMARY KEY, f2 INTEGER REFERENCES logged1); -- foreign key -+NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. See https://www.cockroachlabs.com/docs/v24.2/serial.html ++NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. See https://www.cockroachlabs.com/docs/dev/serial.html CREATE TABLE logged3(f1 SERIAL PRIMARY KEY, f2 INTEGER REFERENCES logged3); -- self-referencing foreign key -+NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. See https://www.cockroachlabs.com/docs/v24.2/serial.html ++NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. See https://www.cockroachlabs.com/docs/dev/serial.html ALTER TABLE logged1 SET UNLOGGED; -- fails because a foreign key from a permanent table exists -ERROR: could not change table "logged1" to unlogged because it references logged table "logged2" +ERROR: at or near "unlogged": syntax error @@ -222962,7 +221854,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - -- check relpersistence of a permanent table after changing to unlogged SELECT relname, relkind, relpersistence FROM pg_class WHERE relname ~ '^logged1' UNION ALL -@@ -3578,36 +4369,45 @@ +@@ -3578,36 +4358,45 @@ UNION ALL SELECT r.relname || ' toast index', ri.relkind, ri.relpersistence FROM pg_class r join pg_class t ON t.oid = r.reltoastrelid JOIN pg_index i ON i.indrelid = t.oid JOIN pg_class ri ON ri.oid = i.indexrelid WHERE r.relname ~ '^logged1' ORDER BY relname; @@ -223027,7 +221919,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - ALTER TABLE test_add_column ADD COLUMN c2 integer; -- fail because c2 already exists ERROR: column "c2" of relation "test_add_column" already exists -@@ -3615,159 +4415,132 @@ +@@ -3615,159 +4404,132 @@ ADD COLUMN c2 integer; -- fail because c2 already exists ERROR: column "c2" of relation "test_add_column" already exists \d test_add_column @@ -223279,7 +222171,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - -- assorted cases with multiple ALTER TABLE steps CREATE TABLE ataddindex(f1 INT); INSERT INTO ataddindex VALUES (42), (43); -@@ -3775,100 +4548,134 @@ +@@ -3775,100 +4537,134 @@ ALTER TABLE ataddindex ADD PRIMARY KEY USING INDEX ataddindexi0, ALTER f1 TYPE BIGINT; @@ -223414,7 +222306,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +ALTER TABLE partitioned ADD EXCLUDE USING gist (a WITH &&) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/46657/v24.2 ++See: https://go.crdb.dev/issue-v/46657/dev -- cannot drop column that is part of the partition key ALTER TABLE partitioned DROP COLUMN a; -ERROR: cannot drop column "a" because it is part of the partition key of relation "partitioned" @@ -223464,7 +222356,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - -- -- ATTACH PARTITION -- -@@ -3878,7 +4685,11 @@ +@@ -3878,7 +4674,11 @@ ); CREATE TABLE fail_part (like unparted); ALTER TABLE unparted ATTACH PARTITION fail_part FOR VALUES IN ('a'); @@ -223477,7 +222369,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - DROP TABLE unparted, fail_part; -- check that partition bound is compatible CREATE TABLE list_parted ( -@@ -3886,62 +4697,146 @@ +@@ -3886,62 +4686,146 @@ b char(2) COLLATE "C", CONSTRAINT check_a CHECK (a > 0) ) PARTITION BY LIST (a); @@ -223521,7 +222413,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +SET SESSION AUTHORIZATION regress_test_me + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE TABLE owned_by_me ( a int ) PARTITION BY LIST (a); @@ -223558,7 +222450,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +CREATE TABLE child () INHERITS (parent) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev ALTER TABLE list_parted ATTACH PARTITION child FOR VALUES IN (1); -ERROR: cannot attach inheritance child as partition +ERROR: at or near "attach": syntax error @@ -223612,7 +222504,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - -NOTICE: drop cascades to table fail_part +ERROR: unimplemented: DROP TYPE CASCADE is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/51480/v24.2 ++See: https://go.crdb.dev/issue-v/51480/dev -- check that the table being attached has only columns present in the parent CREATE TABLE fail_part (like list_parted, c int); +ERROR: relation "list_parted" does not exist @@ -223638,7 +222530,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - DROP TABLE fail_part; -- check that columns match in type, collation and NOT NULL status CREATE TABLE fail_part ( -@@ -3949,158 +4844,335 @@ +@@ -3949,158 +4833,335 @@ a int NOT NULL ); ALTER TABLE list_parted ATTACH PARTITION fail_part FOR VALUES IN (1); @@ -223649,7 +222541,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - + ^ +HINT: try \h ALTER TABLE ALTER TABLE fail_part ALTER b TYPE char (2) COLLATE "POSIX"; -+ERROR: invalid locale POSIX ++ERROR: invalid locale posix ALTER TABLE list_parted ATTACH PARTITION fail_part FOR VALUES IN (1); -ERROR: child table "fail_part" has different collation for column "b" +ERROR: at or near "attach": syntax error @@ -224003,7 +222895,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - -- Check the case where attnos of the partitioning columns in the table being -- attached differs from the parent. It should not affect the constraint- -- checking logic that allows to skip the scan. 
-@@ -4109,8 +5181,15 @@ +@@ -4109,8 +5170,15 @@ LIKE list_parted2, CONSTRAINT check_a CHECK (a IS NOT NULL AND a = 6) ); @@ -224019,7 +222911,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - -- Similar to above, but the table being attached is a partitioned table -- whose partition has still different attnos for the root partitioning -- columns. -@@ -4118,6 +5197,14 @@ +@@ -4118,6 +5186,14 @@ LIKE list_parted2, CONSTRAINT check_a CHECK (a IS NOT NULL AND a = 7) ) PARTITION BY LIST (b); @@ -224034,7 +222926,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - CREATE TABLE part_7_a_null ( c int, d int, -@@ -4126,63 +5213,150 @@ +@@ -4126,63 +5202,150 @@ CONSTRAINT check_b CHECK (b IS NULL OR b = 'a'), CONSTRAINT check_a CHECK (a IS NOT NULL AND a = 7) ); @@ -224198,7 +223090,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - -- check validation when attaching hash partitions -- Use hand-rolled hash functions and operator class to get predictable result -- on different machines. part_test_int4_ops is defined in insert.sql. -@@ -4191,233 +5365,430 @@ +@@ -4191,233 +5354,430 @@ a int, b int ) PARTITION BY HASH (a part_test_int4_ops); @@ -224645,7 +223537,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +CREATE TABLE inh_test () INHERITS (part_2) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev CREATE TABLE inh_test (LIKE part_2); +ERROR: relation "part_2" does not exist ALTER TABLE inh_test INHERIT part_2; @@ -224713,7 +223605,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - -- attnum for key attribute 'a' is different in p, p1, and p11 select attrelid::regclass, attname, attnum from pg_attribute -@@ -4426,73 +5797,156 @@ +@@ -4426,73 +5786,156 @@ or attrelid = 'p1'::regclass or attrelid = 'p11'::regclass) order by attrelid::regclass::text; @@ -224882,19 +223774,18 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - create or replace function func_part_attach() returns trigger language plpgsql as $$ begin -@@ -4500,14 +5954,21 @@ +@@ -4500,14 +5943,20 @@ execute 'alter table tab_part_attach attach partition tab_part_attach_1 for values in (1)'; return null; end $$; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev create trigger trig_part_attach before insert on tab_part_attach for each statement execute procedure func_part_attach(); -+ERROR: at or near "trig_part_attach": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger trig_part_attach before insert on tab_part_attach -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev insert into tab_part_attach values (1); -ERROR: cannot ALTER TABLE "tab_part_attach" because it is being used by active queries in this session -CONTEXT: SQL statement "alter table tab_part_attach attach partition tab_part_attach_1 for values in (1)" @@ -224907,7 +223798,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - -- test case where the partitioning operator is a SQL function whose -- evaluation results in the table's relcache being rebuilt partway through -- the execution of an ATTACH PARTITION command -@@ -4517,11 +5978,43 @@ +@@ -4517,11 +5966,43 @@ operator 1 < (int4, int4), operator 2 <= (int4, int4), operator 3 = (int4, int4), operator 4 >= (int4, int4), operator 5 > (int4, int4), function 1 at_test_sql_partop(int4, int4); @@ -224916,7 +223807,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - +create operator class at_test_sql_partop for type int4 using btree as + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev create table at_test_sql_partop (a int) partition by range (a at_test_sql_partop); +ERROR: at or near "at_test_sql_partop": syntax error +DETAIL: source SQL: @@ -224951,7 +223842,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - drop function at_test_sql_partop; /* Test case for bug #16242 */ -- We create a parent and child where the child has missing -@@ -4529,18 +6022,25 @@ +@@ -4529,18 +6010,25 @@ -- tuple conversion from the child to the parent tupdesc create table bar1 (a integer, b integer not null default 1) partition by range (a); @@ -224982,21 +223873,20 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - -- this exercises tuple conversion: create function xtrig() returns trigger language plpgsql -@@ -4554,22 +6054,38 @@ +@@ -4554,22 +6042,37 @@ return NULL; end; $$; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev create trigger xtrig after update on bar1 referencing old table as old for each statement execute procedure xtrig(); -+ERROR: at or near "xtrig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger xtrig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev update bar1 set a = a + 1; -INFO: a=1, b=1 +ERROR: relation "bar1" does not exist @@ -225022,7 +223912,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - create table atref (c1 int references attbl(p1)); alter table attbl alter column p1 set data type bigint; alter table atref alter column c1 set data type bigint; -@@ -4579,15 +6095,21 @@ +@@ -4579,15 +6082,21 @@ -- for normal indexes and indexes on constraints. 
create table alttype_cluster (a int); alter table alttype_cluster add primary key (a); @@ -225045,7 +223935,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - alttype_cluster_pkey | f (2 rows) -@@ -4597,19 +6119,24 @@ +@@ -4597,19 +6106,24 @@ order by indexrelid::regclass::text; indexrelid | indisclustered ----------------------+---------------- @@ -225072,7 +223962,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - (2 rows) alter table alttype_cluster alter a type int; -@@ -4619,7 +6146,7 @@ +@@ -4619,7 +6133,7 @@ indexrelid | indisclustered ----------------------+---------------- alttype_cluster_ind | f @@ -225081,7 +223971,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/alter_table.out - (2 rows) drop table alttype_cluster; -@@ -4628,38 +6155,97 @@ +@@ -4628,38 +6142,97 @@ -- to its partitions' constraint being updated to reflect the parent's -- newly added/removed constraint create table target_parted (a int, b int) partition by list (a); @@ -225577,7 +224467,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out -- dual non-polymorphic transfn CREATE FUNCTION tfnp(int[],int) RETURNS int[] AS 'select $1 || $2' LANGUAGE SQL; -@@ -301,6 +319,35 @@ +@@ -301,6 +319,36 @@ -- multi-arg polymorphic CREATE FUNCTION sum3(anyelement,anyelement,anyelement) returns anyelement AS 'select $1+$2+$3' language sql strict; @@ -225608,12 +224498,13 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out ++(int, inet) -> inet ++(decimal, pg_lsn) -> pg_lsn ++(pg_lsn, decimal) -> pg_lsn +++(vector, vector) -> vector ++(int, int) -> int + -- finalfn polymorphic CREATE FUNCTION ffp(anyarray) RETURNS anyarray AS 'select $1' LANGUAGE SQL; -@@ -323,28 +370,58 @@ +@@ -323,28 +371,58 @@ -- should CREATE CREATE AGGREGATE myaggp01a(*) (SFUNC = stfnp, STYPE = int4[], FINALFUNC = ffp, INITCOND = '{}'); @@ -225622,7 +224513,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +CREATE AGGREGATE myaggp01a(*) (SFUNC = stfnp, STYPE = int4[], + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- P N -- should ERROR: stfnp(anyarray) not matched by stfnp(int[]) CREATE AGGREGATE myaggp02a(*) (SFUNC = stfnp, STYPE = anyarray, @@ -225634,7 +224525,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +CREATE AGGREGATE myaggp02a(*) (SFUNC = stfnp, STYPE = anyarray, + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- N P -- should CREATE CREATE AGGREGATE myaggp03a(*) (SFUNC = stfp, STYPE = int4[], @@ -225644,7 +224535,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +CREATE AGGREGATE myaggp03a(*) (SFUNC = stfp, STYPE = int4[], + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev CREATE AGGREGATE myaggp03b(*) (SFUNC = stfp, STYPE = int4[], INITCOND = '{}'); +ERROR: at or near "myaggp03b": syntax error: unimplemented: this syntax @@ -225652,7 +224543,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +CREATE AGGREGATE myaggp03b(*) (SFUNC = stfp, STYPE = int4[], + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- P P -- should ERROR: we have no way to resolve S CREATE AGGREGATE myaggp04a(*) (SFUNC = stfp, STYPE = anyarray, @@ -225664,7 +224555,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +CREATE AGGREGATE myaggp04a(*) (SFUNC = stfp, STYPE = anyarray, + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev CREATE AGGREGATE myaggp04b(*) (SFUNC = stfp, STYPE = anyarray, INITCOND = '{}'); -ERROR: cannot determine transition data type @@ -225674,11 +224565,11 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +CREATE AGGREGATE myaggp04b(*) (SFUNC = stfp, STYPE = anyarray, + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- Case2 (R = P) && ((B = P) || (B = N)) -- ------------------------------------- -- S tf1 B tf2 -@@ -353,103 +430,226 @@ +@@ -353,103 +431,226 @@ -- should CREATE CREATE AGGREGATE myaggp05a(BASETYPE = int, SFUNC = tfnp, STYPE = int[], FINALFUNC = ffp, INITCOND = '{}'); @@ -225687,7 +224578,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +CREATE AGGREGATE myaggp05a(BASETYPE = int, SFUNC = tfnp, STYPE = int[], + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- N N N P -- should CREATE CREATE AGGREGATE myaggp06a(BASETYPE = int, SFUNC = tf2p, STYPE = int[], @@ -225697,7 +224588,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +CREATE AGGREGATE myaggp06a(BASETYPE = int, SFUNC = tf2p, STYPE = int[], + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- N N P N -- should ERROR: tfnp(int[], anyelement) not matched by tfnp(int[], int) CREATE AGGREGATE myaggp07a(BASETYPE = anyelement, SFUNC = tfnp, STYPE = int[], @@ -225708,7 +224599,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +CREATE AGGREGATE myaggp07a(BASETYPE = anyelement, SFUNC = tfnp, STYPE = int[], + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- N N P P -- should CREATE CREATE AGGREGATE myaggp08a(BASETYPE = anyelement, SFUNC = tf2p, STYPE = int[], @@ -225718,7 +224609,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +CREATE AGGREGATE myaggp08a(BASETYPE = anyelement, SFUNC = tf2p, STYPE = int[], + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- N P N N -- should CREATE CREATE AGGREGATE myaggp09a(BASETYPE = int, SFUNC = tf1p, STYPE = int[], @@ -225728,7 +224619,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +CREATE AGGREGATE myaggp09a(BASETYPE = int, SFUNC = tf1p, STYPE = int[], + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev CREATE AGGREGATE myaggp09b(BASETYPE = int, SFUNC = tf1p, STYPE = int[], INITCOND = '{}'); +ERROR: at or near "myaggp09b": syntax error: unimplemented: this syntax @@ -225736,7 +224627,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +CREATE AGGREGATE myaggp09b(BASETYPE = int, SFUNC = tf1p, STYPE = int[], + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- N P N P -- should CREATE CREATE AGGREGATE myaggp10a(BASETYPE = int, SFUNC = tfp, STYPE = int[], @@ -225746,7 +224637,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +CREATE AGGREGATE myaggp10a(BASETYPE = int, SFUNC = tfp, STYPE = int[], + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev CREATE AGGREGATE myaggp10b(BASETYPE = int, SFUNC = tfp, STYPE = int[], INITCOND = '{}'); +ERROR: at or near "myaggp10b": syntax error: unimplemented: this syntax @@ -225754,7 +224645,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +CREATE AGGREGATE myaggp10b(BASETYPE = int, SFUNC = tfp, STYPE = int[], + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- N P P N -- should ERROR: tf1p(int[],anyelement) not matched by tf1p(anyarray,int) CREATE AGGREGATE myaggp11a(BASETYPE = anyelement, SFUNC = tf1p, STYPE = int[], @@ -225765,7 +224656,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +CREATE AGGREGATE myaggp11a(BASETYPE = anyelement, SFUNC = tf1p, STYPE = int[], + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev CREATE AGGREGATE myaggp11b(BASETYPE = anyelement, SFUNC = tf1p, STYPE = int[], INITCOND = '{}'); -ERROR: function tf1p(integer[], anyelement) does not exist @@ -225774,7 +224665,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +CREATE AGGREGATE myaggp11b(BASETYPE = anyelement, SFUNC = tf1p, STYPE = int[], + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- N P P P -- should ERROR: tfp(int[],anyelement) not matched by tfp(anyarray,anyelement) CREATE AGGREGATE myaggp12a(BASETYPE = anyelement, SFUNC = tfp, STYPE = int[], @@ -225785,7 +224676,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +CREATE AGGREGATE myaggp12a(BASETYPE = anyelement, SFUNC = tfp, STYPE = int[], + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev CREATE AGGREGATE myaggp12b(BASETYPE = anyelement, SFUNC = tfp, STYPE = int[], INITCOND = '{}'); -ERROR: function tfp(integer[], anyelement) does not exist @@ -225794,7 +224685,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +CREATE AGGREGATE myaggp12b(BASETYPE = anyelement, SFUNC = tfp, STYPE = int[], + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- P N N N -- should ERROR: tfnp(anyarray, int) not matched by tfnp(int[],int) CREATE AGGREGATE myaggp13a(BASETYPE = int, SFUNC = tfnp, STYPE = anyarray, @@ -225806,7 +224697,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +CREATE AGGREGATE myaggp13a(BASETYPE = int, SFUNC = tfnp, STYPE = anyarray, + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- P N N P -- should ERROR: tf2p(anyarray, int) not matched by tf2p(int[],anyelement) CREATE AGGREGATE myaggp14a(BASETYPE = int, SFUNC = tf2p, STYPE = anyarray, @@ -225818,7 +224709,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +CREATE AGGREGATE myaggp14a(BASETYPE = int, SFUNC = tf2p, STYPE = anyarray, + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- P N P N -- should ERROR: tfnp(anyarray, anyelement) not matched by tfnp(int[],int) CREATE AGGREGATE myaggp15a(BASETYPE = anyelement, SFUNC = tfnp, @@ -225829,7 +224720,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +CREATE AGGREGATE myaggp15a(BASETYPE = anyelement, SFUNC = tfnp, + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- P N P P -- should ERROR: tf2p(anyarray, anyelement) not matched by tf2p(int[],anyelement) CREATE AGGREGATE myaggp16a(BASETYPE = anyelement, SFUNC = tf2p, @@ -225840,7 +224731,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +CREATE AGGREGATE myaggp16a(BASETYPE = anyelement, SFUNC = tf2p, + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- P P N N -- should ERROR: we have no way to resolve S CREATE AGGREGATE myaggp17a(BASETYPE = int, SFUNC = tf1p, STYPE = anyarray, @@ -225852,7 +224743,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +CREATE AGGREGATE myaggp17a(BASETYPE = int, SFUNC = tf1p, STYPE = anyarray, + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev CREATE AGGREGATE myaggp17b(BASETYPE = int, SFUNC = tf1p, STYPE = anyarray, INITCOND = '{}'); -ERROR: cannot determine transition data type @@ -225862,7 +224753,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +CREATE AGGREGATE myaggp17b(BASETYPE = int, SFUNC = tf1p, STYPE = anyarray, + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- P P N P -- should ERROR: tfp(anyarray, int) not matched by tfp(anyarray, anyelement) CREATE AGGREGATE myaggp18a(BASETYPE = int, SFUNC = tfp, STYPE = anyarray, @@ -225874,7 +224765,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +CREATE AGGREGATE myaggp18a(BASETYPE = int, SFUNC = tfp, STYPE = anyarray, + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev CREATE AGGREGATE myaggp18b(BASETYPE = int, SFUNC = tfp, STYPE = anyarray, INITCOND = '{}'); -ERROR: cannot determine transition data type @@ -225884,7 +224775,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +CREATE AGGREGATE myaggp18b(BASETYPE = int, SFUNC = tfp, STYPE = anyarray, + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- P P P N -- should ERROR: tf1p(anyarray, anyelement) not matched by tf1p(anyarray, int) CREATE AGGREGATE myaggp19a(BASETYPE = anyelement, SFUNC = tf1p, @@ -225895,7 +224786,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +CREATE AGGREGATE myaggp19a(BASETYPE = anyelement, SFUNC = tf1p, + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev CREATE AGGREGATE myaggp19b(BASETYPE = anyelement, SFUNC = tf1p, STYPE = anyarray, INITCOND = '{}'); -ERROR: function tf1p(anyarray, anyelement) does not exist @@ -225904,7 +224795,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +CREATE AGGREGATE myaggp19b(BASETYPE = anyelement, SFUNC = tf1p, + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- P P P P -- should CREATE CREATE AGGREGATE myaggp20a(BASETYPE = anyelement, SFUNC = tfp, @@ -225914,7 +224805,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +CREATE AGGREGATE myaggp20a(BASETYPE = anyelement, SFUNC = tfp, + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev CREATE AGGREGATE myaggp20b(BASETYPE = anyelement, SFUNC = tfp, STYPE = anyarray, INITCOND = '{}'); +ERROR: at or near "myaggp20b": syntax error: unimplemented: this syntax @@ -225922,11 +224813,11 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +CREATE AGGREGATE myaggp20b(BASETYPE = anyelement, SFUNC = tfp, + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- Case3 (R = N) && (B = A) -- ------------------------ -- S tf1 -@@ -458,28 +658,58 @@ +@@ -458,28 +659,58 @@ -- should CREATE CREATE AGGREGATE myaggn01a(*) (SFUNC = stfnp, STYPE = int4[], FINALFUNC = ffnp, INITCOND = '{}'); @@ -225935,7 +224826,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +CREATE AGGREGATE myaggn01a(*) (SFUNC = stfnp, STYPE = int4[], + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev CREATE AGGREGATE myaggn01b(*) (SFUNC = stfnp, STYPE = int4[], INITCOND = '{}'); +ERROR: at or near "myaggn01b": syntax error: unimplemented: this syntax @@ -225943,7 +224834,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +CREATE AGGREGATE myaggn01b(*) (SFUNC = stfnp, STYPE = int4[], + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- P N -- should ERROR: stfnp(anyarray) not matched by stfnp(int[]) CREATE AGGREGATE myaggn02a(*) (SFUNC = stfnp, STYPE = anyarray, @@ -225955,7 +224846,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +CREATE AGGREGATE myaggn02a(*) (SFUNC = stfnp, STYPE = anyarray, + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev CREATE AGGREGATE myaggn02b(*) (SFUNC = stfnp, STYPE = anyarray, INITCOND = '{}'); -ERROR: cannot determine transition data type @@ -225965,7 +224856,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +CREATE AGGREGATE myaggn02b(*) (SFUNC = stfnp, STYPE = anyarray, + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- N P -- should CREATE CREATE AGGREGATE myaggn03a(*) (SFUNC = stfp, STYPE = int4[], @@ -225975,7 +224866,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +CREATE AGGREGATE myaggn03a(*) (SFUNC = stfp, STYPE = int4[], + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- P P -- should ERROR: ffnp(anyarray) not matched by ffnp(int[]) CREATE AGGREGATE myaggn04a(*) (SFUNC = stfp, STYPE = anyarray, @@ -225987,11 +224878,11 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +CREATE AGGREGATE myaggn04a(*) (SFUNC = stfp, STYPE = anyarray, + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- Case4 (R = N) && ((B = P) || (B = N)) -- ------------------------------------- -- S tf1 B tf2 -@@ -488,107 +718,235 @@ +@@ -488,107 +719,235 @@ -- should CREATE CREATE AGGREGATE myaggn05a(BASETYPE = int, SFUNC = tfnp, STYPE = int[], FINALFUNC = ffnp, INITCOND = '{}'); @@ -226000,7 +224891,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +CREATE AGGREGATE myaggn05a(BASETYPE = int, SFUNC = tfnp, STYPE = int[], + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev CREATE AGGREGATE myaggn05b(BASETYPE = int, SFUNC = tfnp, STYPE = int[], INITCOND = '{}'); +ERROR: at or near "myaggn05b": syntax error: unimplemented: this syntax @@ -226008,7 +224899,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +CREATE AGGREGATE myaggn05b(BASETYPE = int, SFUNC = tfnp, STYPE = int[], + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- N N N P -- should CREATE CREATE AGGREGATE myaggn06a(BASETYPE = int, SFUNC = tf2p, STYPE = int[], @@ -226018,7 +224909,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +CREATE AGGREGATE myaggn06a(BASETYPE = int, SFUNC = tf2p, STYPE = int[], + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev CREATE AGGREGATE myaggn06b(BASETYPE = int, SFUNC = tf2p, STYPE = int[], INITCOND = '{}'); +ERROR: at or near "myaggn06b": syntax error: unimplemented: this syntax @@ -226026,7 +224917,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +CREATE AGGREGATE myaggn06b(BASETYPE = int, SFUNC = tf2p, STYPE = int[], + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- N N P N -- should ERROR: tfnp(int[], anyelement) not matched by tfnp(int[], int) CREATE AGGREGATE myaggn07a(BASETYPE = anyelement, SFUNC = tfnp, STYPE = int[], @@ -226037,7 +224928,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +CREATE AGGREGATE myaggn07a(BASETYPE = anyelement, SFUNC = tfnp, STYPE = int[], + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev CREATE AGGREGATE myaggn07b(BASETYPE = anyelement, SFUNC = tfnp, STYPE = int[], INITCOND = '{}'); -ERROR: function tfnp(integer[], anyelement) does not exist @@ -226046,7 +224937,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +CREATE AGGREGATE myaggn07b(BASETYPE = anyelement, SFUNC = tfnp, STYPE = int[], + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- N N P P -- should CREATE CREATE AGGREGATE myaggn08a(BASETYPE = anyelement, SFUNC = tf2p, STYPE = int[], @@ -226056,7 +224947,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +CREATE AGGREGATE myaggn08a(BASETYPE = anyelement, SFUNC = tf2p, STYPE = int[], + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev CREATE AGGREGATE myaggn08b(BASETYPE = anyelement, SFUNC = tf2p, STYPE = int[], INITCOND = '{}'); +ERROR: at or near "myaggn08b": syntax error: unimplemented: this syntax @@ -226064,7 +224955,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +CREATE AGGREGATE myaggn08b(BASETYPE = anyelement, SFUNC = tf2p, STYPE = int[], + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- N P N N -- should CREATE CREATE AGGREGATE myaggn09a(BASETYPE = int, SFUNC = tf1p, STYPE = int[], @@ -226074,7 +224965,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +CREATE AGGREGATE myaggn09a(BASETYPE = int, SFUNC = tf1p, STYPE = int[], + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- N P N P -- should CREATE CREATE AGGREGATE myaggn10a(BASETYPE = int, SFUNC = tfp, STYPE = int[], @@ -226084,7 +224975,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +CREATE AGGREGATE myaggn10a(BASETYPE = int, SFUNC = tfp, STYPE = int[], + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- N P P N -- should ERROR: tf1p(int[],anyelement) not matched by tf1p(anyarray,int) CREATE AGGREGATE myaggn11a(BASETYPE = anyelement, SFUNC = tf1p, STYPE = int[], @@ -226095,7 +224986,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +CREATE AGGREGATE myaggn11a(BASETYPE = anyelement, SFUNC = tf1p, STYPE = int[], + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- N P P P -- should ERROR: tfp(int[],anyelement) not matched by tfp(anyarray,anyelement) CREATE AGGREGATE myaggn12a(BASETYPE = anyelement, SFUNC = tfp, STYPE = int[], @@ -226106,7 +224997,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +CREATE AGGREGATE myaggn12a(BASETYPE = anyelement, SFUNC = tfp, STYPE = int[], + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- P N N N -- should ERROR: tfnp(anyarray, int) not matched by tfnp(int[],int) CREATE AGGREGATE myaggn13a(BASETYPE = int, SFUNC = tfnp, STYPE = anyarray, @@ -226118,7 +225009,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +CREATE AGGREGATE myaggn13a(BASETYPE = int, SFUNC = tfnp, STYPE = anyarray, + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev CREATE AGGREGATE myaggn13b(BASETYPE = int, SFUNC = tfnp, STYPE = anyarray, INITCOND = '{}'); -ERROR: cannot determine transition data type @@ -226128,7 +225019,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +CREATE AGGREGATE myaggn13b(BASETYPE = int, SFUNC = tfnp, STYPE = anyarray, + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- P N N P -- should ERROR: tf2p(anyarray, int) not matched by tf2p(int[],anyelement) CREATE AGGREGATE myaggn14a(BASETYPE = int, SFUNC = tf2p, STYPE = anyarray, @@ -226140,7 +225031,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +CREATE AGGREGATE myaggn14a(BASETYPE = int, SFUNC = tf2p, STYPE = anyarray, + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev CREATE AGGREGATE myaggn14b(BASETYPE = int, SFUNC = tf2p, STYPE = anyarray, INITCOND = '{}'); -ERROR: cannot determine transition data type @@ -226150,7 +225041,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +CREATE AGGREGATE myaggn14b(BASETYPE = int, SFUNC = tf2p, STYPE = anyarray, + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- P N P N -- should ERROR: tfnp(anyarray, anyelement) not matched by tfnp(int[],int) CREATE AGGREGATE myaggn15a(BASETYPE = anyelement, SFUNC = tfnp, @@ -226161,7 +225052,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +CREATE AGGREGATE myaggn15a(BASETYPE = anyelement, SFUNC = tfnp, + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev CREATE AGGREGATE myaggn15b(BASETYPE = anyelement, SFUNC = tfnp, STYPE = anyarray, INITCOND = '{}'); -ERROR: function tfnp(anyarray, anyelement) does not exist @@ -226170,7 +225061,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +CREATE AGGREGATE myaggn15b(BASETYPE = anyelement, SFUNC = tfnp, + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- P N P P -- should ERROR: tf2p(anyarray, anyelement) not matched by tf2p(int[],anyelement) CREATE AGGREGATE myaggn16a(BASETYPE = anyelement, SFUNC = tf2p, @@ -226181,7 +225072,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +CREATE AGGREGATE myaggn16a(BASETYPE = anyelement, SFUNC = tf2p, + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev CREATE AGGREGATE myaggn16b(BASETYPE = anyelement, SFUNC = tf2p, STYPE = anyarray, INITCOND = '{}'); -ERROR: function tf2p(anyarray, anyelement) does not exist @@ -226190,7 +225081,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +CREATE AGGREGATE myaggn16b(BASETYPE = anyelement, SFUNC = tf2p, + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- P P N N -- should ERROR: ffnp(anyarray) not matched by ffnp(int[]) CREATE AGGREGATE myaggn17a(BASETYPE = int, SFUNC = tf1p, STYPE = anyarray, @@ -226202,7 +225093,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +CREATE AGGREGATE myaggn17a(BASETYPE = int, SFUNC = tf1p, STYPE = anyarray, + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- P P N P -- should ERROR: tfp(anyarray, int) not matched by tfp(anyarray, anyelement) CREATE AGGREGATE myaggn18a(BASETYPE = int, SFUNC = tfp, STYPE = anyarray, @@ -226214,7 +225105,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +CREATE AGGREGATE myaggn18a(BASETYPE = int, SFUNC = tfp, STYPE = anyarray, + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- P P P N -- should ERROR: tf1p(anyarray, anyelement) not matched by tf1p(anyarray, int) CREATE AGGREGATE myaggn19a(BASETYPE = anyelement, SFUNC = tf1p, @@ -226225,7 +225116,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +CREATE AGGREGATE myaggn19a(BASETYPE = anyelement, SFUNC = tf1p, + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- P P P P -- should ERROR: ffnp(anyarray) not matched by ffnp(int[]) CREATE AGGREGATE myaggn20a(BASETYPE = anyelement, SFUNC = tfp, @@ -226236,7 +225127,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +CREATE AGGREGATE myaggn20a(BASETYPE = anyelement, SFUNC = tfp, + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- multi-arg polymorphic CREATE AGGREGATE mysum2(anyelement,anyelement) (SFUNC = sum3, STYPE = anyelement, INITCOND = '0'); @@ -226245,11 +225136,11 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +CREATE AGGREGATE mysum2(anyelement,anyelement) (SFUNC = sum3, + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- create test data for polymorphic aggregates create temp table t(f1 int, f2 int[], f3 text); insert into t values(1,array[1],'a'); -@@ -601,220 +959,66 @@ +@@ -601,220 +960,66 @@ insert into t values(3,array[3],'b'); -- test the successfully created polymorphic aggregates select f3, myaggp01a(*) from t group by f3 order by f3; @@ -226496,7 +225387,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out select q2, sql_if(q2 > 0, q2, q2 + 1) from int8_tbl; q2 | sql_if -------------------+------------------- -@@ -832,20 +1036,18 @@ +@@ -832,20 +1037,18 @@ stype = anyarray, initcond = '{}' ); @@ -226505,7 +225396,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +CREATE AGGREGATE array_larger_accum (anyarray) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev SELECT array_larger_accum(i) FROM (VALUES (ARRAY[1,2]), (ARRAY[3,4])) as t(i); - array_larger_accum @@ -226525,7 +225416,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out -- another kind of polymorphic aggregate create function add_group(grp anyarray, ad anyelement, size integer) returns anyarray -@@ -861,30 +1063,137 @@ +@@ -861,30 +1064,137 @@ end; $$ language plpgsql immutable; @@ -226585,7 +225476,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +create aggregate build_group(anyelement, integer) ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev select build_group(q1,3) from int8_tbl; - build_group ----------------------------- @@ -226604,7 +225495,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +create aggregate build_group(int8, integer) ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- but we can make a non-poly agg from a poly sfunc if types are OK create aggregate build_group(int8, integer) ( SFUNC = add_group, @@ -226615,7 +225506,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +create aggregate build_group(int8, integer) ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev -- check proper resolution of data types for polymorphic transfn/finalfn create function first_el_transfn(anyarray, anyelement) returns anyarray as 'select $1 || $2' language sql immutable; @@ -226669,7 +225560,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out create function first_el(anyarray) returns anyelement as 'select $1[1]' language sql strict immutable; create aggregate first_el_agg_f8(float8) ( -@@ -892,186 +1201,225 @@ +@@ -892,186 +1202,228 @@ STYPE = float8[], FINALFUNC = first_el ); @@ -226678,7 +225569,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +create aggregate first_el_agg_f8(float8) ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev create aggregate first_el_agg_any(anyelement) ( SFUNC = first_el_transfn, STYPE = anyarray, @@ -226689,7 +225580,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +create aggregate first_el_agg_any(anyelement) ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev select first_el_agg_f8(x::float8) from generate_series(1,10) x; - first_el_agg_f8 ------------------ @@ -226778,7 +225669,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +create function myleast(variadic anyarray) returns anyelement as $$ + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/88947/v24.2 ++See: https://go.crdb.dev/issue-v/88947/dev select myleast(10, 1, 20, 33); - myleast ---------- @@ -226877,15 +225768,16 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +create function concat(text, variadic anyarray) returns text as $$ + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/88947/v24.2 ++See: https://go.crdb.dev/issue-v/88947/dev select concat('%', 1, 2, 3, 4, 5); - concat ------------ - 1%2%3%4%5 --(1 row) -- -+ERROR: unknown signature: concat(string, int, int, int, int, int) -+HINT: No function matches the given name and argument types. You might need to add explicit type casts. ++ concat ++-------- ++ %12345 + (1 row) + select concat('|', 'a'::text, 'b', 'c'); concat -------- @@ -226934,7 +225826,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +We appreciate your feedback. drop function concat(text, anyarray); -+ERROR: function concat(string,anyarray) does not exist ++ERROR: concat(string,anyarray) is not a function -- mix variadic with anyelement create function formarray(anyelement, variadic anyarray) returns anyarray as $$ select array_prepend($1, $2); @@ -226944,7 +225836,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +create function formarray(anyelement, variadic anyarray) returns anyarray as $$ + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/88947/v24.2 ++See: https://go.crdb.dev/issue-v/88947/dev select formarray(1,2,3,4,5); - formarray -------------- @@ -227010,11 +225902,11 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +drop function formarray(anyelement, variadic anyarray) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/88947/v24.2 ++See: https://go.crdb.dev/issue-v/88947/dev -- test pg_typeof() function select pg_typeof(null); -- unknown pg_typeof -@@ -1082,7 +1430,7 @@ +@@ -1082,7 +1434,7 @@ select pg_typeof(0); -- integer pg_typeof ----------- @@ -227023,7 +225915,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out (1 row) select pg_typeof(0.0); -- numeric -@@ -1100,7 +1448,7 @@ +@@ -1100,7 +1452,7 @@ select pg_typeof('x'); -- unknown pg_typeof ----------- @@ -227032,7 +225924,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out (1 row) select pg_typeof('' || ''); -- text -@@ -1112,7 +1460,7 @@ +@@ -1112,7 +1464,7 @@ select pg_typeof(pg_typeof(0)); -- regtype pg_typeof ----------- @@ -227041,7 +225933,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out (1 row) select pg_typeof(array[1.2,55.5]); -- numeric[] -@@ -1122,11 +1470,7 @@ +@@ -1122,11 +1474,7 @@ (1 row) select pg_typeof(myleast(10, 1, 20, 33)); -- polymorphic input @@ -227054,7 +225946,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out -- test functions with default parameters -- test basic functionality create function dfunc(a int = 1, int = 2) returns int as $$ -@@ -1151,14 +1495,12 @@ +@@ -1151,14 +1499,12 @@ (1 row) select dfunc(10, 20, 30); -- fail @@ -227071,7 +225963,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out drop function dfunc(int, int); -- ok -- fail: defaults must be at end of argument list create function dfunc(a int = 1, b int) returns int as $$ -@@ -1169,21 +1511,29 @@ +@@ -1169,21 +1515,29 @@ create function dfunc(a int = 1, out sum int, b int = 2) as $$ select $1 + $2; $$ language sql; @@ -227112,7 +226004,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out -- check implicit coercion create function dfunc(a int DEFAULT 1.0, int DEFAULT '-1') returns int as $$ select $1 + $2; -@@ -1198,10 +1548,7 @@ +@@ -1198,10 +1552,7 @@ select $1 || ', ' || $2; $$ language sql; select dfunc(); -- fail: which dfunc should be called? int or text @@ -227124,7 +226016,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out select dfunc('Hi'); -- ok dfunc ----------- -@@ -1237,20 +1584,17 @@ +@@ -1237,20 +1588,17 @@ -- Now, dfunc(nargs = 2) and dfunc(nargs = 4) are ambiguous when called -- with 0 to 2 arguments. select dfunc(); -- fail @@ -227154,7 +226046,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out select dfunc(1, 2, 3); -- ok dfunc ------- -@@ -1274,84 +1618,79 @@ +@@ -1274,84 +1622,79 @@ create function dfunc(anyelement = 'World'::text) returns text as $$ select 'Hello, ' || $1::text; $$ language sql; @@ -227197,7 +226089,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +create function dfunc(a variadic int[]) returns int as + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/88947/v24.2 ++See: https://go.crdb.dev/issue-v/88947/dev select dfunc(); -- fail -ERROR: function dfunc() does not exist -LINE 1: select dfunc(); @@ -227225,7 +226117,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +create or replace function dfunc(a variadic int[] default array[]::int[]) returns int as + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/88947/v24.2 ++See: https://go.crdb.dev/issue-v/88947/dev select dfunc(); -- now ok - dfunc -------- @@ -227257,7 +226149,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +create or replace function dfunc(a variadic int[]) returns int as + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/88947/v24.2 ++See: https://go.crdb.dev/issue-v/88947/dev \df dfunc - List of functions - Schema | Name | Result data type | Argument data types | Type @@ -227287,11 +226179,11 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +drop function dfunc(a variadic int[]) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/88947/v24.2 ++See: https://go.crdb.dev/issue-v/88947/dev -- Ambiguity should be reported only if there's not a better match available create function dfunc(int = 1, int = 2, int = 3) returns int as $$ select 3; -@@ -1364,10 +1703,10 @@ +@@ -1364,10 +1707,10 @@ $$ language sql; -- dfunc(narg=2) and dfunc(narg=3) are ambiguous select dfunc(1); -- fail @@ -227306,7 +226198,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out -- but this works since the ambiguous functions aren't preferred anyway select dfunc('Hi'); dfunc -@@ -1385,185 +1724,156 @@ +@@ -1385,185 +1728,156 @@ returns table (a int, b int, c int, d int) as $$ select $1, $2, $3, $4; $$ language sql; @@ -227316,7 +226208,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out + returns table (a int, b int, c int, d int) as $$ + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/100226/v24.2 ++See: https://go.crdb.dev/issue-v/100226/dev select (dfunc(10,20,30)).*; - a | b | c | d -----+----+----+--- @@ -227442,7 +226334,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out + returns table (a varchar, b numeric, c date) as $$ + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/100226/v24.2 ++See: https://go.crdb.dev/issue-v/100226/dev select (dfunc('Hello World', 20, '2009-07-25'::date)).*; - a | b | c --------------+----+------------ @@ -227592,7 +226484,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out --fail, named parameters are not unique create function testpolym(a int, a int) returns int as $$ select 1;$$ language sql; ERROR: parameter name "a" used more than once -@@ -1583,13 +1893,16 @@ +@@ -1583,13 +1897,16 @@ drop function testpolym(int); create function testpolym(a int) returns table(a int) as $$ select $1;$$ language sql; @@ -227601,7 +226493,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +create function testpolym(a int) returns table(a int) as $$ select $1;$$ language sql + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/100226/v24.2 ++See: https://go.crdb.dev/issue-v/100226/dev select * from testpolym(37); - a ----- @@ -227614,7 +226506,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out -- test polymorphic params and defaults create function dfunc(a anyelement, b anyelement = null, flag bool = true) returns anyelement as $$ -@@ -1608,47 +1921,40 @@ +@@ -1608,47 +1925,40 @@ (1 row) select dfunc(a := 1, b := 2); @@ -227690,7 +226582,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out select dfunc('a'::text, 'b', false); -- full positional notation dfunc ------- -@@ -1656,11 +1962,10 @@ +@@ -1656,11 +1966,10 @@ (1 row) select dfunc('a'::text, 'b', flag := false); -- mixed notation @@ -227706,7 +226598,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out select dfunc('a'::text, 'b', true); -- full positional notation dfunc ------- -@@ -1668,54 +1973,46 @@ +@@ -1668,54 +1977,46 @@ (1 row) select dfunc('a'::text, 'b', flag := true); -- mixed notation @@ -227793,7 +226685,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out select dfunc('a'::text, 'b', false); -- full positional notation dfunc ------- -@@ -1723,11 +2020,10 @@ +@@ -1723,11 +2024,10 @@ (1 row) select dfunc('a'::text, 'b', flag => false); -- mixed notation @@ -227809,7 +226701,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out select dfunc('a'::text, 'b', true); -- full positional notation dfunc ------- -@@ -1735,37 +2031,32 @@ +@@ -1735,37 +2035,32 @@ (1 row) select dfunc('a'::text, 'b', flag => true); -- mixed notation @@ -227867,7 +226759,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out -- need DO to protect the -- from psql do $$ declare r integer; -@@ -1775,39 +2066,36 @@ +@@ -1775,39 +2070,36 @@ raise info 'r = %', r; end; $$; @@ -227928,7 +226820,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out drop function dfunc(anyelement, anyelement, bool); -- -- Tests for ANYCOMPATIBLE polymorphism family -@@ -1816,283 +2104,213 @@ +@@ -1816,283 +2108,213 @@ returns anycompatible as $$ select greatest($1, $2) $$ language sql; @@ -228248,7 +227140,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +create function anyctest(variadic anycompatiblearray) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/88947/v24.2 ++See: https://go.crdb.dev/issue-v/88947/dev select x, pg_typeof(x) from anyctest(11, 12) x; - x | pg_typeof ----------+----------- @@ -228321,7 +227213,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/polymorphism.out +drop function anyctest(variadic anycompatiblearray) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/88947/v24.2 ++See: https://go.crdb.dev/issue-v/88947/dev diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowtypes.out --label=/mnt/data1/postgres/src/test/regress/results/rowtypes.out /mnt/data1/postgres/src/test/regress/expected/rowtypes.out /mnt/data1/postgres/src/test/regress/results/rowtypes.out --- /mnt/data1/postgres/src/test/regress/expected/rowtypes.out +++ /mnt/data1/postgres/src/test/regress/results/rowtypes.out @@ -228331,7 +227223,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowtypes.out --la create type quad as (c1 complex, c2 complex); +ERROR: unimplemented: composite types that reference user-defined types not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/91779/v24.2 ++See: https://go.crdb.dev/issue-v/91779/dev -- Some simple tests of I/O conversions and row construction select (1.1,2.2)::complex, row((3.3,4.4),(5.5,null))::quad; - row | row @@ -228445,7 +227337,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowtypes.out --la create temp table people (fn fullname, bd date); +ERROR: unimplemented: cannot use table record type as table column +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/70099/v24.2 ++See: https://go.crdb.dev/issue-v/70099/dev insert into people values ('(Joe,Blow)', '1984-01-10'); +ERROR: relation "people" does not exist select * from people; @@ -228475,7 +227367,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowtypes.out --la +update people set fn.suffix = 'Jr' + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27792/v24.2 ++See: https://go.crdb.dev/issue-v/27792/dev select * from people; - fn | bd ----------------+------------ @@ -228489,14 +227381,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowtypes.out --la +insert into quadtable (f1, q.c1.r, q.c2.i) values(44,55,66) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27792/v24.2 ++See: https://go.crdb.dev/issue-v/27792/dev update quadtable set q.c1.r = 12 where f1 = 2; +ERROR: at or near "c1": syntax error: unimplemented: this syntax +DETAIL: source SQL: +update quadtable set q.c1.r = 12 where f1 = 2 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27792/v24.2 ++See: https://go.crdb.dev/issue-v/27792/dev update quadtable set q.c1 = 12; -- error, type mismatch -ERROR: subfield "c1" is of type complex but expression is of type integer -LINE 1: update quadtable set q.c1 = 12; @@ -228507,7 +227399,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowtypes.out --la +update quadtable set q.c1 = 12 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27792/v24.2 ++See: https://go.crdb.dev/issue-v/27792/dev select * from quadtable; - f1 | q -----+--------------------------- @@ -228539,7 +227431,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowtypes.out --la +update people set fn.first = 'Jack' + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/27792/v24.2 ++See: https://go.crdb.dev/issue-v/27792/dev select (fn).first, substr((fn).last, 1, 20), length((fn).last) from people; - first | substr | length --------+----------------------+--------- @@ -228741,7 +227633,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowtypes.out --la - 4567890123456789 | 123 -(2 rows) - -+ERROR: unsupported comparison operator: ((q1, q2) AS q1, q2) IN ((123, 456)::INT8_TBL, '(4567890123456789,123)'): expected '(4567890123456789,123)' to be of type tuple, found type string ++ERROR: unsupported comparison operator: ((q1, q2) AS q1, q2) IN ((123, 456)::int8_tbl, '(4567890123456789,123)'): expected '(4567890123456789,123)' to be of type tuple, found type string -- Check ability to select columns from an anonymous rowtype select (row(1, 2.0)).f1; - f1 @@ -228815,7 +227707,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowtypes.out --la +create type cantcompare as (p point, r float8) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev create temp table cc (f1 cantcompare); +ERROR: type "cantcompare" does not exist insert into cc values('("(1,2)",3)'); @@ -228854,7 +227746,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowtypes.out --la +create type testtype6 as (a int, b point) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev select row(1, '(1,2)')::testtype6 < row(1, '(1,3)')::testtype6; -ERROR: could not identify a comparison function for type point +ERROR: type "testtype6" does not exist @@ -229092,7 +227984,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowtypes.out --la +create type testtype4 as (a int, b point) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev select row(1, '(1,2)')::testtype4 *< row(1, '(1,3)')::testtype4; - ?column? ----------- @@ -229147,7 +228039,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowtypes.out --la +create type testtype6 as (a int, b point) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/21286/v24.2 ++See: https://go.crdb.dev/issue-v/21286/dev select row(1, '(1,2)')::testtype6 *< row(1, '(1,3)')::testtype6; - ?column? ----------- @@ -229199,7 +228091,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowtypes.out --la active BOOLEAN NOT NULL, price NUMERIC ); -+NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. See https://www.cockroachlabs.com/docs/v24.2/serial.html ++NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. 
See https://www.cockroachlabs.com/docs/dev/serial.html CREATE TYPE price_input AS ( id INTEGER, price NUMERIC @@ -229234,7 +228126,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowtypes.out --la rollback; -- -- Test case derived from bug #9085: check * qualification of composite -@@ -907,44 +873,32 @@ +@@ -907,44 +873,30 @@ create function fcompos1(v compos) returns void as $$ insert into compos values (v); -- fail $$ language sql; @@ -229290,7 +228182,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowtypes.out --la -- -- We allow I/O conversion casts from composite types to strings to be -@@ -962,14 +916,12 @@ +@@ -962,14 +914,12 @@ (0 rows) select text(fullname) from fullname; -- error @@ -229310,7 +228202,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowtypes.out --la -- same, but RECORD instead of named composite type: select cast (row('Jim', 'Beam') as text); row -@@ -984,14 +936,13 @@ +@@ -984,14 +934,13 @@ (1 row) select text(row('Jim', 'Beam')); -- error @@ -229330,7 +228222,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowtypes.out --la -- -- Check the equivalence of functional and column notation -- -@@ -1003,25 +954,17 @@ +@@ -1003,25 +952,17 @@ (1 row) select last(f) from fullname f; @@ -229363,7 +228255,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowtypes.out --la -- Starting in v11, the notational form does matter if there's ambiguity alter table fullname add column longname text; select f.longname from fullname f; -@@ -1031,159 +974,164 @@ +@@ -1031,159 +972,164 @@ (1 row) select longname(f) from fullname f; @@ -229583,7 +228475,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowtypes.out --la +create temp table tt2 () inherits(tt1) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev insert into tt2 values(0,0); select row_to_json(r) from (select q2,q1 from tt1 offset 0) r; - row_to_json @@ -229605,7 +228497,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowtypes.out --la +create temp table tt3 () inherits(tt2) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev insert into tt3 values(33,44); select row_to_json(tt3::tt2::tt1) from tt3; - row_to_json @@ -229617,7 +228509,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowtypes.out --la (1 row) -- -@@ -1193,53 +1141,30 @@ +@@ -1193,53 +1139,30 @@ select r, r is null as isnull, r is not null as isnotnull from (values (1,row(1,2)), (1,row(null,null)), (1,null), (null,row(1,2)), (null,row(null,null)), (null,null) ) r(a,b); @@ -229683,7 +228575,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowtypes.out --la -- -- Tests for component access / FieldSelect -- -@@ -1247,9 +1172,7 @@ +@@ -1247,9 +1170,7 @@ INSERT INTO compositetable(a, b) VALUES('fa', 'fb'); -- composite type columns can't directly be accessed (error) SELECT d.a FROM (SELECT compositetable AS d FROM compositetable) s; @@ -229694,7 +228586,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowtypes.out --la -- but can be accessed with proper parens SELECT (d).a, (d).b FROM (SELECT compositetable AS d FROM compositetable) s; a | b -@@ -1259,14 +1182,10 @@ +@@ -1259,14 +1180,10 @@ -- system columns can't be accessed in composite types (error) SELECT (d).ctid FROM (SELECT compositetable AS d FROM compositetable) s; @@ -229711,7 +228603,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/rowtypes.out --la -- existing column in a NULL composite yield NULL SELECT (NULL::compositetable).a; a -@@ -1276,7 +1195,5 @@ +@@ -1276,7 +1193,5 @@ -- oids can't be accessed in composite types (error) SELECT (NULL::compositetable).oid; @@ -229732,7 +228624,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe +create domain domaindroptest int4 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev comment on domain domaindroptest is 'About to drop this..'; +ERROR: at or near "domain": syntax error +DETAIL: source SQL: @@ -229744,7 +228636,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe +create domain dependenttypetest domaindroptest + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev -- fail because of dependent type drop domain domaindroptest; -ERROR: cannot drop type domaindroptest because other objects depend on it @@ -229755,7 +228647,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe +drop domain domaindroptest + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev drop domain domaindroptest cascade; -NOTICE: drop cascades to type dependenttypetest +ERROR: at or near "domaindroptest": syntax error: unimplemented: this syntax @@ -229763,7 +228655,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe +drop domain domaindroptest cascade + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev -- this should fail because already gone drop domain domaindroptest cascade; -ERROR: type "domaindroptest" does not exist @@ -229772,7 +228664,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe +drop domain domaindroptest cascade + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev -- Test domain input. -- Note: the point of checking both INSERT and COPY FROM is that INSERT -- exercises CoerceToDomain while COPY exercises domain_in. @@ -229782,28 +228674,28 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe +create domain domainvarchar varchar(5) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev create domain domainnumeric numeric(8,2); +ERROR: at or near "numeric": syntax error: unimplemented: this syntax +DETAIL: source SQL: +create domain domainnumeric numeric(8,2) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev create domain domainint4 int4; +ERROR: at or near "int4": syntax error: unimplemented: this syntax +DETAIL: source SQL: +create domain domainint4 int4 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev create domain domaintext text; +ERROR: at or near "text": syntax error: unimplemented: this syntax +DETAIL: source SQL: +create domain domaintext text + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev -- Test explicit coercions --- these should succeed (and truncate) SELECT cast('123456' as domainvarchar); - domainvarchar @@ -229896,28 +228788,28 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe +drop domain domainvarchar restrict + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev drop domain domainnumeric restrict; +ERROR: at or near "domainnumeric": syntax error: unimplemented: this syntax +DETAIL: source SQL: +drop domain domainnumeric restrict + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev drop domain domainint4 restrict; +ERROR: at or near "domainint4": syntax error: unimplemented: this syntax +DETAIL: source SQL: +drop domain domainint4 restrict + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev drop domain domaintext; +ERROR: at or near "domaintext": syntax error: unimplemented: this syntax +DETAIL: source SQL: +drop domain domaintext + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev -- Test non-error-throwing input create domain positiveint int4 check(value > 0); +ERROR: at or near "int4": syntax error: unimplemented: this syntax @@ -229925,14 +228817,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe +create domain positiveint int4 check(value > 0) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev create domain weirdfloat float8 check((1 / value) < 10); +ERROR: at or near "float8": syntax error: unimplemented: this syntax +DETAIL: source SQL: +create domain weirdfloat float8 check((1 / value) < 10) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev select pg_input_is_valid('1', 'positiveint'); - pg_input_is_valid -------------------- @@ -229992,14 +228884,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe +drop domain positiveint + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev drop domain weirdfloat; +ERROR: at or near "weirdfloat": syntax error: unimplemented: this syntax +DETAIL: source SQL: +drop domain weirdfloat + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev -- Test domains over array types create domain domainint4arr int4[1]; +ERROR: at or near "int4": syntax error: unimplemented: this syntax @@ -230007,14 +228899,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe +create domain domainint4arr int4[1] + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev create domain domainchar4arr varchar(4)[2][3]; +ERROR: at or near "varchar": syntax error: unimplemented: this syntax +DETAIL: source SQL: +create domain domainchar4arr varchar(4)[2][3] + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev create table domarrtest ( testint4arr domainint4arr , testchar4arr domainchar4arr @@ -230135,21 +229027,21 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe +drop domain domainint4arr restrict + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev drop domain domainchar4arr restrict; +ERROR: at or near "domainchar4arr": syntax error: unimplemented: this syntax +DETAIL: source SQL: +drop domain domainchar4arr restrict + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev create domain dia as int[]; +ERROR: at or near "as": syntax error: unimplemented: this syntax +DETAIL: source SQL: +create domain dia as int[] + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev select '{1,2,3}'::dia; - dia ---------- @@ -230184,7 +229076,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe +drop domain dia + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev -- Test domains over composites create type comptype as (r float8, i float8); create domain dcomptype as comptype; @@ -230193,7 +229085,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe +create domain dcomptype as comptype + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev create table dcomptable (d1 dcomptype unique); +ERROR: type "dcomptype" does not exist insert into dcomptable values (row(1,2)::dcomptype); @@ -230210,7 +229102,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe +insert into dcomptable (d1.r) values(11) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27792/v24.2 ++See: https://go.crdb.dev/issue-v/27792/dev select * from dcomptable; - d1 -------- @@ -230235,7 +229127,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe +update dcomptable set d1.r = (d1).r + 1 where (d1).i > 0 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27792/v24.2 ++See: https://go.crdb.dev/issue-v/27792/dev select * from dcomptable; - d1 -------- @@ -230292,14 +229184,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe +insert into dcomptable (d1.r) values(99) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27792/v24.2 ++See: https://go.crdb.dev/issue-v/27792/dev insert into dcomptable (d1.r, d1.i) values(99, 100); +ERROR: at or near "r": syntax error: unimplemented: this syntax +DETAIL: source SQL: +insert into dcomptable (d1.r, d1.i) values(99, 100) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27792/v24.2 ++See: https://go.crdb.dev/issue-v/27792/dev insert into dcomptable (d1.r, d1.i) values(100, 99); -- fail -ERROR: value for domain dcomptype violates check constraint "c1" +ERROR: at or near "r": syntax error: unimplemented: this syntax @@ -230307,7 +229199,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe +insert into dcomptable (d1.r, d1.i) values(100, 99) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27792/v24.2 ++See: https://go.crdb.dev/issue-v/27792/dev update dcomptable set d1.r = (d1).r + 1 where (d1).i > 0; -- fail -ERROR: value for domain dcomptype violates check constraint "c1" +ERROR: at or near "r": syntax error: unimplemented: this syntax @@ -230315,14 +229207,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe +update dcomptable set d1.r = (d1).r + 1 where (d1).i > 0 + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/27792/v24.2 ++See: https://go.crdb.dev/issue-v/27792/dev update dcomptable set d1.r = (d1).r - 1, d1.i = (d1).i + 1 where (d1).i > 0; +ERROR: at or near "r": syntax error: unimplemented: this syntax +DETAIL: source SQL: +update dcomptable set d1.r = (d1).r - 1, d1.i = (d1).i + 1 where (d1).i > 0 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27792/v24.2 ++See: https://go.crdb.dev/issue-v/27792/dev select * from dcomptable; - d1 ----------- @@ -230423,7 +229315,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe -NOTICE: drop cascades to type dcomptype +ERROR: unimplemented: DROP TYPE CASCADE is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/51480/v24.2 ++See: https://go.crdb.dev/issue-v/51480/dev -- check altering and dropping columns used by domain constraints create type comptype as (r float8, i float8); +ERROR: type "root.public.comptype" already exists @@ -230433,7 +229325,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe +create domain dcomptype as comptype + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev alter domain dcomptype add constraint c1 check ((value).r > 0); +ERROR: at or near "dcomptype": syntax error: unimplemented: this syntax +DETAIL: source SQL: @@ -230466,14 +229358,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe +alter type comptype alter attribute r type varchar + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/48701/v24.2 ++See: https://go.crdb.dev/issue-v/48701/dev alter type comptype alter attribute r type bigint; +ERROR: at or near "EOF": syntax error: unimplemented: this syntax +DETAIL: source SQL: +alter type comptype alter attribute r type bigint + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/48701/v24.2 ++See: https://go.crdb.dev/issue-v/48701/dev alter type comptype drop attribute r; -- fail -ERROR: cannot drop column r of composite type comptype because other objects depend on it -DETAIL: constraint c1 depends on column r of composite type comptype @@ -230483,14 +229375,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe +alter type comptype drop attribute r + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/48701/v24.2 ++See: https://go.crdb.dev/issue-v/48701/dev alter type comptype drop attribute i; +ERROR: at or near "EOF": syntax error: unimplemented: this syntax +DETAIL: source SQL: +alter type comptype drop attribute i + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/48701/v24.2 ++See: https://go.crdb.dev/issue-v/48701/dev select conname, obj_description(oid, 'pg_constraint') from pg_constraint where contypid = 'dcomptype'::regtype; -- check comment is still there - conname | obj_description @@ -230503,7 +229395,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe -NOTICE: drop cascades to type dcomptype +ERROR: unimplemented: DROP TYPE CASCADE is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/51480/v24.2 ++See: https://go.crdb.dev/issue-v/51480/dev -- Test domains over arrays of composite create type comptype as (r float8, i float8); +ERROR: type "root.public.comptype" already exists @@ -230513,7 +229405,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe +create domain dcomptypea as comptype[] + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev create table dcomptable (d1 dcomptypea unique); +ERROR: type "dcomptypea" does not exist insert into dcomptable values (array[row(1,2)]::dcomptypea); @@ -230746,7 +229638,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe -NOTICE: drop cascades to type dcomptypea +ERROR: unimplemented: DROP TYPE CASCADE is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/51480/v24.2 ++See: https://go.crdb.dev/issue-v/51480/dev -- Test arrays over domains create domain posint as int check (value > 0); +ERROR: at or near "as": syntax error: unimplemented: this syntax @@ -230754,7 +229646,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe +create domain posint as int check (value > 0) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev create table pitable (f1 posint[]); +ERROR: type "posint" does not exist insert into pitable values(array[42]); @@ -230795,7 +229687,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe +create domain vc4 as varchar(4) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev create table vc4table (f1 vc4[]); +ERROR: type "vc4" does not exist insert into vc4table values(array['too long']); -- fail @@ -230821,7 +229713,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe +create domain dposinta as posint[] + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev create table dposintatable (f1 dposinta[]); +ERROR: type "dposinta" does not exist insert into dposintatable values(array[array[42]]); -- fail @@ -230913,7 +229805,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe +drop domain posint cascade + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev -- Test arrays over domains of composite create type comptype as (cf1 int, cf2 int); +ERROR: type "root.public.comptype" already exists @@ -230923,7 +229815,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe +create domain dcomptype as comptype check ((value).cf1 > 0) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev create table dcomptable (f1 dcomptype[]); +ERROR: type "dcomptype" does not exist insert into dcomptable values (null); @@ -230997,7 +229889,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe -NOTICE: drop cascades to type dcomptype +ERROR: unimplemented: DROP TYPE CASCADE is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/51480/v24.2 ++See: https://go.crdb.dev/issue-v/51480/dev -- Test not-null restrictions create domain dnotnull varchar(15) NOT NULL; +ERROR: at or near "varchar": syntax error: unimplemented: this syntax @@ -231005,21 +229897,21 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe +create domain dnotnull varchar(15) NOT NULL + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev create domain dnull varchar(15); +ERROR: at or near "varchar": syntax error: unimplemented: this syntax +DETAIL: source SQL: +create domain dnull varchar(15) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev create domain dcheck varchar(15) NOT NULL CHECK (VALUE = 'a' OR VALUE = 'c' OR VALUE = 'd'); +ERROR: at or near "varchar": syntax error: unimplemented: this syntax +DETAIL: source SQL: +create domain dcheck varchar(15) NOT NULL CHECK (VALUE = 'a' OR VALUE = 'c' OR VALUE = 'd') + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev create table nulltest ( col1 dnotnull , col2 dnotnull NULL -- NOT NULL in the domain cannot be overridden @@ -231127,35 +230019,35 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe +drop domain dnotnull restrict + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev drop domain dnull restrict; +ERROR: at or near "dnull": syntax error: unimplemented: this syntax +DETAIL: source SQL: +drop domain dnull restrict + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev drop domain dcheck restrict; +ERROR: at or near "dcheck": syntax error: unimplemented: this syntax +DETAIL: source SQL: +drop domain dcheck restrict + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev create domain ddef1 int4 DEFAULT 3; +ERROR: at or near "int4": syntax error: unimplemented: this syntax +DETAIL: source SQL: +create domain ddef1 int4 DEFAULT 3 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev create domain ddef2 oid DEFAULT '12'; +ERROR: at or near "oid": syntax error: unimplemented: this syntax +DETAIL: source SQL: +create domain ddef2 oid DEFAULT '12' + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev -- Type mixing, function returns int8 create domain ddef3 text DEFAULT 5; +ERROR: at or near "text": syntax error: unimplemented: this syntax @@ -231163,7 +230055,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe +create domain ddef3 text DEFAULT 5 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev create sequence ddef4_seq; create domain ddef4 int4 DEFAULT nextval('ddef4_seq'); +ERROR: at or near "int4": syntax error: unimplemented: this syntax @@ -231171,14 +230063,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe +create domain ddef4 int4 DEFAULT nextval('ddef4_seq') + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev create domain ddef5 numeric(8,2) NOT NULL DEFAULT '12.12'; +ERROR: at or near "numeric": syntax error: unimplemented: this syntax +DETAIL: source SQL: +create domain ddef5 numeric(8,2) NOT NULL DEFAULT '12.12' + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev create table defaulttest ( col1 ddef1 , col2 ddef2 @@ -231236,7 +230128,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe +create domain dnotnulltest integer + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev create table domnotnull ( col1 dnotnulltest , col2 dnotnulltest @@ -231328,7 +230220,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe +drop domain dnotnulltest cascade + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev -- Test ALTER DOMAIN .. DEFAULT .. create table domdeftest (col1 ddef1); +ERROR: type "ddef1" does not exist @@ -231403,7 +230295,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe +create domain con as integer + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev create table domcontest (col1 con); +ERROR: type "con" does not exist insert into domcontest values (1); @@ -231529,7 +230421,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe +create domain things AS INT + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev CREATE TABLE thethings (stuff things); +ERROR: type "things" does not exist INSERT INTO thethings (stuff) VALUES (55); @@ -231610,7 +230502,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe +create domain dom as integer + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev create view domview as select cast(col1 as dom) from domtab; +ERROR: type "dom" does not exist insert into domtab (col1) values (null); @@ -231716,35 +230608,35 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe +drop domain ddef1 restrict + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev drop domain ddef2 restrict; +ERROR: at or near "ddef2": syntax error: unimplemented: this syntax +DETAIL: source SQL: +drop domain ddef2 restrict + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev drop domain ddef3 restrict; +ERROR: at or near "ddef3": syntax error: unimplemented: this syntax +DETAIL: source SQL: +drop domain ddef3 restrict + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev drop domain ddef4 restrict; +ERROR: at or near "ddef4": syntax error: unimplemented: this syntax +DETAIL: source SQL: +drop domain ddef4 restrict + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev drop domain ddef5 restrict; +ERROR: at or near "ddef5": syntax error: unimplemented: this syntax +DETAIL: source SQL: +drop domain ddef5 restrict + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev drop sequence ddef4_seq; -- Test domains over domains create domain vchar4 varchar(4); @@ -231753,21 +230645,21 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe +create domain vchar4 varchar(4) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev create domain dinter vchar4 check (substring(VALUE, 1, 1) = 'x'); +ERROR: at or near "vchar4": syntax error: unimplemented: this syntax +DETAIL: source SQL: +create domain dinter vchar4 check (substring(VALUE, 1, 1) = 'x') + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev create domain dtop dinter check (substring(VALUE, 2, 1) = '1'); +ERROR: at or near "dinter": syntax error: unimplemented: this syntax +DETAIL: source SQL: +create domain dtop dinter check (substring(VALUE, 2, 1) = '1') + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev select 'x123'::dtop; - dtop ------- @@ -231824,7 +230716,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe +drop domain vchar4 cascade + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev -- Make sure that constraints of newly-added domain columns are -- enforced correctly, even if there's no default value for the new -- column. 
Per bug #1433 @@ -231834,7 +230726,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe +create domain str_domain as text not null + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev create table domain_test (a int, b int); insert into domain_test values (1, 2); insert into domain_test values (1, 2); @@ -231848,7 +230740,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe +create domain str_domain2 as text check (value <> 'foo') default 'foo' + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev -- should fail alter table domain_test add column d str_domain2; -ERROR: value for domain str_domain2 violates check constraint "str_domain2_check" @@ -231861,7 +230753,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe +create domain pos_int as int4 check (value > 0) not null + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev prepare s1 as select $1::pos_int = 10 as "is_ten"; +ERROR: type "pos_int" does not exist execute s1(10); @@ -231934,7 +230826,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe +create domain posint as int4 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev -- Currently, this doesn't work for composite types, but verify it complains create type ddtest1 as (f1 posint); +ERROR: type "posint" does not exist @@ -231992,7 +230884,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe +create domain ddtest1d as ddtest1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev create table ddtest2(f1 ddtest1d); +ERROR: type "ddtest1d" does not exist insert into ddtest2 values('(-1)'); @@ -232022,7 +230914,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe +drop domain ddtest1d + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev -- Likewise for domains within domains over array of composite create domain ddtest1d as ddtest1[]; +ERROR: at or near "as": syntax error: unimplemented: this syntax @@ -232030,7 +230922,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe +create domain ddtest1d as ddtest1[] + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev create table ddtest2(f1 ddtest1d); +ERROR: type "ddtest1d" does not exist insert into ddtest2 values('{(-1)}'); @@ -232060,7 +230952,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe +drop domain ddtest1d + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev -- Doesn't work for ranges, either create type rposint as range (subtype = posint); +ERROR: at or near "(": syntax error: unimplemented: this syntax @@ -232068,7 +230960,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe +create type rposint as range (subtype = posint) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27791/v24.2 ++See: https://go.crdb.dev/issue-v/27791/dev create table ddtest2(f1 rposint); +ERROR: type "rposint" does not exist insert into ddtest2 values('(-1,3]'); @@ -232116,7 +231008,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe +create domain posint2 as posint check (value % 2 = 0) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev create table ddtest2(f1 posint2); +ERROR: type "posint2" does not exist insert into ddtest2 values(11); -- fail @@ -232171,7 +231063,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe +drop domain posint cascade + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev -- -- Check enforcement of domain-related typmod in plpgsql (bug #5717) -- @@ -232208,7 +231100,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe +create domain mynums as numeric(4,2)[1] + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev create or replace function array_elem_check(numeric) returns numeric as $$ declare x mynums; @@ -232245,7 +231137,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe +create domain mynums2 as mynums + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev create or replace function array_elem_check(numeric) returns numeric as $$ declare x mynums2; @@ -232287,7 +231179,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe +create domain orderedpair as int[2] check (value[1] < value[2]) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev select array[1,2]::orderedpair; - array -------- @@ -232365,7 +231257,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe +create domain di as int + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev create function dom_check(int) returns di as $$ declare d di; begin @@ -232490,7 +231382,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe +drop domain di + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev -- -- Check use of a (non-inline-able) SQL function in a domain constraint; -- this has caused issues in the past @@ -232503,7 +231395,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe +create domain inotnull int + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev select 1::inotnull; - inotnull ----------- @@ -232531,7 +231423,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe +drop domain inotnull + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev drop function sql_is_distinct_from(anyelement, anyelement); -- -- Renaming @@ -232542,7 +231434,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe +create domain testdomain1 as int + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev alter domain testdomain1 rename to testdomain2; +ERROR: at or near "testdomain1": syntax error: unimplemented: this syntax +DETAIL: source SQL: @@ -232567,7 +231459,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe +drop domain testdomain3 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev -- -- Renaming domain constraints -- @@ -232577,7 +231469,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe +create domain testdomain1 as int constraint unsigned check (value > 0) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev alter domain testdomain1 rename constraint unsigned to unsigned_foo; +ERROR: at or near "testdomain1": syntax error: unimplemented: this syntax +DETAIL: source SQL: @@ -232616,7 +231508,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/domain.out --labe +drop domain testdomain1 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/largeobject.out --label=/mnt/data1/postgres/src/test/regress/results/largeobject.out /mnt/data1/postgres/src/test/regress/expected/largeobject.out /mnt/data1/postgres/src/test/regress/results/largeobject.out --- /mnt/data1/postgres/src/test/regress/expected/largeobject.out +++ /mnt/data1/postgres/src/test/regress/results/largeobject.out @@ -233363,151 +232255,142 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab -- ************************************************************ -- * -- * Trigger procedures and functions for the patchfield -@@ -101,8 +112,15 @@ +@@ -101,8 +112,14 @@ return new; end; ' language plpgsql; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. 
++See: https://go.crdb.dev/issue-v/126356/dev create trigger tg_room_au after update on Room for each row execute procedure tg_room_au(); -+ERROR: at or near "tg_room_au": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger tg_room_au after update -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev -- ************************************************************ -- * AFTER DELETE on Room -- * - delete wall slots in this room -@@ -113,8 +131,15 @@ +@@ -113,8 +130,14 @@ return old; end; ' language plpgsql; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev create trigger tg_room_ad after delete on Room for each row execute procedure tg_room_ad(); -+ERROR: at or near "tg_room_ad": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger tg_room_ad after delete -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev -- ************************************************************ -- * BEFORE INSERT or UPDATE on WSlot -- * - Check that room exists -@@ -127,8 +152,15 @@ +@@ -127,8 +150,14 @@ return new; end; $$ language plpgsql; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev create trigger tg_wslot_biu before insert or update on WSlot for each row execute procedure tg_wslot_biu(); -+ERROR: at or near "tg_wslot_biu": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger tg_wslot_biu before insert or update -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev -- ************************************************************ -- * AFTER UPDATE on PField -- * - Let PSlots of this field follow -@@ -141,8 +173,15 @@ +@@ -141,8 +170,14 @@ return new; end; ' language plpgsql; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev create trigger tg_pfield_au after update on PField for each row execute procedure tg_pfield_au(); -+ERROR: at or near "tg_pfield_au": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger tg_pfield_au after update -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev -- ************************************************************ -- * AFTER DELETE on PField -- * - Remove all slots of this patchfield -@@ -153,8 +192,15 @@ +@@ -153,8 +188,14 @@ return old; end; ' language plpgsql; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. 
++See: https://go.crdb.dev/issue-v/126356/dev create trigger tg_pfield_ad after delete on PField for each row execute procedure tg_pfield_ad(); -+ERROR: at or near "tg_pfield_ad": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger tg_pfield_ad after delete -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev -- ************************************************************ -- * BEFORE INSERT or UPDATE on PSlot -- * - Ensure that our patchfield does exist -@@ -171,8 +217,15 @@ +@@ -171,8 +212,14 @@ return ps; end; $proc$ language plpgsql; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev create trigger tg_pslot_biu before insert or update on PSlot for each row execute procedure tg_pslot_biu(); -+ERROR: at or near "tg_pslot_biu": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger tg_pslot_biu before insert or update -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev -- ************************************************************ -- * AFTER UPDATE on System -- * - If system name changes let interfaces follow -@@ -185,8 +238,15 @@ +@@ -185,8 +232,14 @@ return new; end; ' language plpgsql; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev create trigger tg_system_au after update on System for each row execute procedure tg_system_au(); -+ERROR: at or near "tg_system_au": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger tg_system_au after update -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev -- ************************************************************ -- * BEFORE INSERT or UPDATE on IFace -- * - set the slotname to IF.sysname.ifname -@@ -210,8 +270,15 @@ +@@ -210,8 +263,14 @@ return new; end; $$ language plpgsql; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev create trigger tg_iface_biu before insert or update on IFace for each row execute procedure tg_iface_biu(); -+ERROR: at or near "tg_iface_biu": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger tg_iface_biu before insert or update -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev -- ************************************************************ -- * AFTER INSERT or UPDATE or DELETE on Hub -- * - insert/delete/rename slots as required -@@ -238,8 +305,15 @@ +@@ -238,8 +297,14 @@ end if; end; ' language plpgsql; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev create trigger tg_hub_a after insert or update or delete on Hub for each row execute procedure tg_hub_a(); -+ERROR: at or near "tg_hub_a": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger tg_hub_a after insert or update or delete -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev -- ************************************************************ -- * Support function to add/remove slots of Hub -- ************************************************************ -@@ -262,11 +336,51 @@ +@@ -262,11 +327,51 @@ return 0; end ' language plpgsql; @@ -233542,323 +232425,275 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab +COMMENT ON FUNCTION tg_hub_adjustslots_wrong(bpchar, integer, integer) IS 'function with args' + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/17511/v24.2 ++See: https://go.crdb.dev/issue-v/17511/dev COMMENT ON FUNCTION tg_hub_adjustslots(bpchar, integer, integer) IS 'function with args'; +ERROR: at or near "tg_hub_adjustslots": syntax error: unimplemented: this syntax +DETAIL: source SQL: +COMMENT ON FUNCTION tg_hub_adjustslots(bpchar, integer, integer) IS 'function with args' + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/17511/v24.2 ++See: https://go.crdb.dev/issue-v/17511/dev COMMENT ON FUNCTION tg_hub_adjustslots(bpchar, integer, integer) IS NULL; +ERROR: at or near "tg_hub_adjustslots": syntax error: unimplemented: this syntax +DETAIL: source SQL: +COMMENT ON FUNCTION tg_hub_adjustslots(bpchar, integer, integer) IS NULL + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/17511/v24.2 ++See: https://go.crdb.dev/issue-v/17511/dev -- ************************************************************ -- * BEFORE INSERT or UPDATE on HSlot -- * - prevent from manual manipulation -@@ -300,8 +414,15 @@ +@@ -300,8 +405,14 @@ return new; end; ' language plpgsql; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev create trigger tg_hslot_biu before insert or update on HSlot for each row execute procedure tg_hslot_biu(); -+ERROR: at or near "tg_hslot_biu": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger tg_hslot_biu before insert or update -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev -- ************************************************************ -- * BEFORE DELETE on HSlot -- * - prevent from manual manipulation -@@ -320,8 +441,15 @@ +@@ -320,8 +431,14 @@ raise exception ''no manual manipulation of HSlot''; end; ' language plpgsql; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev create trigger tg_hslot_bd before delete on HSlot for each row execute procedure tg_hslot_bd(); -+ERROR: at or near "tg_hslot_bd": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger tg_hslot_bd before delete -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev -- ************************************************************ -- * BEFORE INSERT on all slots -- * - Check name prefix -@@ -334,16 +462,47 @@ +@@ -334,16 +451,34 @@ return new; end; ' language plpgsql; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev create trigger tg_chkslotname before insert on PSlot for each row execute procedure tg_chkslotname('PS'); -+ERROR: at or near "tg_chkslotname": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger tg_chkslotname before insert -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger tg_chkslotname before insert on WSlot for each row execute procedure tg_chkslotname('WS'); -+ERROR: at or near "tg_chkslotname": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger tg_chkslotname before insert -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger tg_chkslotname before insert on PLine for each row execute procedure tg_chkslotname('PL'); -+ERROR: at or near "tg_chkslotname": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger tg_chkslotname before insert -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev create trigger tg_chkslotname before insert on IFace for each row execute procedure tg_chkslotname('IF'); -+ERROR: at or near "tg_chkslotname": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+create trigger tg_chkslotname before insert -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/28296/v24.2
++See: https://go.crdb.dev/issue-v/126359/dev
create trigger tg_chkslotname before insert
    on PHone for each row execute procedure tg_chkslotname('PH');
-+ERROR: at or near "tg_chkslotname": syntax error: unimplemented: this syntax
-+DETAIL: source SQL:
-+create trigger tg_chkslotname before insert
-+ ^
++ERROR: unimplemented: CREATE TRIGGER
+HINT: You have attempted to use a feature that is not yet implemented.
-+See: https://go.crdb.dev/issue-v/28296/v24.2
++See: https://go.crdb.dev/issue-v/126359/dev
-- ************************************************************
-- * BEFORE INSERT or UPDATE on all slots with slotlink
-- * - Set slotlink to empty string if NULL value given
-@@ -356,16 +515,47 @@
+@@ -356,16 +491,34 @@
return new;
end;
' language plpgsql;
-+ERROR: type "trigger" does not exist
++ERROR: unimplemented: trigger functions are not yet supported
++HINT: You have attempted to use a feature that is not yet implemented.
++See: https://go.crdb.dev/issue-v/126356/dev
create trigger tg_chkslotlink before insert or update
    on PSlot for each row execute procedure tg_chkslotlink();
-+ERROR: at or near "tg_chkslotlink": syntax error: unimplemented: this syntax
-+DETAIL: source SQL:
-+create trigger tg_chkslotlink before insert or update
-+ ^
++ERROR: unimplemented: CREATE TRIGGER
+HINT: You have attempted to use a feature that is not yet implemented.
-+See: https://go.crdb.dev/issue-v/28296/v24.2
++See: https://go.crdb.dev/issue-v/126359/dev
create trigger tg_chkslotlink before insert or update
    on WSlot for each row execute procedure tg_chkslotlink();
-+ERROR: at or near "tg_chkslotlink": syntax error: unimplemented: this syntax
-+DETAIL: source SQL:
-+create trigger tg_chkslotlink before insert or update
-+ ^
++ERROR: unimplemented: CREATE TRIGGER
+HINT: You have attempted to use a feature that is not yet implemented.
-+See: https://go.crdb.dev/issue-v/28296/v24.2
++See: https://go.crdb.dev/issue-v/126359/dev
create trigger tg_chkslotlink before insert or update
    on IFace for each row execute procedure tg_chkslotlink();
-+ERROR: at or near "tg_chkslotlink": syntax error: unimplemented: this syntax
-+DETAIL: source SQL:
-+create trigger tg_chkslotlink before insert or update
-+ ^
++ERROR: unimplemented: CREATE TRIGGER
+HINT: You have attempted to use a feature that is not yet implemented.
-+See: https://go.crdb.dev/issue-v/28296/v24.2
++See: https://go.crdb.dev/issue-v/126359/dev
create trigger tg_chkslotlink before insert or update
    on HSlot for each row execute procedure tg_chkslotlink();
-+ERROR: at or near "tg_chkslotlink": syntax error: unimplemented: this syntax
-+DETAIL: source SQL:
-+create trigger tg_chkslotlink before insert or update
-+ ^
++ERROR: unimplemented: CREATE TRIGGER
+HINT: You have attempted to use a feature that is not yet implemented.
-+See: https://go.crdb.dev/issue-v/28296/v24.2
++See: https://go.crdb.dev/issue-v/126359/dev
create trigger tg_chkslotlink before insert or update
    on PHone for each row execute procedure tg_chkslotlink();
-+ERROR: at or near "tg_chkslotlink": syntax error: unimplemented: this syntax
-+DETAIL: source SQL:
-+create trigger tg_chkslotlink before insert or update
-+ ^
++ERROR: unimplemented: CREATE TRIGGER
+HINT: You have attempted to use a feature that is not yet implemented.
-+See: https://go.crdb.dev/issue-v/28296/v24.2
++See: https://go.crdb.dev/issue-v/126359/dev
-- ************************************************************
-- * BEFORE INSERT or UPDATE on all slots with backlink
-- * - Set backlink to empty string if NULL value given
-@@ -378,12 +568,31 @@
+@@ -378,12 +531,24 @@
return new;
end;
' language plpgsql;
-+ERROR: type "trigger" does not exist
++ERROR: unimplemented: trigger functions are not yet supported
++HINT: You have attempted to use a feature that is not yet implemented.
++See: https://go.crdb.dev/issue-v/126356/dev
create trigger tg_chkbacklink before insert or update
    on PSlot for each row execute procedure tg_chkbacklink();
-+ERROR: at or near "tg_chkbacklink": syntax error: unimplemented: this syntax
-+DETAIL: source SQL:
-+create trigger tg_chkbacklink before insert or update
-+ ^
++ERROR: unimplemented: CREATE TRIGGER
+HINT: You have attempted to use a feature that is not yet implemented.
-+See: https://go.crdb.dev/issue-v/28296/v24.2
++See: https://go.crdb.dev/issue-v/126359/dev
create trigger tg_chkbacklink before insert or update
    on WSlot for each row execute procedure tg_chkbacklink();
-+ERROR: at or near "tg_chkbacklink": syntax error: unimplemented: this syntax
-+DETAIL: source SQL:
-+create trigger tg_chkbacklink before insert or update
-+ ^
++ERROR: unimplemented: CREATE TRIGGER
+HINT: You have attempted to use a feature that is not yet implemented.
-+See: https://go.crdb.dev/issue-v/28296/v24.2
++See: https://go.crdb.dev/issue-v/126359/dev
create trigger tg_chkbacklink before insert or update
    on PLine for each row execute procedure tg_chkbacklink();
-+ERROR: at or near "tg_chkbacklink": syntax error: unimplemented: this syntax
-+DETAIL: source SQL:
-+create trigger tg_chkbacklink before insert or update
-+ ^
++ERROR: unimplemented: CREATE TRIGGER
+HINT: You have attempted to use a feature that is not yet implemented.
-+See: https://go.crdb.dev/issue-v/28296/v24.2
++See: https://go.crdb.dev/issue-v/126359/dev
-- ************************************************************
-- * BEFORE UPDATE on PSlot
-- * - do delete/insert instead of update if name changes
-@@ -408,8 +617,15 @@
+@@ -408,8 +573,14 @@
return new;
end;
' language plpgsql;
-+ERROR: type "trigger" does not exist
++ERROR: unimplemented: trigger functions are not yet supported
++HINT: You have attempted to use a feature that is not yet implemented.
++See: https://go.crdb.dev/issue-v/126356/dev
create trigger tg_pslot_bu before update
    on PSlot for each row execute procedure tg_pslot_bu();
-+ERROR: at or near "tg_pslot_bu": syntax error: unimplemented: this syntax
-+DETAIL: source SQL:
-+create trigger tg_pslot_bu before update
-+ ^
++ERROR: unimplemented: CREATE TRIGGER
+HINT: You have attempted to use a feature that is not yet implemented.
-+See: https://go.crdb.dev/issue-v/28296/v24.2
++See: https://go.crdb.dev/issue-v/126359/dev
-- ************************************************************
-- * BEFORE UPDATE on WSlot
-- * - do delete/insert instead of update if name changes
-@@ -434,8 +650,15 @@
+@@ -434,8 +605,14 @@
return new;
end;
' language plpgsql;
-+ERROR: type "trigger" does not exist
++ERROR: unimplemented: trigger functions are not yet supported
++HINT: You have attempted to use a feature that is not yet implemented.
++See: https://go.crdb.dev/issue-v/126356/dev
create trigger tg_wslot_bu before update
    on WSlot for each row execute procedure tg_Wslot_bu();
-+ERROR: at or near "tg_wslot_bu": syntax error: unimplemented: this syntax
-+DETAIL: source SQL:
-+create trigger tg_wslot_bu before update
-+ ^
++ERROR: unimplemented: CREATE TRIGGER
+HINT: You have attempted to use a feature that is not yet implemented.
-+See: https://go.crdb.dev/issue-v/28296/v24.2
++See: https://go.crdb.dev/issue-v/126359/dev
-- ************************************************************
-- * BEFORE UPDATE on PLine
-- * - do delete/insert instead of update if name changes
-@@ -460,8 +683,15 @@
+@@ -460,8 +637,14 @@
return new;
end;
' language plpgsql;
-+ERROR: type "trigger" does not exist
++ERROR: unimplemented: trigger functions are not yet supported
++HINT: You have attempted to use a feature that is not yet implemented.
++See: https://go.crdb.dev/issue-v/126356/dev
create trigger tg_pline_bu before update
    on PLine for each row execute procedure tg_pline_bu();
-+ERROR: at or near "tg_pline_bu": syntax error: unimplemented: this syntax
-+DETAIL: source SQL:
-+create trigger tg_pline_bu before update
-+ ^
++ERROR: unimplemented: CREATE TRIGGER
+HINT: You have attempted to use a feature that is not yet implemented.
-+See: https://go.crdb.dev/issue-v/28296/v24.2
++See: https://go.crdb.dev/issue-v/126359/dev
-- ************************************************************
-- * BEFORE UPDATE on IFace
-- * - do delete/insert instead of update if name changes
-@@ -486,8 +716,15 @@
+@@ -486,8 +669,14 @@
return new;
end;
' language plpgsql;
-+ERROR: type "trigger" does not exist
++ERROR: unimplemented: trigger functions are not yet supported
++HINT: You have attempted to use a feature that is not yet implemented.
++See: https://go.crdb.dev/issue-v/126356/dev
create trigger tg_iface_bu before update
    on IFace for each row execute procedure tg_iface_bu();
-+ERROR: at or near "tg_iface_bu": syntax error: unimplemented: this syntax
-+DETAIL: source SQL:
-+create trigger tg_iface_bu before update
-+ ^
++ERROR: unimplemented: CREATE TRIGGER
+HINT: You have attempted to use a feature that is not yet implemented.
-+See: https://go.crdb.dev/issue-v/28296/v24.2
++See: https://go.crdb.dev/issue-v/126359/dev
-- ************************************************************
-- * BEFORE UPDATE on HSlot
-- * - do delete/insert instead of update if name changes
-@@ -512,8 +749,15 @@
+@@ -512,8 +701,14 @@
return new;
end;
' language plpgsql;
-+ERROR: type "trigger" does not exist
++ERROR: unimplemented: trigger functions are not yet supported
++HINT: You have attempted to use a feature that is not yet implemented.
++See: https://go.crdb.dev/issue-v/126356/dev
create trigger tg_hslot_bu before update
    on HSlot for each row execute procedure tg_hslot_bu();
-+ERROR: at or near "tg_hslot_bu": syntax error: unimplemented: this syntax
-+DETAIL: source SQL:
-+create trigger tg_hslot_bu before update
-+ ^
++ERROR: unimplemented: CREATE TRIGGER
+HINT: You have attempted to use a feature that is not yet implemented.
-+See: https://go.crdb.dev/issue-v/28296/v24.2
++See: https://go.crdb.dev/issue-v/126359/dev
-- ************************************************************
-- * BEFORE UPDATE on PHone
-- * - do delete/insert instead of update if name changes
-@@ -536,8 +780,15 @@
+@@ -536,8 +731,14 @@
return new;
end;
' language plpgsql;
-+ERROR: type "trigger" does not exist
++ERROR: unimplemented: trigger functions are not yet supported
++HINT: You have attempted to use a feature that is not yet implemented.
++See: https://go.crdb.dev/issue-v/126356/dev
create trigger tg_phone_bu before update
    on PHone for each row execute procedure tg_phone_bu();
-+ERROR: at or near "tg_phone_bu": syntax error: unimplemented: this syntax
-+DETAIL: source SQL:
-+create trigger tg_phone_bu before update
-+ ^
++ERROR: unimplemented: CREATE TRIGGER
+HINT: You have attempted to use a feature that is not yet implemented.
-+See: https://go.crdb.dev/issue-v/28296/v24.2
++See: https://go.crdb.dev/issue-v/126359/dev
-- ************************************************************
-- * AFTER INSERT or UPDATE or DELETE on slot with backlink
-- * - Ensure that the opponent correctly points back to us
-@@ -575,12 +826,31 @@
+@@ -575,12 +776,24 @@
end if;
end;
' language plpgsql;
-+ERROR: type "trigger" does not exist
++ERROR: unimplemented: trigger functions are not yet supported
++HINT: You have attempted to use a feature that is not yet implemented.
++See: https://go.crdb.dev/issue-v/126356/dev
create trigger tg_backlink_a after insert or update or delete
    on PSlot for each row execute procedure tg_backlink_a('PS');
-+ERROR: at or near "tg_backlink_a": syntax error: unimplemented: this syntax
-+DETAIL: source SQL:
-+create trigger tg_backlink_a after insert or update or delete
-+ ^
++ERROR: unimplemented: CREATE TRIGGER
+HINT: You have attempted to use a feature that is not yet implemented.
-+See: https://go.crdb.dev/issue-v/28296/v24.2
++See: https://go.crdb.dev/issue-v/126359/dev
create trigger tg_backlink_a after insert or update or delete
    on WSlot for each row execute procedure tg_backlink_a('WS');
-+ERROR: at or near "tg_backlink_a": syntax error: unimplemented: this syntax
-+DETAIL: source SQL:
-+create trigger tg_backlink_a after insert or update or delete
-+ ^
++ERROR: unimplemented: CREATE TRIGGER
+HINT: You have attempted to use a feature that is not yet implemented.
-+See: https://go.crdb.dev/issue-v/28296/v24.2
++See: https://go.crdb.dev/issue-v/126359/dev
create trigger tg_backlink_a after insert or update or delete
    on PLine for each row execute procedure tg_backlink_a('PL');
-+ERROR: at or near "tg_backlink_a": syntax error: unimplemented: this syntax
-+DETAIL: source SQL:
-+create trigger tg_backlink_a after insert or update or delete
-+ ^
++ERROR: unimplemented: CREATE TRIGGER
+HINT: You have attempted to use a feature that is not yet implemented.
-+See: https://go.crdb.dev/issue-v/28296/v24.2
++See: https://go.crdb.dev/issue-v/126359/dev
-- ************************************************************
-- * Support function to set the opponents backlink field
-- * if it does not already point to the requested slot
-@@ -635,6 +905,9 @@
+@@ -635,6 +848,9 @@
raise exception ''illegal backlink beginning with %'', mytype;
end;
' language plpgsql;
+ERROR: unimplemented: RECORD type for PL/pgSQL variables is not yet supported
+HINT: You have attempted to use a feature that is not yet implemented.
-+See: https://go.crdb.dev/issue-v/114874/v24.2
++See: https://go.crdb.dev/issue-v/114874/dev
-- ************************************************************
-- * Support function to clear out the backlink field if
-- * it still points to specific slot
-@@ -680,6 +953,11 @@
+@@ -680,6 +896,11 @@
end if;
end
' language plpgsql;
@@ -233870,55 +232705,42 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab
-- ************************************************************
-- * AFTER INSERT or UPDATE or DELETE on slot with slotlink
-- * - Ensure that the opponent correctly points back to us
-@@ -717,16 +995,47 @@
+@@ -717,16 +938,34 @@
end if;
end;
' language plpgsql;
-+ERROR: type "trigger" does not exist
++ERROR: unimplemented: trigger functions are not yet supported
++HINT: You have attempted to use a feature that is not yet implemented.
++See: https://go.crdb.dev/issue-v/126356/dev
create trigger tg_slotlink_a after insert or update or delete
    on PSlot for each row execute procedure tg_slotlink_a('PS');
-+ERROR: at or near "tg_slotlink_a": syntax error: unimplemented: this syntax
-+DETAIL: source SQL:
-+create trigger tg_slotlink_a after insert or update or delete
-+ ^
++ERROR: unimplemented: CREATE TRIGGER
+HINT: You have attempted to use a feature that is not yet implemented.
-+See: https://go.crdb.dev/issue-v/28296/v24.2
++See: https://go.crdb.dev/issue-v/126359/dev
create trigger tg_slotlink_a after insert or update or delete
    on WSlot for each row execute procedure tg_slotlink_a('WS');
-+ERROR: at or near "tg_slotlink_a": syntax error: unimplemented: this syntax
-+DETAIL: source SQL:
-+create trigger tg_slotlink_a after insert or update or delete
-+ ^
++ERROR: unimplemented: CREATE TRIGGER
+HINT: You have attempted to use a feature that is not yet implemented.
-+See: https://go.crdb.dev/issue-v/28296/v24.2
++See: https://go.crdb.dev/issue-v/126359/dev
create trigger tg_slotlink_a after insert or update or delete
    on IFace for each row execute procedure tg_slotlink_a('IF');
-+ERROR: at or near "tg_slotlink_a": syntax error: unimplemented: this syntax
-+DETAIL: source SQL:
-+create trigger tg_slotlink_a after insert or update or delete
-+ ^
++ERROR: unimplemented: CREATE TRIGGER
+HINT: You have attempted to use a feature that is not yet implemented.
-+See: https://go.crdb.dev/issue-v/28296/v24.2
++See: https://go.crdb.dev/issue-v/126359/dev
create trigger tg_slotlink_a after insert or update or delete
    on HSlot for each row execute procedure tg_slotlink_a('HS');
-+ERROR: at or near "tg_slotlink_a": syntax error: unimplemented: this syntax
-+DETAIL: source SQL:
-+create trigger tg_slotlink_a after insert or update or delete
-+ ^
++ERROR: unimplemented: CREATE TRIGGER
+HINT: You have attempted to use a feature that is not yet implemented.
-+See: https://go.crdb.dev/issue-v/28296/v24.2
++See: https://go.crdb.dev/issue-v/126359/dev
create trigger tg_slotlink_a after insert or update or delete
    on PHone for each row execute procedure tg_slotlink_a('PH');
-+ERROR: at or near "tg_slotlink_a": syntax error: unimplemented: this syntax
-+DETAIL: source SQL:
-+create trigger tg_slotlink_a after insert or update or delete
-+ ^
++ERROR: unimplemented: CREATE TRIGGER
+HINT: You have attempted to use a feature that is not yet implemented.
-+See: https://go.crdb.dev/issue-v/28296/v24.2
++See: https://go.crdb.dev/issue-v/126359/dev
-- ************************************************************
-- * Support function to set the opponents slotlink field
-- * if it does not already point to the requested slot
-@@ -811,6 +1120,11 @@
+@@ -811,6 +1050,11 @@
raise exception ''illegal slotlink beginning with %'', mytype;
end;
' language plpgsql;
@@ -233930,7 +232752,7 @@
-- ************************************************************
-- * Support function to clear out the slotlink field if
-- * it still points to specific slot
-@@ -876,6 +1190,11 @@
+@@ -876,6 +1120,11 @@
end if;
end;
' language plpgsql;
@@ -233942,17 +232764,17 @@
-- ************************************************************
-- * Describe the backside of a patchfield slot
-- ************************************************************
-@@ -919,6 +1238,9 @@
+@@ -919,6 +1168,9 @@
return rec.backlink;
end;
' language plpgsql;
+ERROR: unimplemented: RECORD type for PL/pgSQL variables is not yet supported
+HINT: You have attempted to use a feature that is not yet implemented.
-+See: https://go.crdb.dev/issue-v/114874/v24.2
++See: https://go.crdb.dev/issue-v/114874/dev
-- ************************************************************
-- * Describe the front of a patchfield slot
-- ************************************************************
-@@ -953,54 +1275,83 @@
+@@ -953,54 +1205,83 @@
return psrec.slotlink;
end;
' language plpgsql;
@@ -234076,7 +232898,7 @@
-- ************************************************************
-- * View of a patchfield describing backside and patches
-- ************************************************************
-@@ -1008,6 +1359,8 @@
+@@ -1008,6 +1289,8 @@
pslot_backlink_view(PF.slotname) as backside,
pslot_slotlink_view(PF.slotname) as patch
from PSlot PF;
+ERROR: unknown function: pslot_backlink_view()
+HINT: There is probably a typo
--
-- First we build the house - so we create the rooms
--
-@@ -1146,8 +1499,8 @@
+@@ -1146,8 +1429,8 @@
select * from WSlot where roomno = '001' order by slotname;
slotname | roomno | slotlink | backlink
----------------------+----------+----------------------+----------------------
- WS.001.1a | 001 | |
- WS.001.1b | 001 | |
+ WS.001.1a | 001 | |
+ WS.001.1b | 001 | |
WS.001.2a | 001 | |
WS.001.2b | 001 | |
WS.001.3a | 001 | |
-@@ -1169,9 +1522,9 @@
+@@ -1169,9 +1452,9 @@
select * from WSlot where roomno = '001' order by slotname;
slotname | roomno | slotlink | backlink
----------------------+----------+----------------------+----------------------
- WS.001.1a | 001 | |
+ WS.001.1a | 001 | |
WS.001.2b | 001 | |
WS.001.3a | 001 | |
WS.001.3b | 001 | |
-@@ -1192,9 +1545,9 @@
+@@ -1192,9 +1475,9 @@
select * from WSlot where roomno = '001' order by slotname;
slotname | roomno | slotlink | backlink
----------------------+----------+----------------------+----------------------
- WS.001.1a | 001 | |
+ WS.001.1a | 001 | |
WS.001.2b | 001 | |
WS.001.3a | 001 | |
WS.001.3b | 001 | |
-@@ -1221,9 +1574,9 @@
+@@ -1221,9 +1504,9 @@
select * from WSlot where roomno = '001' order by slotname;
slotname | roomno | slotlink | backlink
----------------------+----------+----------------------+----------------------
- WS.001.1a | 001 | |
+ WS.001.1a | 001 | |
WS.001.2b | 001 | | PS.base.a4
WS.001.3a | 001 | | PS.base.a6
WS.001.3b | 001 | |
-@@ -1235,20 +1588,20 @@
+@@ -1235,20 +1518,20 @@
PS.base.a1 | PF0_1 | | WS.001.1a
PS.base.a2 | PF0_1 | | WS.001.1b
PS.base.a3 | PF0_1 | | WS.001.2a
WS.001.3b | 001 | | PS.base.a6
(6 rows)
-@@ -1258,18 +1611,18 @@
+@@ -1258,18 +1541,18 @@
PS.base.a1 | PF0_1 | | WS.001.1a
PS.base.a2 | PF0_1 | | WS.001.1b
PS.base.a3 | PF0_1 | | WS.001.2a
WS.001.2b | 001 | | PS.base.a4
WS.001.3a | 001 | | PS.base.a5
WS.001.3b | 001 | | PS.base.a6
-@@ -1281,9 +1634,9 @@
+@@ -1281,9 +1564,9 @@
PS.base.a1 | PF0_1 | | WS.001.1a
PS.base.a2 | PF0_1 | | WS.001.1b
PS.base.a3 | PF0_1 | | WS.001.2a
(6 rows)
insert into PField values ('PF1_2', 'Phonelines first floor');
-@@ -1309,9 +1662,9 @@
+@@ -1309,9 +1592,9 @@
PS.base.a1 | PF0_1 | | WS.001.1a
PS.base.a2 | PF0_1 | | WS.001.1b
PS.base.a3 | PF0_1 | | WS.001.2a
PS.base.b1 | PF0_1 | | WS.002.1a
PS.base.b2 | PF0_1 | | WS.002.1b
PS.base.b3 | PF0_1 | | WS.002.2a
-@@ -1324,18 +1677,18 @@
+@@ -1324,18 +1607,18 @@
PS.base.c4 | PF0_1 | | WS.003.2b
PS.base.c5 | PF0_1 | | WS.003.3a
PS.base.c6 | PF0_1 | | WS.003.3b
PS.first.a1 | PF1_1 | | WS.101.1a
PS.first.a2 | PF1_1 | | WS.101.1b
PS.first.a3 | PF1_1 | | WS.101.2a
-@@ -1377,48 +1730,48 @@
+@@ -1377,48 +1660,48 @@
select * from WSlot order by slotname;
slotname | roomno | slotlink | backlink
----------------------+----------+----------------------+----------------------
(42 rows)
--
-@@ -1471,82 +1824,22 @@
+@@ -1471,82 +1754,22 @@
-- Now we take a look at the patchfield
--
select * from PField_v1 where pfname = 'PF0_1' order by slotname;
--
-- The following tests are unrelated to the scenario outlined above;
-- they merely exercise specific parts of PL/pgSQL
--
-@@ -1564,12 +1857,10 @@
+@@ -1564,12 +1787,9 @@
END IF;
RETURN rslt;
END;' LANGUAGE plpgsql;
--
-- Test the FOUND magic variable
--
-@@ -1609,22 +1900,44 @@
+@@ -1609,22 +1829,44 @@
end if;
return true;
end;' language plpgsql;
+HINT: You have attempted to use a feature that is not yet implemented.
+
+Please check the public issue tracker to check whether this problem is
+already tracked. If you cannot find it there, please report the error
+with details by creating a new issue.
+
+If you would rather not post publicly, please contact us directly
+using the support form.
+
+We appreciate your feedback.
+
+select test_found();
+ERROR: unknown function: test_found()
--
-- Test set-returning functions for PL/pgSQL
-@@ -1638,17 +1951,11 @@
+@@ -1638,17 +1880,11 @@
END LOOP;
RETURN;
END;' language plpgsql;
+ERROR: unimplemented: set-returning PL/pgSQL functions are not yet supported
+HINT: You have attempted to use a feature that is not yet implemented.
+See: https://go.crdb.dev/issue-v/105240/dev
select * from test_table_func_rec();
- a
-----
-
+ERROR: unknown function: test_table_func_rec()
create function test_table_func_row() returns setof found_test_tbl as '
DECLARE
row found_test_tbl%ROWTYPE;
-@@ -1658,17 +1965,11 @@
+@@ -1658,17 +1894,11 @@
END LOOP;
RETURN;
END;' language plpgsql;
+ERROR: unimplemented: set-returning PL/pgSQL functions are not yet supported
+HINT: You have attempted to use a feature that is not yet implemented.
+See: https://go.crdb.dev/issue-v/105240/dev
select * from test_table_func_row();
- a
-----
-
+ERROR: unknown function: test_table_func_row()
create function test_ret_set_scalar(int,int) returns setof int as '
DECLARE
i int;
-@@ -1678,21 +1979,11 @@
+@@ -1678,21 +1908,11 @@
END LOOP;
RETURN;
END;' language plpgsql;
+ERROR: unimplemented: set-returning PL/pgSQL functions are not yet supported
+HINT: You have attempted to use a feature that is not yet implemented.
+See: https://go.crdb.dev/issue-v/105240/dev
select * from test_ret_set_scalar(1,10);
- test_ret_set_scalar
----------------------
-
+ERROR: unknown function: test_ret_set_scalar()
create function test_ret_set_rec_dyn(int) returns setof record as '
DECLARE
retval RECORD;
-@@ -1708,20 +1999,21 @@
+@@ -1708,20 +1928,21 @@
END IF;
RETURN;
END;' language plpgsql;
+ERROR: unimplemented: set-returning PL/pgSQL functions are not yet supported
+HINT: You have attempted to use a feature that is not yet implemented.
+See: https://go.crdb.dev/issue-v/105240/dev
SELECT * FROM test_ret_set_rec_dyn(1500) AS (a int, b int, c int);
- a | b | c
----+----+----
+ERROR: unknown function: test_ret_set_rec_dyn()
create function test_ret_rec_dyn(int) returns record as '
DECLARE
retval RECORD;
-@@ -1734,18 +2026,21 @@
+@@ -1734,18 +1955,21 @@
RETURN retval;
END IF;
END;' language plpgsql;
+ERROR: unimplemented: RECORD type for PL/pgSQL variables is not yet supported
+HINT: You have attempted to use a feature that is not yet implemented.
+See: https://go.crdb.dev/issue-v/114874/dev
SELECT * FROM test_ret_rec_dyn(1500) AS (a int, b int, c int);
- a | b | c
----+----+----
+ERROR: unknown function: test_ret_rec_dyn()
--
-- Test some simple polymorphism cases.
--
-@@ -1753,31 +2048,37 @@
+@@ -1753,31 +1977,37 @@
begin
return x + 1;
end$$ language plpgsql;
create function f1(x anyarray) returns anyelement as $$
begin
return x[1];
-@@ -1789,7 +2090,10 @@
+@@ -1789,7 +2019,10 @@
(1 row)
select f1(stavalues1) from pg_statistic; -- fail, can't infer element type
drop function f1(x anyarray);
create function f1(x anyarray) returns anyarray as $$
begin
-@@ -1802,72 +2106,61 @@
+@@ -1802,72 +2035,61 @@
(1 row)
select f1(stavalues1) from pg_statistic; -- fail, can't infer element type
create function f1(a anyelement, b anyarray,
c anycompatible, d anycompatible,
OUT x anyarray, OUT y anycompatiblearray)
-@@ -1876,35 +2169,30 @@
+@@ -1876,35 +2098,30 @@
x := a || b;
y := array[c, d];
end$$ language plpgsql;
--
-- Test handling of OUT parameters, including polymorphic cases.
-- Note that RETURN is optional with OUT params; we try both ways.
-@@ -1915,8 +2203,6 @@
+@@ -1915,8 +2132,6 @@
return i+1;
end$$ language plpgsql;
ERROR: RETURN cannot have a parameter in function with OUT parameters
create function f1(in i int, out j int) as $$
begin
j := i+1;
-@@ -1959,14 +2245,27 @@
+@@ -1959,14 +2174,27 @@
return next;
return;
end$$ language plpgsql;
+HINT: You have attempted to use a feature that is not yet implemented.
+
+Please check the public issue tracker to check whether this problem is
+already tracked. If you cannot find it there, please report the error
+with details by creating a new issue.
+
+If you would rather not post publicly, please contact us directly
+using the support form.
+
+We appreciate your feedback.
+
+select * from f1(42);
+ERROR: unknown function: f1()
drop function f1(int);
create function f1(in i int, out j int, out k text) as $$
begin
j := i;
-@@ -1995,14 +2294,28 @@
+@@ -1995,14 +2223,28 @@
k := 'foot';
return next;
end$$ language plpgsql;
+HINT: You have attempted to use a feature that is not yet implemented.
+
+Please check the public issue tracker to check whether this problem is
+already tracked. If you cannot find it there, please report the error
+with details by creating a new issue.
+
+If you would rather not post publicly, please contact us directly
+using the support form.
+
+We appreciate your feedback.
+
+select * from f1(42);
+ERROR: unknown function: f1()
drop function f1(int);
create function duplic(in i anyelement, out j anyelement, out k anyarray) as $$
begin
j := i;
-@@ -2028,19 +2341,13 @@
+@@ -2028,19 +2270,13 @@
k := array[lower(i),upper(i)];
return;
end$$ language plpgsql;
--
-- test PERFORM
--
-@@ -2057,6 +2364,7 @@
+@@ -2057,6 +2293,7 @@
RETURN FALSE;
END IF;
END;' language plpgsql;
create function perform_test_func() returns void as '
BEGIN
IF FOUND then
-@@ -2077,19 +2385,32 @@
+@@ -2077,19 +2314,32 @@
RETURN;
END;' language plpgsql;
+HINT: You have attempted to use a feature that is not yet implemented.
+
+Please check the public issue tracker to check whether this problem is
+already tracked. If you cannot find it there, please report the error
+with details by creating a new issue.
+
+If you would rather not post publicly, please contact us directly
+using the support form.
+
+We appreciate your feedback.
+
+SELECT perform_test_func();
+ERROR: unknown function: perform_test_func()
SELECT * FROM perform_test;
drop table perform_test;
--
-@@ -2103,19 +2424,12 @@
+@@ -2103,19 +2353,12 @@
if found then return x; end if;
return 0;
end$$ language plpgsql stable;
create function sp_add_user(a_login text) returns int as $$
declare my_id_user int;
begin
-@@ -2130,38 +2444,22 @@
+@@ -2130,38 +2373,21 @@
END IF;
RETURN my_id_user;
end$$ language plpgsql;
--
-- tests for refcursors
--
-@@ -2185,12 +2483,13 @@
+@@ -2185,12 +2411,13 @@
return x.a;
end
$$ language plpgsql;
+ERROR: unimplemented: variable shadowing is not yet implemented
+HINT: You have attempted to use a feature that is not yet implemented.
+See: https://go.crdb.dev/issue-v/117508/dev
+--
+variable "rc" shadows a previously defined variable
select use_refcursor(return_unnamed_refcursor());
create function return_refcursor(rc refcursor) returns refcursor as $$
begin
open rc for select a from rc_test;
-@@ -2203,33 +2502,31 @@
+@@ -2203,33 +2430,31 @@
return $1;
end
$$ language plpgsql;
commit;
-- should fail
fetch next from test1;
-@@ -2249,13 +2546,25 @@
+@@ -2249,13 +2474,25 @@
end if;
end
$$ language plpgsql;
-- should fail
create function constant_refcursor() returns refcursor as $$
declare
-@@ -2266,8 +2575,11 @@
+@@ -2266,8 +2503,11 @@
end
$$ language plpgsql;
select constant_refcursor();
-- but it's okay like this
create or replace function constant_refcursor() returns refcursor as $$
declare
-@@ -2301,13 +2613,25 @@
+@@ -2301,13 +2541,25 @@
end if;
end
$$ language plpgsql;
-- mixing named and positional argument notations
create function namedparmcursor_test2(int, int) returns boolean as $$
declare
-@@ -2324,12 +2648,24 @@
+@@ -2324,12 +2576,24 @@
end if;
end
$$ language plpgsql;
-- mixing named and positional: param2 is given twice, once in named notation
-- and second time in positional notation. Should throw an error at parse time
create function namedparmcursor_test3() returns void as $$
-@@ -2339,9 +2675,22 @@
+@@ -2339,9 +2603,22 @@
open c1(param2 := 20, 21);
end
$$ language plpgsql;
-- mixing named and positional: same as previous test, but param1 is duplicated
create function namedparmcursor_test4() returns void as $$
declare
-@@ -2350,9 +2699,22 @@
+@@ -2350,9 +2627,22 @@
open c1(20, param1 := 21);
end
$$ language plpgsql;
-- duplicate named parameter, should throw an error at parse time
create function namedparmcursor_test5() returns void as $$
declare
-@@ -2362,9 +2724,22 @@
+@@ -2362,9 +2652,22 @@
open c1 (p2 := 77, p2 := 42);
end
$$ language plpgsql;
-- not enough parameters, should throw an error at parse time
create function namedparmcursor_test6() returns void as $$
declare
-@@ -2374,9 +2749,22 @@
+@@ -2374,9 +2677,22 @@
open c1 (p2 := 77);
end
$$ language plpgsql;
-- division by zero runtime error, the context given in the error message
-- should be sensible
create function namedparmcursor_test7() returns void as $$
-@@ -2386,10 +2774,24 @@
+@@ -2386,10 +2702,24 @@
begin
open c1 (p2 := 77, p1 := 42/0);
end $$ language plpgsql;
-- check that line comments work correctly within the argument list (there
-- is some special handling of this case in the code: the newline after the
-- comment must be preserved when the argument-evaluating query is
-@@ -2406,12 +2808,24 @@
+@@ -2406,12 +2736,24 @@
fetch c1 into n;
return n;
end $$ language plpgsql;
+HINT: You have attempted to use a feature that is not yet implemented.
+
+Please check the public issue tracker to check whether this problem is
+already tracked. If you cannot find it there, please report the error
+with details by creating a new issue.
+
+If you would rather not post publicly, please contact us directly
+using the support form.
+
+We appreciate your feedback.
+
+select namedparmcursor_test8();
+ERROR: unknown function: namedparmcursor_test8()
-- cursor parameter name can match plpgsql variable or unreserved keyword
create function namedparmcursor_test9(p1 int) returns int4 as $$
declare
-@@ -2425,12 +2839,24 @@
+@@ -2425,12 +2767,24 @@
fetch c1 into n;
return n;
end $$ language plpgsql;
+HINT: You have attempted to use a feature that is not yet implemented.
+
+Please check the public issue tracker to check whether this problem is
+already tracked. If you cannot find it there, please report the error
+with details by creating a new issue.
+
+If you would rather not post publicly, please contact us directly
+using the support form.
+
+We appreciate your feedback.
+
+select namedparmcursor_test9(6);
+ERROR: unknown function: namedparmcursor_test9()
--
-- tests for "raise" processing
--
-@@ -2441,28 +2867,22 @@
+@@ -2441,28 +2795,22 @@
end;
$$ language plpgsql;
ERROR: too many parameters specified for RAISE
-- Test re-RAISE inside a nested exception block.  This case is allowed
-- by Oracle's PL/SQL but was handled differently by PG before 9.1.
CREATE FUNCTION reraise_test() RETURNS void AS $$
-@@ -2484,14 +2904,30 @@
+@@ -2484,14 +2832,30 @@
raise notice 'WRONG - exception % caught in outer block', sqlerrm;
END;
$$ LANGUAGE plpgsql;
+HINT: You have attempted to use a feature that is not yet implemented.
+
+Please check the public issue tracker to check whether this problem is
+already tracked. If you cannot find it there, please report the error
+with details by creating a new issue.
+
+If you would rather not post publicly, please contact us directly
+using the support form.
+
+We appreciate your feedback.
+
+SELECT reraise_test();
+ERROR: unknown function: reraise_test()
--
-- reject function definitions that contain malformed SQL queries at
-- compile-time, where possible
-@@ -2504,9 +2940,17 @@
+@@ -2504,9 +2868,17 @@
a := 10;
return a;
end$$ language plpgsql;
create function bad_sql2() returns int as $$
declare r record;
begin
-@@ -2515,81 +2959,115 @@
+@@ -2515,81 +2887,115 @@
end loop;
return 5;
end;$$ language plpgsql;
+ perform 2+2;
+ ^
+HINT: You have attempted to use a feature that is not yet implemented.
+
+Please check the public issue tracker to check whether this problem is
+already tracked. If you cannot find it there, please report the error
+with details by creating a new issue.
+
+If you would rather not post publicly, please contact us directly
+using the support form.
+
+We appreciate your feedback.
+
+-- select void_return_expr();
drop type eitype cascade;
+ERROR: unimplemented: DROP TYPE CASCADE is not yet supported
+HINT: You have attempted to use a feature that is not yet implemented.
+See: https://go.crdb.dev/issue-v/51480/dev
--
-- SQLSTATE and SQLERRM test
--
-@@ -2597,14 +3075,11 @@
+@@ -2597,14 +3003,11 @@
begin
raise notice '% %', sqlstate, sqlerrm;
end;
$$ language plpgsql;
create function excpt_test2() returns void as $$
begin
begin
-@@ -2613,13 +3088,10 @@
+@@ -2613,13 +3016,10 @@
end;
end;
end; $$ language plpgsql;
create function excpt_test3() returns void as $$
begin
begin
-@@ -2639,31 +3111,61 @@
+@@ -2639,31 +3039,61 @@
raise notice '% %', sqlstate, sqlerrm;
end;
end; $$ language plpgsql;
+HINT: You have attempted to use a feature that is not yet implemented.
+
+Please check the public issue tracker to check whether this problem is
+already tracked. If you cannot find it there, please report the error
+with details by creating a new issue.
+
+If you would rather not post publicly, please contact us directly
+using the support form.
+
+We appreciate your feedback.
+
+select excpt_test3();
+ERROR: unknown function: excpt_test3()
create function excpt_test4() returns text as $$
+ begin perform 1/0;
+ ^
+HINT: You have attempted to use a feature that is not yet implemented.
+
+Please check the public issue tracker to check whether this problem is
+already tracked. If you cannot find it there, please report the error
+with details by creating a new issue.
+
+If you would rather not post publicly, please contact us directly
+using the support form.
+
+We appreciate your feedback.
+
+select excpt_test4();
+ERROR: unknown function: excpt_test4()
-- parameters of raise stmt can be expressions
create function raise_exprs() returns void as $$
declare
-@@ -2714,13 +3216,11 @@
+@@ -2714,13 +3144,11 @@
insert into foo values(5,6) returning * into x;
raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2;
end$$ language plpgsql;
+ERROR: unimplemented: RECORD type for PL/pgSQL variables is not yet supported
+HINT: You have attempted to use a feature that is not yet implemented.
+See: https://go.crdb.dev/issue-v/114874/dev
select stricttest();
-NOTICE: x.f1 = 5, x.f2 = 6
- stricttest
------------
-
+ERROR: unknown function: stricttest()
create or replace function stricttest() returns void as $$
declare x record;
begin
-@@ -2728,10 +3228,11 @@
+@@ -2728,10 +3156,11 @@
insert into foo values(7,8),(9,10) returning * into x;
raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2;
end$$ language plpgsql;
+ERROR: unimplemented: RECORD type for PL/pgSQL variables is not yet supported
+HINT: You have attempted to use a feature that is not yet implemented.
+See: https://go.crdb.dev/issue-v/114874/dev
select stricttest();
-ERROR: query returned more than one row
-HINT: Make sure the query returns a single row, or use LIMIT 1.
+ERROR: unknown function: stricttest()
create or replace function stricttest() returns void as $$
declare x record;
begin
-@@ -2739,13 +3240,11 @@
+@@ -2739,13 +3168,11 @@
execute 'insert into foo values(5,6) returning *' into x;
raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2;
end$$ language plpgsql;
+ERROR: unimplemented: RECORD type for PL/pgSQL variables is not yet supported
+HINT: You have attempted to use a feature that is not yet implemented.
+See: https://go.crdb.dev/issue-v/114874/dev
select stricttest();
-NOTICE: x.f1 = 5, x.f2 = 6
- stricttest
------------
-
+ERROR: unknown function: stricttest()
create or replace function stricttest() returns void as $$
declare x record;
begin
-@@ -2753,23 +3252,17 @@
+@@ -2753,23 +3180,17 @@
execute 'insert into foo values(7,8),(9,10) returning *' into x;
raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2;
end$$ language plpgsql;
+ERROR: unimplemented: RECORD type for PL/pgSQL variables is not yet supported
+HINT: You have attempted to use a feature that is not yet implemented.
+See: https://go.crdb.dev/issue-v/114874/dev
select stricttest();
-NOTICE: x.f1 = 7, x.f2 = 8
- stricttest
------------
-
+ERROR: unknown function: stricttest()
create or replace function stricttest() returns void as $$
declare
x record;
-@@ -2778,13 +3271,11 @@
+@@ -2778,13 +3199,11 @@
select * from foo where f1 = 3 into strict x;
raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2;
end$$ language plpgsql;
+ERROR: unimplemented: RECORD type for PL/pgSQL variables is not yet supported
+HINT: You have attempted to use a feature that is not yet implemented.
+See: https://go.crdb.dev/issue-v/114874/dev
select stricttest();
-NOTICE: x.f1 = 3, x.f2 = 4
- stricttest
------------
-
+ERROR: unknown function: stricttest()
create or replace function stricttest() returns void as $$
declare x record;
begin
-@@ -2792,9 +3283,11 @@
+@@ -2792,9 +3211,11 @@
select * from foo where f1 = 0 into strict x;
raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2;
end$$ language plpgsql;
+ERROR: unimplemented: RECORD type for PL/pgSQL variables is not yet supported
+HINT: You have attempted to use a feature that is not yet implemented.
+See: https://go.crdb.dev/issue-v/114874/dev
select stricttest();
-ERROR: query returned no rows
-CONTEXT: PL/pgSQL function stricttest() line 5 at SQL statement
+ERROR: unknown function: stricttest()
create or replace function stricttest() returns void as $$
declare x record;
begin
-@@ -2802,10 +3295,11 @@
+@@ -2802,10 +3223,11 @@
select * from foo where f1 > 3 into strict x;
raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2;
end$$ language plpgsql;
+ERROR: unimplemented: RECORD type for PL/pgSQL variables is not yet supported
+HINT: You have attempted to use a feature that is not yet implemented.
+See: https://go.crdb.dev/issue-v/114874/dev
select stricttest();
-ERROR: query returned more than one row
-HINT: Make sure the query returns a single row, or use LIMIT 1.
+ERROR: unknown function: stricttest()
create or replace function stricttest() returns void as $$
declare x record;
begin
-@@ -2813,13 +3307,11 @@
+@@ -2813,13 +3235,11 @@
execute 'select * from foo where f1 = 3' into strict x;
raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2;
end$$ language plpgsql;
+ERROR: unimplemented: RECORD type for PL/pgSQL variables is not yet supported
+HINT: You have attempted to use a feature that is not yet implemented.
+See: https://go.crdb.dev/issue-v/114874/dev
select stricttest();
-NOTICE: x.f1 = 3, x.f2 = 4
- stricttest
------------
-
+ERROR: unknown function: stricttest()
create or replace function stricttest() returns void as $$
declare x record;
begin
-@@ -2827,9 +3319,11 @@
+@@ -2827,9 +3247,11 @@
execute 'select * from foo where f1 = 0' into strict x;
raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2;
end$$ language plpgsql;
+ERROR: unimplemented: RECORD type for PL/pgSQL variables is not yet supported
+HINT: You have attempted to use a feature that is not yet implemented.
+See: https://go.crdb.dev/issue-v/114874/dev
select stricttest();
-ERROR: query returned no rows
-CONTEXT: PL/pgSQL function stricttest() line 5 at EXECUTE
+ERROR: unknown function: stricttest()
create or replace function stricttest() returns void as $$
declare x record;
begin
-@@ -2837,12 +3331,17 @@
+@@ -2837,12 +3259,17 @@
execute 'select * from foo where f1 > 3' into strict x;
raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2;
end$$ language plpgsql;
+ERROR: unimplemented: RECORD type for PL/pgSQL variables is not yet supported
+HINT: You have attempted to use a feature that is not yet implemented.
+See: https://go.crdb.dev/issue-v/114874/dev
select stricttest();
-ERROR: query returned more than one row
-CONTEXT: PL/pgSQL function stricttest() line 5 at EXECUTE
+ERROR: unknown function: stricttest()
create or replace function stricttest() returns void as $$
declare
x record;
-@@ -2853,10 +3352,11 @@
+@@ -2853,10 +3280,11 @@
select * from foo where f1 = p1 and f1::text = p3 into strict x;
raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2;
end$$ language plpgsql;
+ERROR: unimplemented: RECORD type for PL/pgSQL variables is not yet supported
+HINT: You have attempted to use a feature that is not yet implemented.
+See: https://go.crdb.dev/issue-v/114874/dev
select stricttest();
-ERROR: query returned no rows
-DETAIL: parameters: p1 = '2', p3 = 'foo'
+ERROR: unknown function: stricttest()
create or replace function stricttest() returns void as $$
declare
x record;
-@@ -2867,10 +3367,11 @@
+@@ -2867,10 +3295,11 @@
select * from foo where f1 = p1 and f1::text = p3 into strict x;
raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2;
end$$ language plpgsql;
+ERROR: unimplemented: RECORD type for PL/pgSQL variables is not yet supported
+HINT: You have attempted to use a feature that is not yet implemented.
+See: https://go.crdb.dev/issue-v/114874/dev
select stricttest();
-ERROR: query returned no rows
-DETAIL: parameters: p1 = '2', p3 = '''Valame Dios!'' dijo Sancho; ''no le dije yo a vuestra merced que mirase bien lo que hacia?'''
+ERROR: unknown function: stricttest()
create or replace function stricttest() returns void as $$
declare
x record;
-@@ -2881,11 +3382,11 @@
+@@ -2881,11 +3310,11 @@
select * from foo where f1 > p1 or f1::text = p3 into strict x;
raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2;
end$$ language plpgsql;
+ERROR: unimplemented: RECORD type for PL/pgSQL variables is not yet supported
+HINT: You have attempted to use a feature that is not yet implemented.
+See: https://go.crdb.dev/issue-v/114874/dev
select stricttest();
-ERROR: query returned more than one row
-DETAIL: parameters: p1 = '2', p3 = 'foo'
+ERROR: unknown function: stricttest()
create or replace function stricttest() returns void as $$
declare x record;
begin
-@@ -2893,10 +3394,11 @@
+@@ -2893,10 +3322,11 @@
select * from foo where f1 > 3 into strict x;
raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2;
end$$ language plpgsql;
+ERROR: unimplemented: RECORD type for PL/pgSQL variables is not yet supported
+HINT: You have attempted to use a feature that is not yet implemented.
+See: https://go.crdb.dev/issue-v/114874/dev
select stricttest();
-ERROR: query returned more than one row
-HINT: Make sure the query returns a single row, or use LIMIT 1.
+ERROR: unknown function: stricttest()
create or replace function stricttest() returns void as $$
declare x record;
begin
-@@ -2904,10 +3406,11 @@
+@@ -2904,10 +3334,11 @@
execute 'select * from foo where f1 = $1 or f1::text = $2' using 0, 'foo' into strict x;
raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2;
end$$ language plpgsql;
+ERROR: unimplemented: RECORD type for PL/pgSQL variables is not yet supported
+HINT: You have attempted to use a feature that is not yet implemented.
+See: https://go.crdb.dev/issue-v/114874/dev
select stricttest();
-ERROR: query returned no rows
-DETAIL: parameters: $1 = '0', $2 = 'foo'
+ERROR: unknown function: stricttest()
create or replace function stricttest() returns void as $$
declare x record;
begin
-@@ -2915,10 +3418,11 @@
+@@ -2915,10 +3346,11 @@
execute 'select * from foo where f1 > $1' using 1 into strict x;
raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2;
end$$ language plpgsql;
+ERROR: unimplemented: RECORD type for PL/pgSQL variables is not yet supported
+HINT: You have attempted to use a feature that is not yet implemented.
+See: https://go.crdb.dev/issue-v/114874/dev
select stricttest();
-ERROR: query returned more than one row
-DETAIL: parameters: $1 = '1'
+ERROR: unknown function: stricttest()
create or replace function stricttest() returns void as $$
declare x record;
begin
-@@ -2926,9 +3430,11 @@
+@@ -2926,9 +3358,11 @@
execute 'select * from foo where f1 > 3' into strict x;
raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2;
end$$ language plpgsql;
+ERROR: unimplemented: RECORD type for PL/pgSQL variables is not yet supported
+HINT: You have attempted to use a feature that is not yet implemented.
+See: https://go.crdb.dev/issue-v/114874/dev
select stricttest();
-ERROR: query returned more than one row
-CONTEXT: PL/pgSQL function stricttest() line 5 at EXECUTE
+ERROR: unknown function: stricttest()
create or replace function stricttest() returns void as $$
-- override the global
#print_strict_params off
-@@ -2941,10 +3447,12 @@
+@@ -2941,10 +3375,12 @@
select * from foo where f1 > p1 or f1::text = p3 into strict x;
raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2;
end$$ language plpgsql;
reset plpgsql.print_strict_params;
create or replace function stricttest() returns void as $$
-- override the global
-@@ -2958,11 +3466,12 @@
+@@ -2958,11 +3394,12 @@
select * from foo where f1 > p1 or f1::text = p3 into strict x;
raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2;
end$$ language plpgsql;
-- test warnings and errors
set plpgsql.extra_warnings to 'all';
set plpgsql.extra_warnings to 'none';
-@@ -2979,23 +3488,18 @@
+@@ -2979,23 +3416,18 @@
begin
end
$$ language plpgsql;
+ returns table (out1 int) as $$
+ ^
+HINT: You have attempted to use a feature that is not yet implemented.
+See: https://go.crdb.dev/issue-v/100226/dev
select shadowtest(1);
- shadowtest
------------
+ERROR: unknown function: shadowtest()
create or replace function shadowtest(in1 int)
returns table (out1 int) as $$
declare
-@@ -3004,18 +3508,17 @@
+@@ -3004,18 +3436,17 @@
begin
end
$$ language plpgsql;
+ returns table (out1 int) as $$
+ ^
+HINT: You have attempted to use a feature that is not yet implemented.
+See: https://go.crdb.dev/issue-v/100226/dev
select shadowtest(1);
- shadowtest
------------
+ERROR: unknown function: shadowtest()
-- shadowing in a second DECLARE block
create or replace function shadowtest()
returns void as $$
-@@ -3027,10 +3530,13 @@
+@@ -3027,10 +3458,13 @@
begin
end;
end$$ language plpgsql;
+ERROR: unimplemented: variable shadowing is not yet implemented
+HINT: You have attempted to use a feature that is not yet implemented.
+See: https://go.crdb.dev/issue-v/117508/dev
+--
+variable "f1" shadows a previously defined variable
drop function shadowtest();
-- several levels of shadowing
create or replace function shadowtest(in1 int)
returns void as $$
-@@ -3042,13 +3548,13 @@
+@@ -3042,13 +3476,13 @@
begin
end;
end$$ language plpgsql;
+ERROR: unimplemented: variable shadowing is not yet implemented
+HINT: You have attempted to use a feature that is not yet implemented.
+See: https://go.crdb.dev/issue-v/117508/dev
+--
+variable "in1" shadows a previously defined variable
drop function shadowtest(int);
-- shadowing in cursor definitions
create or replace function shadowtest()
returns void as $$
-@@ -3057,34 +3563,49 @@
+@@ -3057,34 +3491,49 @@
c1 cursor (f1 int) for select 1;
begin
end$$ language plpgsql;
+ERROR: unimplemented: variable shadowing is not yet implemented
+HINT: You have attempted to use a feature that is not yet implemented.
+See: https://go.crdb.dev/issue-v/117508/dev
+--
+variable "f1" shadows a previously defined variable
select shadowtest(1);
declare f1 int; begin return 1; end $$ language plpgsql;
+ERROR: unimplemented: variable shadowing is not yet implemented
+HINT: You have attempted to use a feature that is not yet implemented.
+See: https://go.crdb.dev/issue-v/117508/dev
+--
+variable "f1" shadows a previously defined variable
select shadowtest(1);
-- runtime extra checks
set plpgsql.extra_warnings to 'too_many_rows';
do $$
-@@ -3093,8 +3614,10 @@
+@@ -3093,8 +3542,10 @@
select v from generate_series(1,2) g(v) into x;
end;
$$;
set plpgsql.extra_errors to 'too_many_rows';
do $$
declare x int;
-@@ -3102,9 +3625,10 @@
+@@ -3102,9 +3553,10 @@
select v from generate_series(1,2) g(v) into x;
end;
$$;
reset plpgsql.extra_errors;
reset plpgsql.extra_warnings;
set plpgsql.extra_warnings to 'strict_multi_assignment';
-@@ -3118,12 +3642,10 @@
+@@ -3118,12 +3570,10 @@
select 1,2,3 into x, y;
end
$$;
set plpgsql.extra_errors to 'strict_multi_assignment';
do $$
declare
-@@ -3135,10 +3657,10 @@
+@@ -3135,10 +3585,10 @@
select 1,2,3 into x, y;
end
$$;
create table test_01(a int, b int, c int);
alter table test_01 drop column a;
-- the check is active only when source table is not empty
-@@ -3153,11 +3675,10 @@
+@@ -3153,11 +3603,10 @@
select * from test_01 into x; -- should to fail
end;
$$;
do $$
declare
t test_01;
-@@ -3167,11 +3688,10 @@
+@@ -3167,11 +3616,10 @@
select 1, 2, 3 into t; -- should fail;
end;
$$;
do $$
declare
t test_01;
-@@ -3179,10 +3699,10 @@
+@@ -3179,10 +3627,10 @@
select 1 into t; -- should fail;
end;
$$;
drop table test_01;
reset plpgsql.extra_errors;
reset plpgsql.extra_warnings;
-@@ -3201,16 +3721,11 @@
+@@ -3201,16 +3649,11 @@
close c;
end;
$$ language plpgsql;
+ERROR: unimplemented: set-returning PL/pgSQL functions are not yet supported
+HINT: You have attempted to use a feature that is not yet implemented.
+See: https://go.crdb.dev/issue-v/105240/dev
select * from sc_test();
- sc_test
--------------
-
+ERROR: unknown function: sc_test()
create or replace function sc_test() returns setof integer as $$
declare
c no scroll cursor for select f1 from int4_tbl;
-@@ -3225,10 +3740,11 @@
+@@ -3225,10 +3668,11 @@
close c;
end;
$$ language plpgsql;
+ERROR: unimplemented: set-returning PL/pgSQL functions are not yet supported
+HINT: You have attempted to use a feature that is not yet implemented.
+See: https://go.crdb.dev/issue-v/105240/dev
select * from sc_test();  -- fails because of NO SCROLL specification
-ERROR: cursor can only scan forward
-HINT: Declare it with SCROLL option to enable backward scan.
+ERROR: unknown function: sc_test()
create or replace function sc_test() returns setof integer as $$
declare
c refcursor;
-@@ -3243,16 +3759,11 @@
+@@ -3243,16 +3687,11 @@
close c;
end;
$$ language plpgsql;
+ERROR: unimplemented: set-returning PL/pgSQL functions are not yet supported
+HINT: You have attempted to use a feature that is not yet implemented.
+See: https://go.crdb.dev/issue-v/105240/dev
select * from sc_test();
- sc_test
--------------
-
+ERROR: unknown function: sc_test()
create or replace function sc_test() returns setof integer as $$
declare
c refcursor;
-@@ -3267,14 +3778,11 @@
+@@ -3267,14 +3706,11 @@
close c;
end;
$$ language plpgsql;
+ERROR: unimplemented: set-returning PL/pgSQL functions are not yet supported
+HINT: You have attempted to use a feature that is not yet implemented.
+See: https://go.crdb.dev/issue-v/105240/dev
select * from sc_test();
- sc_test
--------------
-
+ERROR: unknown function: sc_test()
create or replace function sc_test() returns setof integer as $$
declare
c refcursor;
-@@ -3290,13 +3798,11 @@
+@@ -3290,13 +3726,11 @@
close c;
end;
$$ language plpgsql;
+ERROR: unimplemented: set-returning PL/pgSQL functions are not yet supported
+HINT: You have attempted to use a feature that is not yet implemented.
+See: https://go.crdb.dev/issue-v/105240/dev
select * from sc_test();
- sc_test
--------------
-
+ERROR: unknown function: sc_test()
create or replace function sc_test() returns setof integer as $$
declare
c cursor for select * from generate_series(1, 10);
-@@ -3316,14 +3822,11 @@
+@@ -3316,14 +3750,11 @@
close c;
end;
$$ language plpgsql;
+ERROR: unimplemented: set-returning PL/pgSQL functions are not yet supported
+HINT: You have attempted to use a feature that is not yet implemented.
+See: https://go.crdb.dev/issue-v/105240/dev
select * from sc_test();
- sc_test
---------
-
+ERROR: unknown function: sc_test()
create or replace function sc_test() returns setof integer as $$
declare
c cursor for select * from generate_series(1, 10);
-@@ -3338,13 +3841,13 @@
+@@ -3338,13 +3769,13 @@
close c;
end;
$$ language plpgsql;
+ERROR: unimplemented: set-returning PL/pgSQL functions are not yet supported
+HINT: You have attempted to use a feature that is not yet implemented.
+See: https://go.crdb.dev/issue-v/105240/dev
select * from sc_test();
- sc_test
---------
-
+ERROR: unknown function: sc_test()
-- test qualified variable names
create function pl_qual_names (param1 int) returns void as $$
<>
-@@ -3362,17 +3865,15 @@
+@@ -3362,17 +3793,15 @@
end;
end;
$$ language plpgsql;
+ERROR: unimplemented: variable shadowing is not yet implemented
+HINT: You have attempted to use a feature that is not yet implemented.
+See: https://go.crdb.dev/issue-v/117508/dev
+--
+variable "param1" shadows a previously defined variable
select pl_qual_names(42);
-- tests for RETURN QUERY
create function ret_query1(out int, out int) returns setof record as $$
begin
-@@ -3383,24 +3884,13 @@
+@@ -3383,24 +3812,13 @@
return next;
end;
$$ language plpgsql;
create type record_type as (x text, y int, z boolean);
create or replace function ret_query2(lim int) returns setof record_type as $$
begin
-@@ -3408,20 +3898,11 @@
+@@ -3408,20 +3826,11 @@
from generate_series(-8, lim) s (x) where s.x % 2 = 0;
end;
$$ language plpgsql;
+ERROR: unimplemented: set-returning PL/pgSQL functions are not yet supported
+HINT: You have attempted to use a feature that is not yet implemented.
+See: https://go.crdb.dev/issue-v/105240/dev
select * from ret_query2(8);
- x | y | z
-----------------------------------+----+---
+ERROR: unknown function: ret_query2()
-- test EXECUTE USING
create function exc_using(int, text) returns int as $$
declare i int;
-@@ -3433,19 +3914,27 @@
+@@ -3433,19 +3842,27 @@
return i;
end
$$ language plpgsql;
+HINT: You have attempted to use a feature that is not yet implemented.
+
+Please check the public issue tracker to check whether this problem is
+already tracked. If you cannot find it there, please report the error
+with details by creating a new issue.
+
+If you would rather not post publicly, please contact us directly
+using the support form.
+
+We appreciate your feedback.
+
+select exc_using(5, 'foobar');
+ERROR: unknown function: exc_using()
drop function exc_using(int, text);
create or replace function exc_using(int) returns void as $$
declare
c refcursor;
-@@ -3461,19 +3950,29 @@
+@@ -3461,19 +3878,29 @@
return;
end;
$$ language plpgsql;
+HINT: You have attempted to use a feature that is not yet implemented.
+
+Please check the public issue tracker to check whether this problem is
+already tracked. If you cannot find it there, please report the error
+with details by creating a new issue.
+
+If you would rather not post publicly, please contact us directly
+using the support form.
+
+We appreciate your feedback.
+
+select exc_using(5);
+ERROR: unknown function: exc_using()
drop function exc_using(int);
-- test FOR-over-cursor
create or replace function forc01() returns void as $$
declare
-@@ -3513,29 +4012,24 @@
+@@ -3513,29 +3940,24 @@
return;
end;
$$ language plpgsql;
-- try updating the cursor's current row
create temp table forc_test as
select n as i, n as j from generate_series(1,10) n;
-@@ -3549,35 +4043,39 @@
+@@ -3549,35 +3971,39 @@
end loop;
end;
$$ language plpgsql;
+ for r in c loop
+ ^
+HINT: You have attempted to use a feature that is not yet implemented.
+
+Please check the public issue tracker to check whether this problem is
+already tracked. If you cannot find it there, please report the error
+with details by creating a new issue.
+
+If you would rather not post publicly, please contact us directly
+using the support form.
+
+We appreciate your feedback.
+
(10 rows)
-- same, with a cursor whose portal name doesn't match variable name
-@@ -3595,38 +4093,42 @@
+@@ -3595,38 +4021,42 @@
end loop;
end;
$$ language plpgsql;
-- it's okay to re-use a cursor variable name, even when bound
do $$
declare cnt int := 0;
-@@ -3642,7 +4144,10 @@
+@@ -3642,7 +4072,10 @@
end loop;
raise notice 'cnt = %', cnt;
end $$;
-- fail because cursor has no query bound to it
create or replace function forc_bad() returns void as $$
declare
-@@ -3653,9 +4158,24 @@
+@@ -3653,9 +4086,24 @@
end loop;
end;
$$ language plpgsql;
-- test RETURN QUERY EXECUTE
create or replace function return_dquery()
returns setof int as $$
-@@ -3664,16 +4184,13 @@
+@@ -3664,16 +4112,13 @@
return query execute 'select * from (values($1),($2)) f' using 40,50;
end;
$$ language plpgsql;
+ERROR: unimplemented: set-returning PL/pgSQL functions are not yet supported
+HINT: You have attempted to use a feature that is not yet implemented.
+See: https://go.crdb.dev/issue-v/105240/dev
select * from return_dquery();
- return_dquery
---------------
+ERROR: unknown function: return_dquery()
-- test RETURN QUERY with dropped columns
create table tabwithcols(a int, b int, c int, d int);
insert into tabwithcols values(10,20,30,40),(50,60,70,80);
-@@ -3684,46 +4201,22 @@
+@@ -3684,46 +4129,22 @@
return query execute 'select * from tabwithcols';
end;
$$ language plpgsql;
+ERROR: unimplemented: set-returning PL/pgSQL functions are not yet supported
+HINT: You have attempted to use a feature that is not yet implemented.
+See: https://go.crdb.dev/issue-v/105240/dev
select * from returnqueryf();
- a | b | c | d
-----+----+----+----
drop table tabwithcols;
--
-- Tests for composite-type results
-@@ -3753,6 +4246,9 @@
+@@ -3753,6 +4174,9 @@
return v;
end;
$$ language plpgsql;
+ERROR: unimplemented: RECORD type for PL/pgSQL variables is not yet supported
+HINT: You have attempted to use a feature that is not yet implemented.
+See: https://go.crdb.dev/issue-v/114874/dev
select compos();
compos
-----------
-@@ -3778,9 +4274,11 @@
+@@ -3778,9 +4202,11 @@
end;
$$ language plpgsql;
select compos();
-- ... but this does
create or replace function compos() returns compostype as $$
begin
-@@ -3803,12 +4301,11 @@
+@@ -3803,12 +4229,11 @@
return v;
end;
$$ language plpgsql;
+ERROR: unimplemented: RECORD type for PL/pgSQL variables is not yet supported
+HINT: You have attempted to use a feature that is not yet implemented.
-+See: https://go.crdb.dev/issue-v/114874/v24.2 ++See: https://go.crdb.dev/issue-v/114874/dev select composrec(); - composrec ------------ @@ -237008,13 +235830,13 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab -- test: return row expr in return statement. create or replace function composrec() returns record as $$ begin -@@ -3833,26 +4330,22 @@ +@@ -3833,26 +4258,22 @@ return next (2, 'goodbye')::compostype; end; $$ language plpgsql; +ERROR: unimplemented: set-returning PL/pgSQL functions are not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/105240/v24.2 ++See: https://go.crdb.dev/issue-v/105240/dev select * from compos(); - x | y ----+--------- @@ -237042,7 +235864,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab -- RETURN variable is a different code path ... create or replace function compos() returns compostype as $$ declare x int := 42; -@@ -3861,8 +4354,7 @@ +@@ -3861,8 +4282,7 @@ end; $$ language plpgsql; select * from compos(); @@ -237052,7 +235874,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab drop function compos(); -- test: invalid use of composite variable in scalar-returning function create or replace function compos() returns int as $$ -@@ -3874,8 +4366,7 @@ +@@ -3874,8 +4294,7 @@ end; $$ language plpgsql; select compos(); @@ -237062,7 +235884,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab -- test: invalid use of composite expression in scalar-returning function create or replace function compos() returns int as $$ begin -@@ -3883,8 +4374,7 @@ +@@ -3883,8 +4302,7 @@ end; $$ language plpgsql; select compos(); @@ -237072,7 +235894,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab drop function compos(); drop type compostype; -- -@@ -3904,7 +4394,6 @@ +@@ -3904,7 +4322,6 @@ HINT: some hint ERROR: 1 2 3 DETAIL: some detail info @@ -237080,7 +235902,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab -- Since we can't actually see the thrown SQLSTATE in default psql output, -- test it like this; this also tests re-RAISE create or replace function raise_test() returns void as $$ -@@ -3917,11 +4406,33 @@ +@@ -3917,11 +4334,33 @@ raise; end; $$ language plpgsql; @@ -237117,7 +235939,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab create or replace function raise_test() returns void as $$ begin raise 'check me' -@@ -3932,11 +4443,33 @@ +@@ -3932,11 +4371,33 @@ raise; end; $$ language plpgsql; @@ -237154,7 +235976,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab -- SQLSTATE specification in WHEN create or replace function raise_test() returns void as $$ begin -@@ -3948,11 +4481,33 @@ +@@ -3948,11 +4409,33 @@ raise; end; $$ language plpgsql; @@ -237191,7 +236013,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab create or replace function raise_test() returns void as $$ begin raise division_by_zero using detail = 'some detail info'; -@@ -3962,11 +4517,32 @@ +@@ -3962,11 +4445,32 @@ raise; end; $$ language plpgsql; @@ -237227,7 +236049,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab create or replace function raise_test() returns void as $$ begin raise division_by_zero; -@@ -3974,7 +4550,6 @@ +@@ -3974,7 +4478,6 @@ $$ language plpgsql; select 
raise_test(); ERROR: division_by_zero @@ -237235,7 +236057,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab create or replace function raise_test() returns void as $$ begin raise sqlstate '1234F'; -@@ -3982,7 +4557,6 @@ +@@ -3982,7 +4485,6 @@ $$ language plpgsql; select raise_test(); ERROR: 1234F @@ -237243,7 +236065,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab create or replace function raise_test() returns void as $$ begin raise division_by_zero using message = 'custom' || ' message'; -@@ -3990,7 +4564,6 @@ +@@ -3990,7 +4492,6 @@ $$ language plpgsql; select raise_test(); ERROR: custom message @@ -237251,7 +236073,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab create or replace function raise_test() returns void as $$ begin raise using message = 'custom' || ' message', errcode = '22012'; -@@ -3998,34 +4571,48 @@ +@@ -3998,34 +4499,48 @@ $$ language plpgsql; select raise_test(); ERROR: custom message @@ -237307,7 +236129,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab -- test access to exception data create function zero_divide() returns int as $$ declare v int := 0; -@@ -4033,6 +4620,7 @@ +@@ -4033,6 +4548,7 @@ return 10 / v; end; $$ language plpgsql; @@ -237315,7 +236137,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab create or replace function raise_test() returns void as $$ begin raise exception 'custom exception' -@@ -4055,13 +4643,27 @@ +@@ -4055,13 +4571,27 @@ _sqlstate, _message, replace(_context, E'\n', ' <- '); end; $$ language plpgsql; @@ -237338,18 +236160,18 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab +Please check the public issue tracker to check whether this problem is +already tracked. If you cannot find it there, please report the error +with details by creating a new issue. - ++ +If you would rather not post publicly, please contact us directly +using the support form. + +We appreciate your feedback. -+ + +select stacked_diagnostics_test(); +ERROR: unknown function: stacked_diagnostics_test() create or replace function stacked_diagnostics_test() returns void as $$ declare _detail text; _hint text; -@@ -4076,13 +4678,27 @@ +@@ -4076,13 +4606,27 @@ raise notice 'message: %, detail: %, hint: %', _message, _detail, _hint; end; $$ language plpgsql; @@ -237372,10 +236194,10 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab +Please check the public issue tracker to check whether this problem is +already tracked. If you cannot find it there, please report the error +with details by creating a new issue. - ++ +If you would rather not post publicly, please contact us directly +using the support form. -+ + +We appreciate your feedback. + +select stacked_diagnostics_test(); @@ -237383,11 +236205,12 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab -- fail, cannot use stacked diagnostics statement outside handler create or replace function stacked_diagnostics_test() returns void as $$ declare _detail text; -@@ -4096,11 +4712,24 @@ +@@ -4096,11 +4640,25 @@ raise notice 'message: %, detail: %, hint: %', _message, _detail, _hint; end; $$ language plpgsql; +ERROR: unimplemented: attempted to use a PL/pgSQL statement that is not yet supported ++DETAIL: stmt_get_diag is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
+ +Please check the public issue tracker to check whether this problem is @@ -237410,7 +236233,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab -- check cases where implicit SQLSTATE variable could be confused with -- SQLSTATE as a keyword, cf bug #5524 create or replace function raise_test() returns void as $$ -@@ -4112,10 +4741,26 @@ +@@ -4112,10 +4670,26 @@ raise sqlstate '22012' using message = 'substitute message'; end; $$ language plpgsql; @@ -237440,7 +236263,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab drop function raise_test(); -- test passing column_name, constraint_name, datatype_name, table_name -- and schema_name error fields -@@ -4143,14 +4788,22 @@ +@@ -4143,14 +4717,23 @@ _column_name, _constraint_name, _datatype_name, _table_name, _schema_name; end; $$ language plpgsql; @@ -237451,17 +236274,18 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab - -(1 row) +ERROR: unimplemented: attempted to use a PL/pgSQL statement that is not yet supported ++DETAIL: stmt_get_diag is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. + +Please check the public issue tracker to check whether this problem is +already tracked. If you cannot find it there, please report the error +with details by creating a new issue. - ++ +If you would rather not post publicly, please contact us directly +using the support form. + +We appreciate your feedback. -+ + +select stacked_diagnostics_test(); +ERROR: unknown function: stacked_diagnostics_test() drop function stacked_diagnostics_test(); @@ -237469,7 +236293,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab -- test variadic functions create or replace function vari(variadic int[]) returns void as $$ -@@ -4159,36 +4812,34 @@ +@@ -4159,36 +4742,34 @@ raise notice '%', $1[i]; end loop; end; $$ language plpgsql; @@ -237478,7 +236302,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab +create or replace function vari(variadic int[]) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/88947/v24.2 ++See: https://go.crdb.dev/issue-v/88947/dev select vari(1,2,3,4,5); -NOTICE: 1 -NOTICE: 2 @@ -237529,7 +236353,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab -- coercion test create or replace function pleast(variadic numeric[]) returns numeric as $$ -@@ -4200,30 +4851,20 @@ +@@ -4200,30 +4781,20 @@ return aux; end; $$ language plpgsql immutable strict; @@ -237538,7 +236362,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab +create or replace function pleast(variadic numeric[]) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/88947/v24.2 ++See: https://go.crdb.dev/issue-v/88947/dev select pleast(10,1,2,3,-16); - pleast --------- @@ -237570,7 +236394,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab -- in case of conflict, non-variadic version is preferred create or replace function pleast(numeric) returns numeric as $$ -@@ -4232,31 +4873,27 @@ +@@ -4232,31 +4803,27 @@ return $1; end; $$ language plpgsql immutable strict; @@ -237598,7 +236422,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab +create function tftest(int) returns table(a int, b int) as $$ + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/100226/v24.2 ++See: https://go.crdb.dev/issue-v/100226/dev select * from tftest(10); - a | b -----+---- @@ -237613,7 +236437,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab create or replace function tftest(a1 int) returns table(a int, b int) as $$ begin a := a1; b := a1 + 1; -@@ -4265,14 +4902,16 @@ +@@ -4265,14 +4832,16 @@ return next; end; $$ language plpgsql immutable strict; @@ -237622,7 +236446,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab +create or replace function tftest(a1 int) returns table(a int, b int) as $$ + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/100226/v24.2 ++See: https://go.crdb.dev/issue-v/100226/dev select * from tftest(10); - a | b ------+----- @@ -237636,13 +236460,13 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab create function rttest() returns setof int as $$ declare rc int; -@@ -4291,19 +4930,11 @@ +@@ -4291,19 +4860,11 @@ raise notice '% %', found, rc; end; $$ language plpgsql; +ERROR: unimplemented: set-returning PL/pgSQL functions are not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/105240/v24.2 ++See: https://go.crdb.dev/issue-v/105240/dev select * from rttest(); -NOTICE: t 2 -NOTICE: f 0 @@ -237660,13 +236484,13 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab -- check some error cases, too create or replace function rttest() returns setof int as $$ -@@ -4311,25 +4942,26 @@ +@@ -4311,25 +4872,26 @@ return query select 10 into no_such_table; end; $$ language plpgsql; +ERROR: unimplemented: set-returning PL/pgSQL functions are not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/105240/v24.2 ++See: https://go.crdb.dev/issue-v/105240/dev select * from rttest(); -ERROR: SELECT INTO query does not return tuples -CONTEXT: SQL statement "select 10 into no_such_table" @@ -237680,7 +236504,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab $$ language plpgsql; +ERROR: unimplemented: set-returning PL/pgSQL functions are not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/105240/v24.2 ++See: https://go.crdb.dev/issue-v/105240/dev select * from rttest(); -ERROR: SELECT INTO query does not return tuples -CONTEXT: SQL statement "select 10 into no_such_table" @@ -237695,7 +236519,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab -- Test for proper cleanup at subtransaction exit. This example -- exposed a bug in PG 8.2. 
CREATE FUNCTION leaker_1(fail BOOL) RETURNS INTEGER AS $$ -@@ -4344,6 +4976,8 @@ +@@ -4344,6 +4906,7 @@ RETURN 1; END; $$ LANGUAGE plpgsql; @@ -237703,7 +236527,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab CREATE FUNCTION leaker_2(fail BOOL, OUT error_code INTEGER, OUT new_id INTEGER) RETURNS RECORD AS $$ BEGIN -@@ -4356,18 +4990,11 @@ +@@ -4356,18 +4919,11 @@ END; $$ LANGUAGE plpgsql; SELECT * FROM leaker_1(false); @@ -237725,7 +236549,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab DROP FUNCTION leaker_2(bool); -- Test for appropriate cleanup of non-simple expression evaluations -- (bug in all versions prior to August 2010) -@@ -4385,13 +5012,27 @@ +@@ -4385,13 +4941,27 @@ RETURN arr; END; $$ LANGUAGE plpgsql; @@ -237758,13 +236582,13 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab CREATE FUNCTION nonsimple_expr_test() RETURNS integer AS $$ declare i integer NOT NULL := 0; -@@ -4405,13 +5046,13 @@ +@@ -4405,13 +4975,13 @@ return i; end; $$ LANGUAGE plpgsql; +ERROR: unimplemented: not-null PL/pgSQL variables are not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/105243/v24.2 ++See: https://go.crdb.dev/issue-v/105243/dev SELECT nonsimple_expr_test(); - nonsimple_expr_test ---------------------- @@ -237777,7 +236601,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab -- -- Test cases involving recursion and error recovery in simple expressions -- (bugs in all versions before October 2010). The problems are most -@@ -4427,15 +5068,15 @@ +@@ -4427,15 +4997,13 @@ end if; end; $$ language plpgsql; @@ -237796,7 +236620,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab create function error1(text) returns text language sql as $$ SELECT relname::text FROM pg_class c WHERE c.oid = $1::regclass $$; create function error2(p_name_table text) returns text language plpgsql as $$ -@@ -4446,9 +5087,7 @@ +@@ -4446,9 +5014,7 @@ create table public.stuffs (stuff text); SAVEPOINT a; select error2('nonexistent.stuffs'); @@ -237807,7 +236631,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab ROLLBACK TO a; select error2('public.stuffs'); error2 -@@ -4463,7 +5102,23 @@ +@@ -4463,7 +5029,23 @@ create function sql_to_date(integer) returns date as $$ select $1::text::date $$ language sql immutable strict; @@ -237831,7 +236655,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab create function cast_invoker(integer) returns date as $$ begin return $1; -@@ -4471,62 +5126,81 @@ +@@ -4471,62 +5053,81 @@ select cast_invoker(20150717); cast_invoker -------------- @@ -237924,7 +236748,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab end; -- Test for consistent reporting of error context create function fail() returns int language plpgsql as $$ -@@ -4534,45 +5208,29 @@ +@@ -4534,45 +5135,29 @@ return 1/0; end $$; @@ -237981,7 +236805,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab (1 row) create or replace function strtest() returns text as $$ -@@ -4625,21 +5283,16 @@ +@@ -4625,21 +5210,16 @@ RAISE NOTICE '%, %', r.roomno, r.comment; END LOOP; END$$; @@ -238011,7 +236835,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab DO $$ DECLARE r record; BEGIN -@@ -4648,11 +5301,10 @@ +@@ -4648,11 
+5228,10 @@ RAISE NOTICE '%, %', r.roomno, r.comment; END LOOP; END$$; @@ -238027,7 +236851,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab -- Check handling of errors thrown from/into anonymous code blocks. do $outer$ begin -@@ -4672,16 +5324,10 @@ +@@ -4672,16 +5251,10 @@ end loop; end; $outer$; @@ -238048,7 +236872,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab -- Check variable scoping -- a var is not available in its own or prior -- default expressions, but it is available in later ones. do $$ -@@ -4690,11 +5336,10 @@ +@@ -4690,11 +5263,10 @@ raise notice 'x = %', x; end; $$; @@ -238064,7 +236888,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab do $$ declare y int := x + 1; -- error x int := 42; -@@ -4702,11 +5347,10 @@ +@@ -4702,11 +5274,10 @@ raise notice 'x = %, y = %', x, y; end; $$; @@ -238080,7 +236904,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab do $$ declare x int := 42; y int := x + 1; -@@ -4714,7 +5358,10 @@ +@@ -4714,7 +5285,10 @@ raise notice 'x = %, y = %', x, y; end; $$; @@ -238092,7 +236916,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab do $$ declare x int := 42; begin -@@ -4726,7 +5373,10 @@ +@@ -4726,7 +5300,10 @@ end; end; $$; @@ -238104,13 +236928,13 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab -- Check handling of conflicts between plpgsql vars and table columns. set plpgsql.variable_conflict = error; create function conflict_test() returns setof int8_tbl as $$ -@@ -4738,13 +5388,11 @@ +@@ -4738,13 +5315,11 @@ end loop; end; $$ language plpgsql; +ERROR: unimplemented: set-returning PL/pgSQL functions are not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/105240/v24.2 ++See: https://go.crdb.dev/issue-v/105240/dev select * from conflict_test(); -ERROR: column reference "q1" is ambiguous -LINE 1: select q1,q2 from int8_tbl @@ -238122,13 +236946,13 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab create or replace function conflict_test() returns setof int8_tbl as $$ #variable_conflict use_variable declare r record; -@@ -4755,16 +5403,11 @@ +@@ -4755,16 +5330,11 @@ end loop; end; $$ language plpgsql; +ERROR: unimplemented: set-returning PL/pgSQL functions are not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/105240/v24.2 ++See: https://go.crdb.dev/issue-v/105240/dev select * from conflict_test(); - q1 | q2 -----+------------------- @@ -238143,13 +236967,13 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab create or replace function conflict_test() returns setof int8_tbl as $$ #variable_conflict use_column declare r record; -@@ -4775,17 +5418,13 @@ +@@ -4775,17 +5345,13 @@ end loop; end; $$ language plpgsql; +ERROR: unimplemented: set-returning PL/pgSQL functions are not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/105240/v24.2 ++See: https://go.crdb.dev/issue-v/105240/dev select * from conflict_test(); - q1 | q2 -------------------+------------------- @@ -238166,7 +236990,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab -- Check that an unreserved keyword can be used as a variable name create function unreserved_test() returns int as $$ declare -@@ -4795,12 +5434,15 @@ +@@ -4795,12 +5361,15 @@ return forward; end $$ language plpgsql; @@ -238187,7 +237011,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab create or replace function unreserved_test() returns int as $$ declare return int := 42; -@@ -4809,12 +5451,20 @@ +@@ -4809,12 +5378,20 @@ return return; end $$ language plpgsql; @@ -238213,7 +237037,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab create or replace function unreserved_test() returns int as $$ declare comment int := 21; -@@ -4824,19 +5474,26 @@ +@@ -4824,19 +5401,26 @@ return comment; end $$ language plpgsql; @@ -238230,7 +237054,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab + comment on function unreserved_test() is 'this is a test'; + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/17511/v24.2 ++See: https://go.crdb.dev/issue-v/17511/dev select unreserved_test(); - unreserved_test ------------------ @@ -238250,7 +237074,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab -- -- Test FOREACH over arrays -- -@@ -4850,26 +5507,27 @@ +@@ -4850,26 +5434,27 @@ end loop; end; $$ language plpgsql; @@ -238283,12 +237107,12 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab +Please check the public issue tracker to check whether this problem is +already tracked. If you cannot find it there, please report the error +with details by creating a new issue. - ++ +If you would rather not post publicly, please contact us directly +using the support form. + +We appreciate your feedback. -+ + +select foreach_test(ARRAY[1,2,3,4]); +ERROR: unknown function: foreach_test() +select foreach_test(ARRAY[[1,2],[3,4]]); @@ -238296,7 +237120,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab create or replace function foreach_test(anyarray) returns void as $$ declare x int; -@@ -4880,13 +5538,28 @@ +@@ -4880,13 +5465,28 @@ end loop; end; $$ language plpgsql; @@ -238329,7 +237153,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab create or replace function foreach_test(anyarray) returns void as $$ declare x int[]; -@@ -4897,21 +5570,27 @@ +@@ -4897,21 +5497,27 @@ end loop; end; $$ language plpgsql; @@ -238360,9 +237184,9 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab + +If you would rather not post publicly, please contact us directly +using the support form. - -+We appreciate your feedback. + ++We appreciate your feedback. 
+ +select foreach_test(ARRAY[1,2,3,4]); +ERROR: unknown function: foreach_test() +select foreach_test(ARRAY[[1,2],[3,4]]); @@ -238370,7 +237194,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab -- higher level of slicing create or replace function foreach_test(anyarray) returns void as $$ -@@ -4923,26 +5602,31 @@ +@@ -4923,26 +5529,31 @@ end loop; end; $$ language plpgsql; @@ -238417,7 +237241,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab create type xy_tuple AS (x int, y int); -- iteration over array of records create or replace function foreach_test(anyarray) -@@ -4955,25 +5639,27 @@ +@@ -4955,25 +5566,27 @@ end loop; end; $$ language plpgsql; @@ -238452,9 +237276,9 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab + +If you would rather not post publicly, please contact us directly +using the support form. - -+We appreciate your feedback. + ++We appreciate your feedback. + +select foreach_test(ARRAY[(10,20),(40,69),(35,78)]::xy_tuple[]); +ERROR: unknown function: foreach_test() +select foreach_test(ARRAY[[(10,20),(40,69)],[(35,78),(88,76)]]::xy_tuple[]); @@ -238462,7 +237286,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab create or replace function foreach_test(anyarray) returns void as $$ declare x int; y int; -@@ -4984,25 +5670,27 @@ +@@ -4984,25 +5597,27 @@ end loop; end; $$ language plpgsql; @@ -238507,7 +237331,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab -- slicing over array of composite types create or replace function foreach_test(anyarray) returns void as $$ -@@ -5014,22 +5702,29 @@ +@@ -5014,22 +5629,29 @@ end loop; end; $$ language plpgsql; @@ -238535,12 +237359,12 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab +Please check the public issue tracker to check whether this problem is +already tracked. If you cannot find it there, please report the error +with details by creating a new issue. - ++ +If you would rather not post publicly, please contact us directly +using the support form. + +We appreciate your feedback. -+ + +select foreach_test(ARRAY[(10,20),(40,69),(35,78)]::xy_tuple[]); +ERROR: unknown function: foreach_test() +select foreach_test(ARRAY[[(10,20),(40,69)],[(35,78),(88,76)]]::xy_tuple[]); @@ -238550,7 +237374,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab drop type xy_tuple; -- -- Assorted tests for array subscript assignment -@@ -5043,28 +5738,34 @@ +@@ -5043,28 +5665,34 @@ r.ar[2] := 'replace'; return r.ar; end$$; @@ -238587,7 +237411,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab +create domain orderedarray as int[2] + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev select '{1,2}'::orderedarray; - orderedarray --------------- @@ -238601,7 +237425,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab create function testoa(x1 int, x2 int, x3 int) returns orderedarray language plpgsql as $$ declare res orderedarray; -@@ -5073,26 +5774,19 @@ +@@ -5073,26 +5701,19 @@ res[2] := x3; return res; end$$; @@ -238635,7 +237459,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab -- -- Test handling of expanded arrays -- -@@ -5116,17 +5810,11 @@ +@@ -5116,17 +5737,11 @@ select i, a from (select returns_rw_array(1) as a offset 0) ss, lateral consumes_rw_array(a) i; @@ -238658,7 +237482,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab select i, a from (select returns_rw_array(1) as a offset 0) ss, lateral consumes_rw_array(a) i; -@@ -5137,13 +5825,11 @@ +@@ -5137,13 +5752,11 @@ explain (verbose, costs off) select consumes_rw_array(a), a from returns_rw_array(1) a; @@ -238677,7 +237501,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab select consumes_rw_array(a), a from returns_rw_array(1) a; consumes_rw_array | a -------------------+------- -@@ -5153,12 +5839,11 @@ +@@ -5153,12 +5766,11 @@ explain (verbose, costs off) select consumes_rw_array(a), a from (values (returns_rw_array(1)), (returns_rw_array(2))) v(a); @@ -238695,7 +237519,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab select consumes_rw_array(a), a from (values (returns_rw_array(1)), (returns_rw_array(2))) v(a); consumes_rw_array | a -@@ -5173,7 +5858,10 @@ +@@ -5173,7 +5785,10 @@ a := a || 3; raise notice 'a = %', a; end$$; @@ -238707,11 +237531,12 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab -- -- Test access to call stack -- -@@ -5190,6 +5878,18 @@ +@@ -5190,6 +5805,19 @@ return 2 * $1; end; $$ language plpgsql; +ERROR: unimplemented: attempted to use a PL/pgSQL statement that is not yet supported ++DETAIL: stmt_get_diag is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
+ +Please check the public issue tracker to check whether this problem is @@ -238726,7 +237551,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab create or replace function outer_func(int) returns int as $$ declare -@@ -5201,6 +5901,8 @@ +@@ -5201,6 +5829,7 @@ return myresult; end; $$ language plpgsql; @@ -238734,7 +237559,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab create or replace function outer_outer_func(int) returns int as $$ declare -@@ -5212,44 +5914,19 @@ +@@ -5212,44 +5841,18 @@ return myresult; end; $$ language plpgsql; @@ -238785,7 +237610,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab -- access to call stack from exception create function inner_func(int) returns int as $$ -@@ -5272,6 +5949,26 @@ +@@ -5272,6 +5875,26 @@ return 2 * $1; end; $$ language plpgsql; @@ -238812,7 +237637,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab create or replace function outer_func(int) returns int as $$ declare -@@ -5283,6 +5980,8 @@ +@@ -5283,6 +5906,7 @@ return myresult; end; $$ language plpgsql; @@ -238820,7 +237645,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab create or replace function outer_outer_func(int) returns int as $$ declare -@@ -5294,44 +5993,19 @@ +@@ -5294,44 +5918,18 @@ return myresult; end; $$ language plpgsql; @@ -238871,7 +237696,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab -- Test pg_routine_oid create function current_function(text) returns regprocedure as $$ -@@ -5342,13 +6016,17 @@ +@@ -5342,13 +5940,17 @@ return fn_oid; end; $$ language plpgsql; @@ -238894,7 +237719,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab -- shouldn't fail in DO, even though there's no useful data do $$ declare -@@ -5358,7 +6036,10 @@ +@@ -5358,7 +5960,10 @@ raise notice 'pg_routine_oid = %', fn_oid; end; $$; @@ -238906,7 +237731,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab -- -- Test ASSERT -- -@@ -5367,20 +6048,28 @@ +@@ -5367,20 +5972,28 @@ assert 1=1; -- should succeed end; $$; @@ -238939,7 +237764,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab -- check controlling GUC set plpgsql.check_asserts = off; do $$ -@@ -5388,6 +6077,10 @@ +@@ -5388,6 +6001,10 @@ assert 1=0; -- won't be tested end; $$; @@ -238950,7 +237775,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab reset plpgsql.check_asserts; -- test custom message do $$ -@@ -5396,8 +6089,10 @@ +@@ -5396,8 +6013,10 @@ assert 1=0, format('assertion failed, var = "%s"', var); end; $$; @@ -238963,7 +237788,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab -- ensure assertions are not trapped by 'others' do $$ begin -@@ -5406,32 +6101,52 @@ +@@ -5406,32 +6025,52 @@ null; -- do nothing end; $$; @@ -238983,7 +237808,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab +create domain plpgsql_domain as integer check(plpgsql_domain_check(value)) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev do $$ declare v_test plpgsql_domain; begin @@ -239016,11 +237841,11 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab +create domain plpgsql_arr_domain as int[] check(plpgsql_arr_domain_check(value)) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/27796/v24.2 ++See: https://go.crdb.dev/issue-v/27796/dev do $$ declare v_test plpgsql_arr_domain; begin -@@ -5439,14 +6154,20 @@ +@@ -5439,14 +6078,20 @@ v_test := v_test || 2; end; $$; @@ -239043,34 +237868,30 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab -- -- test usage of transition tables in AFTER triggers -- -@@ -5472,25 +6193,31 @@ +@@ -5472,25 +6117,27 @@ RETURN new; END; $$; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev CREATE TRIGGER transition_table_base_ins_trig AFTER INSERT ON transition_table_base REFERENCING OLD TABLE AS oldtable NEW TABLE AS newtable FOR EACH STATEMENT EXECUTE PROCEDURE transition_table_base_ins_func(); -ERROR: OLD TABLE can only be specified for a DELETE or UPDATE trigger -+ERROR: at or near "transition_table_base_ins_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER transition_table_base_ins_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE TRIGGER transition_table_base_ins_trig AFTER INSERT ON transition_table_base REFERENCING NEW TABLE AS newtable FOR EACH STATEMENT EXECUTE PROCEDURE transition_table_base_ins_func(); -+ERROR: at or near "transition_table_base_ins_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER transition_table_base_ins_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev INSERT INTO transition_table_base VALUES (1, 'One'), (2, 'Two'); -INFO: Named Tuplestore Scan - Output: id, val @@ -239082,22 +237903,21 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab CREATE OR REPLACE FUNCTION transition_table_base_upd_func() RETURNS trigger LANGUAGE plpgsql -@@ -5512,30 +6239,28 @@ +@@ -5512,30 +6159,27 @@ RETURN new; END; $$; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev CREATE TRIGGER transition_table_base_upd_trig AFTER UPDATE ON transition_table_base REFERENCING OLD TABLE AS oldtable NEW TABLE AS newtable FOR EACH STATEMENT EXECUTE PROCEDURE transition_table_base_upd_func(); -+ERROR: at or near "transition_table_base_upd_trig": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER transition_table_base_upd_trig -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev UPDATE transition_table_base SET val = '*' || val || '*' WHERE id BETWEEN 2 AND 3; @@ -239117,87 +237937,81 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab level1_node_name varchar(255), PRIMARY KEY (level1_no) ) WITHOUT OIDS; -+NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. See https://www.cockroachlabs.com/docs/v24.2/serial.html ++NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. See https://www.cockroachlabs.com/docs/dev/serial.html CREATE TABLE transition_table_level2 ( level2_no serial NOT NULL , -@@ -5543,6 +6268,7 @@ +@@ -5543,6 +6187,7 @@ level1_node_name varchar(255), PRIMARY KEY (level2_no) ) WITHOUT OIDS; -+NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. See https://www.cockroachlabs.com/docs/v24.2/serial.html ++NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. See https://www.cockroachlabs.com/docs/dev/serial.html CREATE TABLE transition_table_status ( level int NOT NULL, -@@ -5563,11 +6289,18 @@ +@@ -5563,11 +6208,17 @@ RETURN NULL; END; $$; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev CREATE TRIGGER transition_table_level1_ri_parent_del_trigger AFTER DELETE ON transition_table_level1 REFERENCING OLD TABLE AS p FOR EACH STATEMENT EXECUTE PROCEDURE transition_table_level1_ri_parent_del_func(); -+ERROR: at or near "transition_table_level1_ri_parent_del_trigger": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER transition_table_level1_ri_parent_del_trigger -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE FUNCTION transition_table_level1_ri_parent_upd_func() RETURNS TRIGGER LANGUAGE plpgsql -@@ -5590,11 +6323,18 @@ +@@ -5590,11 +6241,17 @@ RETURN NULL; END; $$; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev CREATE TRIGGER transition_table_level1_ri_parent_upd_trigger AFTER UPDATE ON transition_table_level1 REFERENCING OLD TABLE AS d NEW TABLE AS i FOR EACH STATEMENT EXECUTE PROCEDURE transition_table_level1_ri_parent_upd_func(); -+ERROR: at or near "transition_table_level1_ri_parent_upd_trigger": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER transition_table_level1_ri_parent_upd_trigger -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE FUNCTION transition_table_level2_ri_child_insupd_func() RETURNS TRIGGER LANGUAGE plpgsql -@@ -5610,16 +6350,29 @@ +@@ -5610,16 +6267,25 @@ RETURN NULL; END; $$; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. 
++See: https://go.crdb.dev/issue-v/126356/dev CREATE TRIGGER transition_table_level2_ri_child_ins_trigger AFTER INSERT ON transition_table_level2 REFERENCING NEW TABLE AS i FOR EACH STATEMENT EXECUTE PROCEDURE transition_table_level2_ri_child_insupd_func(); -+ERROR: at or near "transition_table_level2_ri_child_ins_trigger": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER transition_table_level2_ri_child_ins_trigger -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev CREATE TRIGGER transition_table_level2_ri_child_upd_trigger AFTER UPDATE ON transition_table_level2 REFERENCING NEW TABLE AS i FOR EACH STATEMENT EXECUTE PROCEDURE transition_table_level2_ri_child_insupd_func(); -+ERROR: at or near "transition_table_level2_ri_child_upd_trigger": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER transition_table_level2_ri_child_upd_trigger -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev -- create initial test data INSERT INTO transition_table_level1 (level1_no) SELECT generate_series(1,200); -@@ -5627,6 +6380,7 @@ +@@ -5627,6 +6293,7 @@ INSERT INTO transition_table_level2 (level2_no, parent_no) SELECT level2_no, level2_no / 50 + 1 AS parent_no FROM generate_series(1,9999) level2_no; @@ -239205,22 +238019,21 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab ANALYZE transition_table_level2; INSERT INTO transition_table_status (level, node_no, status) SELECT 1, level1_no, 0 FROM transition_table_level1; -@@ -5646,35 +6400,37 @@ +@@ -5646,35 +6313,33 @@ RETURN NULL; END; $$; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev CREATE TRIGGER transition_table_level2_bad_usage_trigger AFTER DELETE ON transition_table_level2 REFERENCING OLD TABLE AS dx FOR EACH STATEMENT EXECUTE PROCEDURE transition_table_level2_bad_usage_func(); -+ERROR: at or near "transition_table_level2_bad_usage_trigger": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER transition_table_level2_bad_usage_trigger -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev DELETE FROM transition_table_level2 WHERE level2_no BETWEEN 301 AND 305; -ERROR: relation "dx" cannot be the target of a modifying statement @@ -239228,12 +238041,9 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab -PL/pgSQL function transition_table_level2_bad_usage_func() line 3 at SQL statement DROP TRIGGER transition_table_level2_bad_usage_trigger ON transition_table_level2; -+ERROR: at or near "transition_table_level2_bad_usage_trigger": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+DROP TRIGGER transition_table_level2_bad_usage_trigger -+ ^ ++ERROR: unimplemented: DROP TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev -- attempt modifications which would break RI (should all fail) DELETE FROM transition_table_level1 WHERE level1_no = 25; @@ -239254,7 +238064,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab -- attempt modifications which would not break RI (should all succeed) DELETE FROM transition_table_level1 WHERE level1_no BETWEEN 201 AND 1000; -@@ -5683,7 +6439,7 @@ +@@ -5683,7 +6348,7 @@ SELECT count(*) FROM transition_table_level1; count ------- @@ -239263,7 +238073,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab (1 row) DELETE FROM transition_table_level2 -@@ -5691,7 +6447,7 @@ +@@ -5691,7 +6356,7 @@ SELECT count(*) FROM transition_table_level2; count ------- @@ -239272,11 +238082,13 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab (1 row) CREATE TABLE alter_table_under_transition_tables -@@ -5711,42 +6467,49 @@ +@@ -5711,42 +6376,45 @@ RETURN NULL; END; $$; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev -- should fail, TRUNCATE is not compatible with transition tables CREATE TRIGGER alter_table_under_transition_tables_upd_trigger AFTER TRUNCATE OR UPDATE ON alter_table_under_transition_tables @@ -239284,24 +238096,18 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab FOR EACH STATEMENT EXECUTE PROCEDURE alter_table_under_transition_tables_upd_func(); -ERROR: TRUNCATE triggers with transition tables are not supported -+ERROR: at or near "alter_table_under_transition_tables_upd_trigger": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER alter_table_under_transition_tables_upd_trigger -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev -- should work CREATE TRIGGER alter_table_under_transition_tables_upd_trigger AFTER UPDATE ON alter_table_under_transition_tables REFERENCING OLD TABLE AS d NEW TABLE AS i FOR EACH STATEMENT EXECUTE PROCEDURE alter_table_under_transition_tables_upd_func(); -+ERROR: at or near "alter_table_under_transition_tables_upd_trigger": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER alter_table_under_transition_tables_upd_trigger -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev INSERT INTO alter_table_under_transition_tables VALUES (1, '1'), (2, '2'), (3, '3'); UPDATE alter_table_under_transition_tables @@ -239312,7 +238118,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab ALTER TABLE alter_table_under_transition_tables ALTER COLUMN name TYPE int USING name::integer; +ERROR: ALTER COLUMN TYPE from string to int is only supported experimentally -+HINT: See: https://go.crdb.dev/issue-v/49329/v24.2 ++HINT: See: https://go.crdb.dev/issue-v/49329/dev +-- +you can enable alter column type general support by running `SET enable_experimental_alter_column_type_general = true` UPDATE alter_table_under_transition_tables @@ -239332,20 +238138,19 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab -- -- Test multiple reference to a transition table -- -@@ -5761,22 +6524,45 @@ +@@ -5761,22 +6429,44 @@ FROM (SELECT * FROM new_test UNION ALL SELECT * FROM new_test) ss); RETURN NULL; END$$; -+ERROR: type "trigger" does not exist ++ERROR: unimplemented: trigger functions are not yet supported ++HINT: You have attempted to use a feature that is not yet implemented. ++See: https://go.crdb.dev/issue-v/126356/dev CREATE TRIGGER my_trigger AFTER UPDATE ON multi_test REFERENCING NEW TABLE AS new_test OLD TABLE as old_test FOR EACH STATEMENT EXECUTE PROCEDURE multi_test_trig(); -+ERROR: at or near "my_trigger": syntax error: unimplemented: this syntax -+DETAIL: source SQL: -+CREATE TRIGGER my_trigger AFTER UPDATE ON multi_test -+ ^ ++ERROR: unimplemented: CREATE TRIGGER +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/28296/v24.2 ++See: https://go.crdb.dev/issue-v/126359/dev UPDATE multi_test SET i = i; -NOTICE: count = 1 -NOTICE: count union = 2 @@ -239380,7 +238185,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab CREATE OR REPLACE FUNCTION get_from_partitioned_table(partitioned_table.a%type) RETURNS partitioned_table AS $$ DECLARE -@@ -5787,13 +6573,13 @@ +@@ -5787,13 +6477,13 @@ SELECT * INTO result FROM partitioned_table WHERE a = a_val; RETURN result; END; $$ LANGUAGE plpgsql; @@ -239400,7 +238205,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab CREATE OR REPLACE FUNCTION list_partitioned_table() RETURNS SETOF partitioned_table.a%TYPE AS $$ DECLARE -@@ -5806,14 +6592,13 @@ +@@ -5806,14 +6496,13 @@ END LOOP; RETURN; END; $$ LANGUAGE plpgsql; @@ -239421,7 +238226,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab -- -- Check argument name is used instead of $n in error message -- -@@ -5822,6 +6607,15 @@ +@@ -5822,6 +6511,16 @@ GET DIAGNOSTICS x = ROW_COUNT; RETURN; END; $$ LANGUAGE plpgsql; @@ -239429,6 +238234,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/plpgsql.out --lab -LINE 3: GET DIAGNOSTICS x = ROW_COUNT; - ^ +ERROR: unimplemented: attempted to use a PL/pgSQL statement that is not yet supported ++DETAIL: stmt_get_diag is not yet supported +HINT: You have attempted to use a feature that is not yet implemented. + +Please check the public issue tracker to check whether this problem is @@ -240088,7 +238894,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/partition_info.ou +CREATE TABLE ptif_li_child () INHERITS (ptif_li_parent) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev SELECT * FROM pg_partition_tree('ptif_test_view'); - relid | parentrelid | isleaf | level --------+-------------+--------+------- @@ -240178,7 +238984,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/reloptions.out -- autovacuum_enabled = false, autovacuum_analyze_scale_factor = 0.2); +ERROR: unimplemented: storage parameter "autovacuum_analyze_scale_factor" +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/43299/v24.2 ++See: https://go.crdb.dev/issue-v/43299/dev SELECT reloptions FROM pg_class WHERE oid = 'reloptions_test'::regclass; - reloptions ------------------------------------------------------------------------------- @@ -240545,11 +239351,11 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/explain.out --lab $$; +ERROR: unimplemented: set-returning PL/pgSQL functions are not yet supported +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/105240/v24.2 ++See: https://go.crdb.dev/issue-v/105240/dev -- To produce valid JSON output, replace numbers with "0" or "0.0" not "N" create function explain_filter_to_json(text) returns jsonb language plpgsql as -@@ -47,281 +50,110 @@ +@@ -47,247 +50,91 @@ return data::jsonb; end; $$; @@ -240862,9 +239668,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/explain.out --lab -- SETTINGS option -- We have to ignore other settings that might be imposed by the environment, -- so printing the whole Settings field unfortunately won't do. - begin; - set local plan_cache_mode = force_generic_plan; -+ERROR: unrecognized configuration parameter "plan_cache_mode" +@@ -296,32 +143,16 @@ select true as "OK" from explain_filter('explain (settings) select * from int8_tbl i8') ln where ln ~ '^ *Settings: .*plan_cache_mode = ''force_generic_plan'''; @@ -240873,7 +239677,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/explain.out --lab - t -(1 row) - -+ERROR: current transaction is aborted, commands ignored until end of transaction block ++ERROR: unknown function: explain_filter() select explain_filter_to_json('explain (settings, format json) select * from int8_tbl i8') #> '{0,Settings,plan_cache_mode}'; - ?column? 
----------------------- @@ -240901,7 +239705,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/explain.out --lab -- Test EXPLAIN (GENERIC_PLAN) with partition pruning -- partitions should be pruned at plan time, based on constants, -- but there should be no pruning based on parameter placeholders -@@ -329,27 +161,52 @@ +@@ -329,27 +160,52 @@ key1 integer not null, key2 integer not null ) partition by list (key1); @@ -240963,7 +239767,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/explain.out --lab -- -- Test production of per-worker data -- -@@ -360,9 +217,24 @@ +@@ -360,9 +216,24 @@ begin; -- encourage use of parallel plans set parallel_setup_cost=0; @@ -240988,7 +239792,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/explain.out --lab select jsonb_pretty( explain_filter_to_json('explain (analyze, verbose, buffers, format json) select * from tenk1 order by tenthous') -@@ -374,188 +246,19 @@ +@@ -374,188 +245,19 @@ #- '{0,Plan,Plans,0,Sort Method}' #- '{0,Plan,Plans,0,Sort Space Type}' ); @@ -241162,7 +239966,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/explain.out --lab as 'begin return sin($1); end'; +ERROR: unimplemented: cannot create UDFs under a temporary schema +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/104687/v24.2 ++See: https://go.crdb.dev/issue-v/104687/dev select explain_filter('explain (verbose) select * from t1 where pg_temp.mysin(f1) < 0.5'); - explain_filter ------------------------------------------------------------- @@ -241193,7 +239997,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/memoize.out --lab $$; +ERROR: unimplemented: set-returning PL/pgSQL functions are not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/105240/v24.2 ++See: https://go.crdb.dev/issue-v/105240/dev -- Ensure we get a memoize node on the inner side of the nested loop SET enable_hashjoin TO off; +ERROR: unimplemented: the configuration setting "enable_hashjoin" is not supported @@ -241756,7 +240560,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/memoize.out --lab diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/compression_1.out --label=/mnt/data1/postgres/src/test/regress/results/compression.out /mnt/data1/postgres/src/test/regress/expected/compression_1.out /mnt/data1/postgres/src/test/regress/results/compression.out --- /mnt/data1/postgres/src/test/regress/expected/compression_1.out +++ /mnt/data1/postgres/src/test/regress/results/compression.out -@@ -1,356 +1,434 @@ +@@ -1,356 +1,433 @@ \set HIDE_TOAST_COMPRESSION false -- ensure we get stable results regardless of installation's default SET default_toast_compression = 'pglz'; @@ -242002,7 +240806,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/compression_1.out + ^ ALTER TABLE cmdata2 ALTER COLUMN f1 TYPE varchar; +ERROR: ALTER COLUMN TYPE from int to varchar is only supported experimentally -+HINT: See: https://go.crdb.dev/issue-v/49329/v24.2 ++HINT: See: https://go.crdb.dev/issue-v/49329/dev +-- +you can enable alter column type general support by running `SET enable_experimental_alter_column_type_general = true` \d+ cmdata2 @@ -242022,7 +240826,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/compression_1.out + ^ ALTER TABLE cmdata2 ALTER COLUMN f1 TYPE int USING f1::integer; +ERROR: ALTER COLUMN TYPE from int to int is only supported experimentally -+HINT: See: https://go.crdb.dev/issue-v/49329/v24.2 ++HINT: See: https://go.crdb.dev/issue-v/49329/dev +-- +you can enable alter column type general support by running `SET enable_experimental_alter_column_type_general = true` \d+ cmdata2 @@ -242044,7 +240848,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/compression_1.out --but the data should not be compressed ALTER TABLE cmdata2 ALTER COLUMN f1 TYPE varchar; +ERROR: ALTER COLUMN TYPE from int to varchar is only supported experimentally -+HINT: See: https://go.crdb.dev/issue-v/49329/v24.2 ++HINT: See: https://go.crdb.dev/issue-v/49329/dev +-- +you can enable alter column type general support by running `SET enable_experimental_alter_column_type_general = true` ALTER TABLE cmdata2 ALTER COLUMN f1 SET COMPRESSION pglz; @@ -242177,7 +240981,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/compression_1.out +CREATE TABLE cminh() INHERITS(cmdata, cmdata1) + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev CREATE TABLE cminh(f1 TEXT COMPRESSION lz4) INHERITS(cmdata); -NOTICE: merging column "f1" with inherited definition -ERROR: column "f1" has a compression method conflict @@ -255027,11 +253831,9 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/partition_join.ou diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/partition_prune.out --label=/mnt/data1/postgres/src/test/regress/results/partition_prune.out /mnt/data1/postgres/src/test/regress/expected/partition_prune.out /mnt/data1/postgres/src/test/regress/results/partition_prune.out --- /mnt/data1/postgres/src/test/regress/expected/partition_prune.out +++ /mnt/data1/postgres/src/test/regress/results/partition_prune.out -@@ -3,1484 +3,1140 @@ - -- +@@ -4,1483 +4,1138 @@ -- Force generic plans to be used for all prepared statements in this file. set plan_cache_mode = force_generic_plan; -+ERROR: unrecognized configuration parameter "plan_cache_mode" create table lp (a char) partition by list (a); +ERROR: at or near "EOF": syntax error +DETAIL: source SQL: @@ -257432,7 +256234,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/partition_prune.o -- -- some more cases -- -@@ -1489,253 +1145,237 @@ +@@ -1489,253 +1144,237 @@ -- -- pruning won't work for mc3p, because some keys are Params explain (costs off) select * from mc2p t1, lateral (select count(*) from mc3p t2 where t2.a = t1.b and abs(t2.b) = 1 and t2.c = 1) s where t1.a = 1; @@ -257857,7 +256659,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/partition_prune.o -- -- Test Partition pruning for HASH partitioning -- -@@ -1745,402 +1385,391 @@ +@@ -1745,402 +1384,391 @@ -- create table hp (a int, b text, c int) partition by hash (a part_test_int4_ops, b part_test_text_ops); @@ -258459,7 +257261,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/partition_prune.o declare cur SCROLL CURSOR for select 1 from list_part where a > (select 1) and a < (select 4); +ERROR: unimplemented: DECLARE SCROLL CURSOR +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/77102/v24.2 ++See: https://go.crdb.dev/issue-v/77102/dev -- move beyond the final row move 3 from cur; +ERROR: current transaction is aborted, commands ignored until end of transaction block @@ -258541,13 +257343,13 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/partition_prune.o -- Parallel append -- Parallel queries won't necessarily get as many workers as the planner -- asked for. This affects not only the "Workers Launched:" field of EXPLAIN -@@ -2167,111 +1796,81 @@ +@@ -2167,111 +1795,81 @@ end loop; end; $$; +ERROR: unimplemented: set-returning PL/pgSQL functions are not yet supported +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/105240/v24.2 ++See: https://go.crdb.dev/issue-v/105240/dev prepare ab_q4 (int, int) as select avg(a) from ab where a between $1 and $2 and b < 4; +ERROR: relation "ab" does not exist @@ -258711,7 +257513,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/partition_prune.o -- Test pruning during parallel nested loop query create table lprt_a (a int not null); -- Insert some values we won't find in ab -@@ -2280,344 +1879,198 @@ +@@ -2280,344 +1878,198 @@ insert into lprt_a values(1),(1); analyze lprt_a; create index ab_a2_b1_a_idx on ab_a2_b1 (a); @@ -259207,7 +258009,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/partition_prune.o prepare ab_q6 as select * from ( select tableoid::regclass,a,b from ab -@@ -2626,587 +2079,522 @@ +@@ -2626,587 +2078,522 @@ union all select tableoid::regclass,a,b from ab ) ab where a = $1 and b = (select -10); @@ -260136,7 +258938,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/partition_prune.o -- -- Check that pruning with composite range partitioning works correctly when -- a combination of runtime parameters is specified, not all of whose values -@@ -3214,529 +2602,666 @@ +@@ -3214,529 +2601,666 @@ -- prepare ps1 as select * from mc3p where a = $1 and abs(b) < (select 3); @@ -260951,7 +259753,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/partition_prune.o +create table inh_lp1 (a int, value int, check(a = 1)) inherits (inh_lp) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev create table inh_lp2 (a int, value int, check(a = 2)) inherits (inh_lp); -NOTICE: merging column "a" with inherited definition -NOTICE: merging column "value" with inherited definition @@ -260960,7 +259762,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/partition_prune.o +create table inh_lp2 (a int, value int, check(a = 2)) inherits (inh_lp) + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/22456/v24.2 ++See: https://go.crdb.dev/issue-v/22456/dev set constraint_exclusion = 'partition'; +ERROR: unimplemented: the configuration setting "constraint_exclusion" is not supported +HINT: You have attempted to use a feature that is not yet implemented. @@ -261173,7 +259975,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/partition_prune.o explain (costs off) select * from ( -@@ -3747,19 +3272,11 @@ +@@ -3747,19 +3271,11 @@ select 1, 1, 1 ) s(a, b, c) where s.a = 1 and s.b = 1 and s.c = (select 1); @@ -261198,7 +260000,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/partition_prune.o select * from ( select * from p -@@ -3769,11 +3286,7 @@ +@@ -3769,11 +3285,7 @@ select 1, 1, 1 ) s(a, b, c) where s.a = 1 and s.b = 1 and s.c = (select 1); @@ -261211,7 +260013,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/partition_prune.o prepare q (int, int) as select * from ( -@@ -3784,116 +3297,186 @@ +@@ -3784,116 +3296,186 @@ select 1, 1, 1 ) s(a, b, c) where s.a = $1 and s.b = $2 and s.c = (select 1); @@ -261466,7 +260268,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/partition_prune.o -- Like the above but throw some more complexity at the planner by adding -- a UNION ALL. We expect both sides of the union not to scan the -- non-required partitions. 
-@@ -3901,136 +3484,236 @@ +@@ -3901,136 +3483,236 @@ 'select * from listp where a = (select 1) union all select * from listp where a = (select 2);'); @@ -261779,7 +260581,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/partition_prune.o -- -- Check that gen_partprune_steps() detects self-contradiction from clauses -- regardless of the order of the clauses (Here we use a custom operator to -@@ -4043,28 +3726,84 @@ +@@ -4043,28 +3725,84 @@ commutator = ===, hashes ); @@ -261788,7 +260590,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/partition_prune.o +create operator === ( + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev create operator class part_test_int4_ops2 for type int4 using hash as @@ -261799,7 +260601,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/partition_prune.o +create operator class part_test_int4_ops2 + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev create table hp_contradict_test (a int, b int) partition by hash (a part_test_int4_ops2, b part_test_int4_ops2); +ERROR: at or near "hash": syntax error +DETAIL: source SQL: @@ -262908,7 +261710,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/stats.out --label ROLLBACK TO SAVEPOINT p1; +ERROR: unimplemented: ROLLBACK TO SAVEPOINT not yet supported after DDL statements +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/10735/v24.2 ++See: https://go.crdb.dev/issue-v/10735/dev COMMIT; -- rollback a truncate: this should count 2 inserts and produce 2 dead tuples BEGIN; @@ -265459,7 +264261,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/event_trigger.out +SET SESSION AUTHORIZATION regress_evt_user + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/40283/v24.2 ++See: https://go.crdb.dev/issue-v/40283/dev CREATE TABLE schema_one.table_one(a int); +ERROR: cannot create "schema_one.table_one" because the target database or schema does not exist +HINT: verify that the current database and search_path are valid and/or the target database exists @@ -265493,7 +264295,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/event_trigger.out +CREATE AGGREGATE schema_two.newton + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/74775/v24.2 ++See: https://go.crdb.dev/issue-v/74775/dev RESET SESSION AUTHORIZATION; +ERROR: at or near "authorization": syntax error +DETAIL: source SQL: @@ -265799,7 +264601,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/event_trigger.out +CREATE OPERATOR CLASS evttrigopclass FOR TYPE int USING btree AS STORAGE int + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/65017/v24.2 ++See: https://go.crdb.dev/issue-v/65017/dev DROP EVENT TRIGGER regress_event_trigger_report_dropped; +ERROR: at or near "event": syntax error +DETAIL: source SQL: @@ -265832,14 +264634,14 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/event_trigger.out + ^ +HINT: try \h CREATE create table rewriteme (id serial primary key, foo float, bar timestamptz); -+NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. See https://www.cockroachlabs.com/docs/v24.2/serial.html ++NOTICE: using sequential values in a primary key does not perform as well as using random UUIDs. See https://www.cockroachlabs.com/docs/dev/serial.html insert into rewriteme select x * 1.001 from generate_series(1, 500) as t(x); alter table rewriteme alter column foo type numeric; -ERROR: rewrites not allowed -CONTEXT: PL/pgSQL function test_evtrig_no_rewrite() line 3 at RAISE +ERROR: ALTER COLUMN TYPE from float to decimal is only supported experimentally -+HINT: See: https://go.crdb.dev/issue-v/49329/v24.2 ++HINT: See: https://go.crdb.dev/issue-v/49329/dev +-- +you can enable alter column type general support by running `SET enable_experimental_alter_column_type_general = true` alter table rewriteme add column baz int default 0; @@ -265856,7 +264658,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/event_trigger.out alter column foo type numeric(10,4); -NOTICE: Table 'rewriteme' is being rewritten (reason = 4) +ERROR: ALTER COLUMN TYPE from float to decimal is only supported experimentally -+HINT: See: https://go.crdb.dev/issue-v/49329/v24.2 ++HINT: See: https://go.crdb.dev/issue-v/49329/dev +-- +you can enable alter column type general support by running `SET enable_experimental_alter_column_type_general = true` -- matview rewrite when changing access method @@ -265878,7 +264680,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/event_trigger.out -- shouldn't trigger a table_rewrite event alter table rewriteme alter column foo type numeric(12,4); +ERROR: ALTER COLUMN TYPE from float to decimal is only supported experimentally -+HINT: See: https://go.crdb.dev/issue-v/49329/v24.2 ++HINT: See: https://go.crdb.dev/issue-v/49329/dev +-- +you can enable alter column type general support by running `SET enable_experimental_alter_column_type_general = true` begin; @@ -265918,7 +264720,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/event_trigger.out +alter type rewritetype alter attribute a type text cascade + ^ +HINT: You have attempted to use a feature that is not yet implemented. -+See: https://go.crdb.dev/issue-v/48701/v24.2 ++See: https://go.crdb.dev/issue-v/48701/dev -- but this doesn't work create table rewritemetoo3 (a rewritetype); alter type rewritetype alter attribute a type varchar cascade; @@ -265928,7 +264730,7 @@ diff -U3 --label=/mnt/data1/postgres/src/test/regress/expected/event_trigger.out +alter type rewritetype alter attribute a type varchar cascade + ^ +HINT: You have attempted to use a feature that is not yet implemented. 
-+See: https://go.crdb.dev/issue-v/48701/v24.2 ++See: https://go.crdb.dev/issue-v/48701/dev drop table rewriteme; drop event trigger no_rewrite_allowed; +ERROR: at or near "event": syntax error diff --git a/pkg/cmd/roachtest/tests/BUILD.bazel b/pkg/cmd/roachtest/tests/BUILD.bazel index 5b608c872706..fb19090b3a57 100644 --- a/pkg/cmd/roachtest/tests/BUILD.bazel +++ b/pkg/cmd/roachtest/tests/BUILD.bazel @@ -23,7 +23,7 @@ go_library( "admission_control_multitenant_fairness.go", "admission_control_row_level_ttl.go", "admission_control_snapshot_overload.go", - "admission_control_snapshot_overload_excise.go", + "admission_control_snapshot_overload_io.go", "admission_control_tpcc_overload.go", "allocation_bench.go", "allocator.go", @@ -110,6 +110,7 @@ go_library( "many_splits.go", "mismatched_locality.go", "mixed_version_backup.go", + "mixed_version_c2c.go", "mixed_version_cdc.go", "mixed_version_change_replicas.go", "mixed_version_decl_schemachange_compat.go", @@ -248,6 +249,7 @@ go_library( "//pkg/server/serverpb", "//pkg/settings/cluster", "//pkg/sql", + "//pkg/sql/catalog/catpb", "//pkg/sql/pgwire/pgcode", "//pkg/sql/pgwire/pgerror", "//pkg/sql/sem/tree", diff --git a/pkg/cmd/roachtest/tests/acceptance.go b/pkg/cmd/roachtest/tests/acceptance.go index b71f7a43795e..dd70f329fc38 100644 --- a/pkg/cmd/roachtest/tests/acceptance.go +++ b/pkg/cmd/roachtest/tests/acceptance.go @@ -36,7 +36,6 @@ func registerAcceptance(r registry.Registry) { defaultLeases bool requiresLicense bool randomized bool - nativeLibs []string workloadNode bool incompatibleClouds registry.CloudSet }{ @@ -82,7 +81,6 @@ func registerAcceptance(r registry.Registry) { timeout: 2 * time.Hour, // actually lower in local runs; see `runVersionUpgrade` defaultLeases: true, randomized: true, - nativeLibs: registry.LibGEOS, }, }, registry.OwnerDisasterRecovery: { @@ -164,9 +162,6 @@ func registerAcceptance(r registry.Registry) { if !tc.defaultLeases { testSpec.Leases = registry.MetamorphicLeases } - if len(tc.nativeLibs) > 0 { - testSpec.NativeLibs = tc.nativeLibs - } testSpec.Run = func(ctx context.Context, t test.Test, c cluster.Cluster) { tc.fn(ctx, t, c) } diff --git a/pkg/cmd/roachtest/tests/admission_control.go b/pkg/cmd/roachtest/tests/admission_control.go index 62f67207cf4e..198b377ac4e1 100644 --- a/pkg/cmd/roachtest/tests/admission_control.go +++ b/pkg/cmd/roachtest/tests/admission_control.go @@ -35,7 +35,7 @@ func registerAdmission(r registry.Registry) { registerMultiStoreOverload(r) registerMultiTenantFairness(r) registerSnapshotOverload(r) - registerSnapshotOverloadExcise(r) + registerSnapshotOverloadIO(r) registerTPCCOverload(r) registerTPCCSevereOverload(r) registerIndexOverload(r) diff --git a/pkg/cmd/roachtest/tests/admission_control_disk_bandwidth_overload.go b/pkg/cmd/roachtest/tests/admission_control_disk_bandwidth_overload.go index 410a6417fb4e..e9eca325cc80 100644 --- a/pkg/cmd/roachtest/tests/admission_control_disk_bandwidth_overload.go +++ b/pkg/cmd/roachtest/tests/admission_control_disk_bandwidth_overload.go @@ -44,7 +44,7 @@ func registerDiskBandwidthOverload(r registry.Registry) { Owner: registry.OwnerAdmissionControl, Timeout: time.Hour, Benchmark: true, - CompatibleClouds: registry.AllClouds, + CompatibleClouds: registry.AllExceptAzure, // TODO(aaditya): change to weekly once the test stabilizes. 
Suites: registry.Suites(registry.Nightly), Cluster: r.MakeClusterSpec(2, spec.CPU(8), spec.WorkloadNode()), @@ -76,47 +76,8 @@ func registerDiskBandwidthOverload(r registry.Registry) { setAdmissionControl(ctx, t, c, true) - // TODO(aaditya): This function shares some of the logic with roachtestutil.DiskStaller. Consider merging the two. - setBandwidthLimit := func(nodes option.NodeListOption, rw string, bw int, max bool) error { - dataMount := "/mnt/data1" - if c.Cloud() == spec.Azure { - dataMount = "sda1" - } - res, err := c.RunWithDetailsSingleNode(context.TODO(), t.L(), option.WithNodes(nodes[:1]), - fmt.Sprintf("lsblk | grep %s | awk '{print $2}'", dataMount), - ) - if err != nil { - t.Fatalf("error when determining block device: %s", err) - } - parts := strings.Split(strings.TrimSpace(res.Stdout), ":") - if len(parts) != 2 { - t.Fatalf("unexpected output from lsblk: %s", res.Stdout) - } - major, err := strconv.Atoi(parts[0]) - if err != nil { - t.Fatalf("error when determining block device: %s", err) - } - minor, err := strconv.Atoi(parts[1]) - if err != nil { - t.Fatalf("error when determining block device: %s", err) - } - - cockroachIOController := filepath.Join("/sys/fs/cgroup/system.slice", roachtestutil.SystemInterfaceSystemdUnitName()+".service", "io.max") - bytesPerSecondStr := "max" - if !max { - bytesPerSecondStr = fmt.Sprintf("%d", bw) - } - return c.RunE(ctx, option.WithNodes(nodes), "sudo", "/bin/bash", "-c", fmt.Sprintf( - `'echo %d:%d %s=%s > %s'`, - major, - minor, - rw, - bytesPerSecondStr, - cockroachIOController, - )) - } - - if err := setBandwidthLimit(c.CRDBNodes(), "wbps", 128<<20 /* 128MiB */, false); err != nil { + dataDir := "/mnt/data1" + if err := setBandwidthLimit(ctx, t, c, c.CRDBNodes(), "wbps", 128<<20 /* 128MiB */, false, dataDir); err != nil { t.Fatal(err) } @@ -234,3 +195,48 @@ func registerDiskBandwidthOverload(r registry.Registry) { }, }) } + +// TODO(aaditya): This function shares some of the logic with roachtestutil.DiskStaller. Consider merging the two. 
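+// setBandwidthLimit caps the throughput of the block device backing dataDir +// on the given nodes. It resolves the device's major:minor numbers via lsblk +// and writes an io.max entry (e.g. "8:16 wbps=134217728" for a 128MiB/s write +// cap) into the cockroach systemd unit's cgroup v2 controller. rw selects the +// throttle key ("wbps" for write bytes/sec, "rbps" for read bytes/sec), and +// max=true lifts the limit by writing "max" in place of a byte count.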
+func setBandwidthLimit( + ctx context.Context, + t test.Test, + c cluster.Cluster, + nodes option.NodeListOption, + rw string, + bw int, + max bool, + dataDir string, +) error { + res, err := c.RunWithDetailsSingleNode(context.TODO(), t.L(), option.WithNodes(nodes[:1]), + fmt.Sprintf("lsblk | grep %s | awk '{print $2}'", dataDir), + ) + if err != nil { + t.Fatalf("error when determining block device: %s", err) + } + parts := strings.Split(strings.TrimSpace(res.Stdout), ":") + if len(parts) != 2 { + t.Fatalf("unexpected output from lsblk: %s", res.Stdout) + } + major, err := strconv.Atoi(parts[0]) + if err != nil { + t.Fatalf("error when determining block device: %s", err) + } + minor, err := strconv.Atoi(parts[1]) + if err != nil { + t.Fatalf("error when determining block device: %s", err) + } + + cockroachIOController := filepath.Join("/sys/fs/cgroup/system.slice", roachtestutil.SystemInterfaceSystemdUnitName()+".service", "io.max") + bytesPerSecondStr := "max" + if !max { + bytesPerSecondStr = fmt.Sprintf("%d", bw) + } + return c.RunE(ctx, option.WithNodes(nodes), "sudo", "/bin/bash", "-c", fmt.Sprintf( + `'echo %d:%d %s=%s > %s'`, + major, + minor, + rw, + bytesPerSecondStr, + cockroachIOController, + )) +} diff --git a/pkg/cmd/roachtest/tests/admission_control_latency.go b/pkg/cmd/roachtest/tests/admission_control_latency.go index e1733fd794cb..7f460c7cca92 100644 --- a/pkg/cmd/roachtest/tests/admission_control_latency.go +++ b/pkg/cmd/roachtest/tests/admission_control_latency.go @@ -533,18 +533,19 @@ func (v variations) runTest(ctx context.Context, t test.Test, c cluster.Cluster) // TODO(baptist): Remove this block once #120073 is fixed. db := c.Conn(ctx, t.L(), 1) defer db.Close() - if _, err := db.Exec( + if _, err := db.ExecContext(ctx, `SET CLUSTER SETTING kv.lease.reject_on_leader_unknown.enabled = true`); err != nil { t.Fatal(err) } // This isn't strictly necessary, but it would be nice if this test passed at 10s (or lower). - if _, err := db.Exec( + if _, err := db.ExecContext(ctx, `SET CLUSTER SETTING server.time_after_store_suspect = '10s'`); err != nil { t.Fatal(err) } // Avoid stores up-replicating away from the target node, reducing the // backlog of work. - if _, err := db.Exec( + if _, err := db.ExecContext( + ctx, fmt.Sprintf( `SET CLUSTER SETTING server.time_until_store_dead = '%s'`, v.perturbationDuration+time.Minute)); err != nil { t.Fatal(err) @@ -565,8 +566,8 @@ func (v variations) runTest(ctx context.Context, t test.Test, c cluster.Cluster) if t.count > 0 { return time.Duration(math.Sqrt((float64(v.numWorkloadNodes+v.vcpu) * float64((t.p50 + t.p99/3 + t.p999/10)) / float64(t.count) * float64(time.Second)))) } else { - - return time.Duration(math.Inf(1)) + // Use a non-infinite score that is still very high if there was a period of no throughput. + return time.Hour } }) }) diff --git a/pkg/cmd/roachtest/tests/admission_control_multitenant_fairness.go b/pkg/cmd/roachtest/tests/admission_control_multitenant_fairness.go index c0bbb5e7d75f..38a5b7dc8743 100644 --- a/pkg/cmd/roachtest/tests/admission_control_multitenant_fairness.go +++ b/pkg/cmd/roachtest/tests/admission_control_multitenant_fairness.go @@ -241,7 +241,7 @@ func runMultiTenantFairness( Flag("batch", s.batch). Flag("max-ops", s.maxOps). Flag("concurrency", 25). - Arg(pgurl) + Arg("%s", pgurl) if err := c.RunE(ctx, option.WithNodes(node), cmd.String()); err != nil { return err @@ -276,7 +276,7 @@ func runMultiTenantFairness( Flag("duration", s.duration). Flag("read-percent", s.readPercent). 
Flag("concurrency", s.concurrency(n)). - Arg(pgurl) + Arg("%s", pgurl) if err := c.RunE(ctx, option.WithNodes(node), cmd.String()); err != nil { return err diff --git a/pkg/cmd/roachtest/tests/admission_control_snapshot_overload_excise.go b/pkg/cmd/roachtest/tests/admission_control_snapshot_overload_excise.go deleted file mode 100644 index 0a4a28c60174..000000000000 --- a/pkg/cmd/roachtest/tests/admission_control_snapshot_overload_excise.go +++ /dev/null @@ -1,238 +0,0 @@ -// Copyright 2024 The Cockroach Authors. -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package tests - -import ( - "context" - "fmt" - "time" - - "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/cluster" - "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/clusterstats" - "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/grafana" - "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/option" - "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/registry" - "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/roachtestutil" - "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/spec" - "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/test" - "github.com/cockroachdb/cockroach/pkg/roachprod/install" - "github.com/cockroachdb/cockroach/pkg/roachprod/prometheus" - "github.com/cockroachdb/cockroach/pkg/util/timeutil" - "github.com/cockroachdb/errors" - "github.com/prometheus/common/model" - "github.com/stretchr/testify/require" -) - -// This test aims to test the behavior of range snapshots with splits and -// excises enabled in the storage engine. It sets up a 3 node cluster, where the -// cluster is pre-populated with about 500GB of data. Then, a foreground kv -// workload is run, and shortly after that, n3 is brought down. Upon restart, n3 -// starts to receive large amounts of snapshot data. With excises turned on, it -// is expected that l0 sublevel counts and p99 latencies remain stable. -func registerSnapshotOverloadExcise(r registry.Registry) { - r.Add(registry.TestSpec{ - Name: "admission-control/snapshot-overload-excise", - Owner: registry.OwnerAdmissionControl, - Benchmark: true, - CompatibleClouds: registry.OnlyGCE, - Suites: registry.Suites(registry.Weekly), - // The test uses a large volume size to ensure high provisioned bandwidth - // from the cloud provider. - Cluster: r.MakeClusterSpec(4, spec.CPU(4), spec.WorkloadNode(), spec.VolumeSize(2000)), - Leases: registry.MetamorphicLeases, - Timeout: 12 * time.Hour, - Run: func(ctx context.Context, t test.Test, c cluster.Cluster) { - if c.Spec().NodeCount < 4 { - t.Fatalf("expected at least 4 nodes, found %d", c.Spec().NodeCount) - } - - envOptions := install.EnvOption{ - // COCKROACH_CONCURRENT_COMPACTIONS is set to 1 since we want to ensure - // that snapshot ingests don't result in LSM inversion even with a very - // low compaction rate. With Pebble's IngestAndExcise all the ingested - // sstables should ingest into L6. - "COCKROACH_CONCURRENT_COMPACTIONS=1", - // COCKROACH_RAFT_LOG_TRUNCATION_THRESHOLD is reduced so that there is - // certainty that the restarted node will be caught up via snapshots, - // and not via raft log replay. 
- fmt.Sprintf("COCKROACH_RAFT_LOG_TRUNCATION_THRESHOLD=%d", 512<<10 /* 512KiB */), - // COCKROACH_CONCURRENT_SNAPSHOT* is increased so that the rate of - // snapshot application is high. - "COCKROACH_CONCURRENT_SNAPSHOT_APPLY_LIMIT=100", - "COCKROACH_CONCURRENT_SNAPSHOT_SEND_LIMIT=100", - } - - startOpts := option.NewStartOpts(option.NoBackupSchedule) - roachtestutil.SetDefaultAdminUIPort(c, &startOpts.RoachprodOpts) - roachtestutil.SetDefaultSQLPort(c, &startOpts.RoachprodOpts) - settings := install.MakeClusterSettings(envOptions) - c.Start(ctx, t.L(), startOpts, settings, c.CRDBNodes()) - - db := c.Conn(ctx, t.L(), len(c.CRDBNodes())) - defer db.Close() - - t.Status(fmt.Sprintf("configuring cluster settings (<%s)", 30*time.Second)) - { - // Defensive, since admission control is enabled by default. - setAdmissionControl(ctx, t, c, true) - // Ensure ingest splits and excises are enabled. (Enabled by default in v24.1+) - if _, err := db.ExecContext( - ctx, "SET CLUSTER SETTING kv.snapshot_receiver.excise.enabled = 'true'"); err != nil { - t.Fatalf("failed to set kv.snapshot_receiver.excise.enabled: %v", err) - } - if _, err := db.ExecContext( - ctx, "SET CLUSTER SETTING storage.ingest_split.enabled = 'true'"); err != nil { - t.Fatalf("failed to set storage.ingest_split.enabled: %v", err) - } - - // Set a high rebalance rate. - if _, err := db.ExecContext( - ctx, "SET CLUSTER SETTING kv.snapshot_rebalance.max_rate = '256MiB'"); err != nil { - t.Fatalf("failed to set kv.snapshot_rebalance.max_rate: %v", err) - } - } - - // Setup the prometheus instance and client. - t.Status(fmt.Sprintf("setting up prometheus/grafana (<%s)", 2*time.Minute)) - var statCollector clusterstats.StatCollector - promCfg := &prometheus.Config{} - promCfg.WithPrometheusNode(c.WorkloadNode().InstallNodes()[0]). - WithNodeExporter(c.CRDBNodes().InstallNodes()). - WithCluster(c.CRDBNodes().InstallNodes()). - WithGrafanaDashboardJSON(grafana.SnapshotAdmissionControlGrafanaJSON) - err := c.StartGrafana(ctx, t.L(), promCfg) - require.NoError(t, err) - cleanupFunc := func() { - if err := c.StopGrafana(ctx, t.L(), t.ArtifactsDir()); err != nil { - t.L().ErrorfCtx(ctx, "Error(s) shutting down prom/grafana %s", err) - } - } - defer cleanupFunc() - promClient, err := clusterstats.SetupCollectorPromClient(ctx, c, t.L(), promCfg) - require.NoError(t, err) - statCollector = clusterstats.NewStatsCollector(ctx, promClient) - - // Initialize the kv database, - t.Status(fmt.Sprintf("initializing kv dataset (<%s)", 2*time.Hour)) - c.Run(ctx, option.WithNodes(c.WorkloadNode()), - "./cockroach workload init kv --drop --insert-count=40000000 "+ - "--max-block-bytes=12288 --min-block-bytes=12288 {pgurl:1-3}") - - t.Status(fmt.Sprintf("starting kv workload thread (<%s)", time.Minute)) - m := c.NewMonitor(ctx, c.CRDBNodes()) - m.Go(func(ctx context.Context) error { - c.Run(ctx, option.WithNodes(c.WorkloadNode()), - fmt.Sprintf("./cockroach workload run kv --tolerate-errors "+ - "--splits=1000 --histograms=%s/stats.json --read-percent=75 "+ - "--max-rate=600 --max-block-bytes=12288 --min-block-bytes=12288 "+ - "--concurrency=4000 --duration=%s {pgurl:1-2}", - t.PerfArtifactsDir(), (6*time.Hour).String())) - return nil - }) - - t.Status(fmt.Sprintf("waiting for data build up (<%s)", time.Hour)) - time.Sleep(time.Hour) - - t.Status(fmt.Sprintf("killing node 3... 
(<%s)", time.Minute)) - c.Stop(ctx, t.L(), option.DefaultStopOpts(), c.Node(3)) - - t.Status(fmt.Sprintf("waiting for increased snapshot data and raft log truncation (<%s)", 2*time.Hour)) - time.Sleep(2 * time.Hour) - - t.Status(fmt.Sprintf("starting node 3... (<%s)", time.Minute)) - c.Start(ctx, t.L(), startOpts, install.MakeClusterSettings(envOptions), c.Node(3)) - - t.Status(fmt.Sprintf("waiting for snapshot transfers to finish %s", 2*time.Hour)) - m.Go(func(ctx context.Context) error { - t.Status(fmt.Sprintf("starting monitoring thread (<%s)", time.Minute)) - getMetricVal := func(query string, label string) (float64, error) { - point, err := statCollector.CollectPoint(ctx, t.L(), timeutil.Now(), query) - if err != nil { - t.L().Errorf("could not query prom %s", err.Error()) - return 0, err - } - val := point[label] - for storeID, v := range val { - t.L().Printf("%s(store=%s): %f", query, storeID, v.Value) - // We only assert on the 3rd store. - if storeID == "3" { - return v.Value, nil - } - } - // Unreachable. - panic("unreachable") - } - getHistMetricVal := func(query string) (float64, error) { - at := timeutil.Now() - fromVal, warnings, err := promClient.Query(ctx, query, at) - if err != nil { - return 0, err - } - if len(warnings) > 0 { - return 0, errors.Newf("found warnings querying prometheus: %s", warnings) - } - - fromVec := fromVal.(model.Vector) - if len(fromVec) == 0 { - return 0, errors.Newf("Empty vector result for query %s @ %s (%v)", query, at.Format(time.RFC3339), fromVal) - } - return float64(fromVec[0].Value), nil - } - - // Assert on l0 sublevel count and p99 latencies. - latencyMetric := divQuery("histogram_quantile(0.99, sum by(le) (rate(sql_service_latency_bucket[2m])))", 1<<20 /* 1ms */) - const latencyThreshold = 100 // 100ms since the metric is scaled to 1ms above. - const sublevelMetric = "storage_l0_sublevels" - const sublevelThreshold = 20 - var l0SublevelCount []float64 - const sampleCountForL0Sublevel = 12 - const collectionIntervalSeconds = 10.0 - // Loop for ~120 minutes. - const numIterations = int(120 / (collectionIntervalSeconds / 60)) - numErrors := 0 - numSuccesses := 0 - for i := 0; i < numIterations; i++ { - time.Sleep(collectionIntervalSeconds * time.Second) - val, err := getHistMetricVal(latencyMetric) - if err != nil { - numErrors++ - continue - } - if val > latencyThreshold { - t.Fatalf("sql p99 latency %f exceeded threshold", val) - } - val, err = getMetricVal(sublevelMetric, "store") - if err != nil { - numErrors++ - continue - } - l0SublevelCount = append(l0SublevelCount, val) - // We want to use the mean of the last 2m of data to avoid short-lived - // spikes causing failures. 
- if len(l0SublevelCount) >= sampleCountForL0Sublevel { - latestSampleMeanL0Sublevels := getMeanOverLastN(sampleCountForL0Sublevel, l0SublevelCount) - if latestSampleMeanL0Sublevels > sublevelThreshold { - t.Fatalf("sub-level mean %f over last %d iterations exceeded threshold", latestSampleMeanL0Sublevels, sampleCountForL0Sublevel) - } - } - numSuccesses++ - } - t.Status(fmt.Sprintf("done monitoring, errors: %d successes: %d", numErrors, numSuccesses)) - if numErrors > numSuccesses { - t.Fatalf("too many errors retrieving metrics") - } - return nil - }) - - m.Wait() - }, - }) -} diff --git a/pkg/cmd/roachtest/tests/admission_control_snapshot_overload_io.go b/pkg/cmd/roachtest/tests/admission_control_snapshot_overload_io.go new file mode 100644 index 000000000000..87af5be32b62 --- /dev/null +++ b/pkg/cmd/roachtest/tests/admission_control_snapshot_overload_io.go @@ -0,0 +1,294 @@ +// Copyright 2024 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package tests + +import ( + "context" + "fmt" + "time" + + "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/cluster" + "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/clusterstats" + "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/grafana" + "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/option" + "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/registry" + "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/roachtestutil" + "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/spec" + "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/test" + "github.com/cockroachdb/cockroach/pkg/roachprod/install" + "github.com/cockroachdb/cockroach/pkg/roachprod/prometheus" + "github.com/cockroachdb/cockroach/pkg/util/timeutil" + "github.com/cockroachdb/errors" + "github.com/prometheus/common/model" + "github.com/stretchr/testify/require" +) + +// This test aims to test the behavior of range snapshots under heavy load. It +// sets up a 3 node cluster, where the cluster is pre-populated with about 500GB +// of data. Then, a foreground kv workload is run, and shortly after that, n3 is +// brought down. Upon restart, n3 starts to receive large amounts of snapshot +// data. It is expected that l0 sublevel counts and p99 latencies remain stable. +func registerSnapshotOverloadIO(r registry.Registry) { + spec := func(subtest string, cfg admissionControlSnapshotOverloadIOOpts) registry.TestSpec { + return registry.TestSpec{ + Name: "admission-control/snapshot-overload-io/" + subtest, + Owner: registry.OwnerAdmissionControl, + Benchmark: true, + CompatibleClouds: registry.OnlyGCE, + Suites: registry.Suites(registry.Weekly), + Cluster: r.MakeClusterSpec( + 4, + spec.CPU(4), + spec.WorkloadNode(), + spec.VolumeSize(cfg.volumeSize), + spec.ReuseNone(), + spec.DisableLocalSSD(), + ), + Leases: registry.MetamorphicLeases, + Timeout: 12 * time.Hour, + Run: func(ctx context.Context, t test.Test, c cluster.Cluster) { + runAdmissionControlSnapshotOverloadIO(ctx, t, c, cfg) + }, + } + } + + // This tests the ability of the storage engine to handle a high rate of + // snapshots while maintaining a healthy LSM shape and stable p99 latencies. 
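+ // With Pebble's IngestAndExcise, an incoming snapshot's sstables replace the + // overlapping key span outright instead of stacking on top of it in L0, so + // ingested data can land in L6 even while compaction concurrency is pinned + // to 1.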
+ r.Add(spec("excise", admissionControlSnapshotOverloadIOOpts{ + // The test uses a large volume size to ensure high provisioned bandwidth + // from the cloud provider. + volumeSize: 2000, + // COCKROACH_CONCURRENT_COMPACTIONS is set to 1 since we want to ensure + // that snapshot ingests don't result in LSM inversion even with a very + // low compaction rate. With Pebble's IngestAndExcise all the ingested + // sstables should ingest into L6. + limitCompactionConcurrency: true, + limitDiskBandwidth: false, + })) + + // This tests the behaviour of snpashot ingestion in bandwidth constrained + // environments. + r.Add(spec("bandwidth", admissionControlSnapshotOverloadIOOpts{ + // 2x headroom from the ~500GB pre-population of the test. + volumeSize: 1000, + limitCompactionConcurrency: false, + limitDiskBandwidth: true, + })) + +} + +type admissionControlSnapshotOverloadIOOpts struct { + volumeSize int + limitCompactionConcurrency bool + limitDiskBandwidth bool +} + +func runAdmissionControlSnapshotOverloadIO( + ctx context.Context, t test.Test, c cluster.Cluster, cfg admissionControlSnapshotOverloadIOOpts, +) { + if c.Spec().NodeCount < 4 { + t.Fatalf("expected at least 4 nodes, found %d", c.Spec().NodeCount) + } + + envOptions := install.EnvOption{ + // COCKROACH_RAFT_LOG_TRUNCATION_THRESHOLD is reduced so that there is + // certainty that the restarted node will be caught up via snapshots, + // and not via raft log replay. + fmt.Sprintf("COCKROACH_RAFT_LOG_TRUNCATION_THRESHOLD=%d", 512<<10 /* 512KiB */), + // COCKROACH_CONCURRENT_SNAPSHOT* is increased so that the rate of + // snapshot application is high. + "COCKROACH_CONCURRENT_SNAPSHOT_APPLY_LIMIT=100", + "COCKROACH_CONCURRENT_SNAPSHOT_SEND_LIMIT=100", + } + + if cfg.limitCompactionConcurrency { + envOptions = append(envOptions, "COCKROACH_CONCURRENT_COMPACTIONS=1") + } + + startOpts := option.NewStartOpts(option.NoBackupSchedule) + roachtestutil.SetDefaultAdminUIPort(c, &startOpts.RoachprodOpts) + roachtestutil.SetDefaultSQLPort(c, &startOpts.RoachprodOpts) + settings := install.MakeClusterSettings(envOptions) + c.Start(ctx, t.L(), startOpts, settings, c.CRDBNodes()) + + db := c.Conn(ctx, t.L(), len(c.CRDBNodes())) + defer db.Close() + + t.Status(fmt.Sprintf("configuring cluster settings (<%s)", 30*time.Second)) + { + // Defensive, since admission control is enabled by default. + setAdmissionControl(ctx, t, c, true) + // Ensure ingest splits and excises are enabled. (Enabled by default in v24.1+) + if _, err := db.ExecContext( + ctx, "SET CLUSTER SETTING kv.snapshot_receiver.excise.enabled = 'true'"); err != nil { + t.Fatalf("failed to set kv.snapshot_receiver.excise.enabled: %v", err) + } + if _, err := db.ExecContext( + ctx, "SET CLUSTER SETTING storage.ingest_split.enabled = 'true'"); err != nil { + t.Fatalf("failed to set storage.ingest_split.enabled: %v", err) + } + + // Set a high rebalance rate. 
+ if _, err := db.ExecContext( + ctx, "SET CLUSTER SETTING kv.snapshot_rebalance.max_rate = '256MiB'"); err != nil { + t.Fatalf("failed to set kv.snapshot_rebalance.max_rate: %v", err) + } + } + + if cfg.limitDiskBandwidth { + const bandwidthLimit = 128 + dataDir := "/mnt/data1" + if err := setBandwidthLimit(ctx, t, c, c.CRDBNodes(), "wbps", bandwidthLimit<<20 /* 128MiB */, false, dataDir); err != nil { + t.Fatal(err) + } + if _, err := db.ExecContext( + ctx, fmt.Sprintf("SET CLUSTER SETTING kvadmission.store.provisioned_bandwidth = '%dMiB'", bandwidthLimit)); err != nil { + t.Fatalf("failed to set kvadmission.store.provisioned_bandwidth: %v", err) + } + } + + // Set up the prometheus instance and client. + t.Status(fmt.Sprintf("setting up prometheus/grafana (<%s)", 2*time.Minute)) + var statCollector clusterstats.StatCollector + promCfg := &prometheus.Config{} + promCfg.WithPrometheusNode(c.WorkloadNode().InstallNodes()[0]). + WithNodeExporter(c.CRDBNodes().InstallNodes()). + WithCluster(c.CRDBNodes().InstallNodes()). + WithGrafanaDashboardJSON(grafana.SnapshotAdmissionControlGrafanaJSON) + err := c.StartGrafana(ctx, t.L(), promCfg) + require.NoError(t, err) + cleanupFunc := func() { + if err := c.StopGrafana(ctx, t.L(), t.ArtifactsDir()); err != nil { + t.L().ErrorfCtx(ctx, "Error(s) shutting down prom/grafana %s", err) + } + } + defer cleanupFunc() + promClient, err := clusterstats.SetupCollectorPromClient(ctx, c, t.L(), promCfg) + require.NoError(t, err) + statCollector = clusterstats.NewStatsCollector(ctx, promClient) + + // Initialize the kv database. + t.Status(fmt.Sprintf("initializing kv dataset (<%s)", 2*time.Hour)) + c.Run(ctx, option.WithNodes(c.WorkloadNode()), + "./cockroach workload init kv --drop --insert-count=40000000 "+ + "--max-block-bytes=12288 --min-block-bytes=12288 {pgurl:1-3}") + + t.Status(fmt.Sprintf("starting kv workload thread (<%s)", time.Minute)) + m := c.NewMonitor(ctx, c.CRDBNodes()) + m.Go(func(ctx context.Context) error { + c.Run(ctx, option.WithNodes(c.WorkloadNode()), + fmt.Sprintf("./cockroach workload run kv --tolerate-errors "+ + "--splits=1000 --histograms=%s/stats.json --read-percent=75 "+ + "--max-rate=600 --max-block-bytes=12288 --min-block-bytes=12288 "+ + "--concurrency=4000 --duration=%s {pgurl:1-2}", + t.PerfArtifactsDir(), (6*time.Hour).String())) + return nil + }) + + t.Status(fmt.Sprintf("waiting for data build up (<%s)", time.Hour)) + time.Sleep(time.Hour) + + t.Status(fmt.Sprintf("killing node 3... (<%s)", time.Minute)) + c.Stop(ctx, t.L(), option.DefaultStopOpts(), c.Node(3)) + + t.Status(fmt.Sprintf("waiting for increased snapshot data and raft log truncation (<%s)", 2*time.Hour)) + time.Sleep(2 * time.Hour) + + t.Status(fmt.Sprintf("starting node 3... (<%s)", time.Minute)) + c.Start(ctx, t.L(), startOpts, install.MakeClusterSettings(envOptions), c.Node(3)) + + t.Status(fmt.Sprintf("waiting for snapshot transfers to finish %s", 2*time.Hour)) + m.Go(func(ctx context.Context) error { + t.Status(fmt.Sprintf("starting monitoring thread (<%s)", time.Minute)) + getMetricVal := func(query string, label string) (float64, error) { + point, err := statCollector.CollectPoint(ctx, t.L(), timeutil.Now(), query) + if err != nil { + t.L().Errorf("could not query prom %s", err.Error()) + return 0, err + } + val := point[label] + for storeID, v := range val { + t.L().Printf("%s(store=%s): %f", query, storeID, v.Value) + // We only assert on the 3rd store. + if storeID == "3" { + return v.Value, nil + } + } + // Unreachable. 
+ panic("unreachable") + } + getHistMetricVal := func(query string) (float64, error) { + at := timeutil.Now() + fromVal, warnings, err := promClient.Query(ctx, query, at) + if err != nil { + return 0, err + } + if len(warnings) > 0 { + return 0, errors.Newf("found warnings querying prometheus: %s", warnings) + } + + fromVec := fromVal.(model.Vector) + if len(fromVec) == 0 { + return 0, errors.Newf("Empty vector result for query %s @ %s (%v)", query, at.Format(time.RFC3339), fromVal) + } + return float64(fromVec[0].Value), nil + } + + // TODO(aaditya): assert on disk bandwidth subtest once integrated. + if !cfg.limitDiskBandwidth { + // Assert on l0 sublevel count and p99 latencies. + latencyMetric := divQuery("histogram_quantile(0.99, sum by(le) (rate(sql_service_latency_bucket[2m])))", 1<<20 /* 1ms */) + const latencyThreshold = 100 // 100ms since the metric is scaled to 1ms above. + const sublevelMetric = "storage_l0_sublevels" + const sublevelThreshold = 20 + var l0SublevelCount []float64 + const sampleCountForL0Sublevel = 12 + const collectionIntervalSeconds = 10.0 + // Loop for ~120 minutes. + const numIterations = int(120 / (collectionIntervalSeconds / 60)) + numErrors := 0 + numSuccesses := 0 + for i := 0; i < numIterations; i++ { + time.Sleep(collectionIntervalSeconds * time.Second) + val, err := getHistMetricVal(latencyMetric) + if err != nil { + numErrors++ + continue + } + if val > latencyThreshold { + t.Fatalf("sql p99 latency %f exceeded threshold", val) + } + val, err = getMetricVal(sublevelMetric, "store") + if err != nil { + numErrors++ + continue + } + l0SublevelCount = append(l0SublevelCount, val) + // We want to use the mean of the last 2m of data to avoid short-lived + // spikes causing failures. + if len(l0SublevelCount) >= sampleCountForL0Sublevel { + latestSampleMeanL0Sublevels := getMeanOverLastN(sampleCountForL0Sublevel, l0SublevelCount) + if latestSampleMeanL0Sublevels > sublevelThreshold { + t.Fatalf("sub-level mean %f over last %d iterations exceeded threshold", latestSampleMeanL0Sublevels, sampleCountForL0Sublevel) + } + } + numSuccesses++ + } + t.Status(fmt.Sprintf("done monitoring, errors: %d successes: %d", numErrors, numSuccesses)) + if numErrors > numSuccesses { + t.Fatalf("too many errors retrieving metrics") + } + } + return nil + }) + + m.Wait() +} diff --git a/pkg/cmd/roachtest/tests/admission_control_tpcc_overload.go b/pkg/cmd/roachtest/tests/admission_control_tpcc_overload.go index e0c63bae5127..1432265d09d9 100644 --- a/pkg/cmd/roachtest/tests/admission_control_tpcc_overload.go +++ b/pkg/cmd/roachtest/tests/admission_control_tpcc_overload.go @@ -121,7 +121,7 @@ func verifyNodeLiveness( if err := retry.WithMaxAttempts(ctx, retry.Options{ MaxBackoff: 500 * time.Millisecond, }, 60, func() (err error) { - response, err = getMetrics(ctx, c, t, adminURLs[0], now.Add(-runDuration), now, []tsQuery{ + response, err = getMetrics(ctx, c, t, adminURLs[0], "", now.Add(-runDuration), now, []tsQuery{ { name: "cr.node.liveness.heartbeatfailures", queryType: total, diff --git a/pkg/cmd/roachtest/tests/cdc.go b/pkg/cmd/roachtest/tests/cdc.go index d7d80b3ff19c..6769e73191e7 100644 --- a/pkg/cmd/roachtest/tests/cdc.go +++ b/pkg/cmd/roachtest/tests/cdc.go @@ -82,6 +82,7 @@ const ( pubsubSink sinkType = "pubsub" kafkaSink sinkType = "kafka" azureEventHubKafkaSink sinkType = "azure-event-hub" + mskSink sinkType = "msk" nullSink sinkType = "null" ) @@ -271,6 +272,13 @@ func (ct *cdcTester) setupSink(args feedArgs) string { 
`azure-event-hub://cdc-roachtest.servicebus.windows.net:9093?shared_access_key_name=%s&shared_access_key=%s&topic_name=testing`, url.QueryEscape(accessKeyName), url.QueryEscape(accessKey), ) + case mskSink: + // Currently, the only msk tests are manual tests. When they are run, + // this placeholder should be replaced with the actual bootstrap server + // for the cluster being used. + // TODO(yang): If we want to run msk roachtests nightly, replace this + // with a long-running MSK cluster or maybe create a fresh cluster. + sinkURI = "kafka://placeholder" default: ct.t.Fatalf("unknown sink provided: %s", args.sinkType) } @@ -1370,7 +1378,7 @@ func registerCDC(r registry.Registry) { Cluster: r.MakeClusterSpec(4, spec.WorkloadNode(), spec.CPU(16)), Leases: registry.MetamorphicLeases, RequiresLicense: true, - CompatibleClouds: registry.AllExceptAWS, + CompatibleClouds: registry.AllClouds, Suites: registry.Suites(registry.Nightly), Run: func(ctx context.Context, t test.Test, c cluster.Cluster) { ct := newCDCTester(ctx, t, c) @@ -1393,6 +1401,38 @@ func registerCDC(r registry.Registry) { ct.waitForWorkload() }, }) + r.Add(registry.TestSpec{ + Name: "cdc/tpcc-1000/sink=msk", + Owner: registry.OwnerCDC, + Benchmark: true, + Cluster: r.MakeClusterSpec(4, spec.WorkloadNode(), spec.CPU(16)), + Leases: registry.MetamorphicLeases, + RequiresLicense: true, + CompatibleClouds: registry.OnlyAWS, + Suites: registry.ManualOnly, + Run: func(ctx context.Context, t test.Test, c cluster.Cluster) { + ct := newCDCTester(ctx, t, c) + defer ct.Close() + + ct.runTPCCWorkload(tpccArgs{warehouses: 1000, duration: "60m"}) + + feed := ct.newChangefeed(feedArgs{ + sinkType: mskSink, + targets: allTpccTargets, + opts: map[string]string{ + "initial_scan": "'no'", + // updated is specified so that we can compare emitted bytes with + // cdc/tpcc-1000/sink=kafka. + "updated": "", + }, + }) + ct.runFeedLatencyVerifier(feed, latencyTargets{ + initialScanLatency: 3 * time.Minute, + steadyLatency: 10 * time.Minute, + }) + ct.waitForWorkload() + }, + }) r.Add(registry.TestSpec{ Name: "cdc/tpcc-1000/sink=cloudstorage", Owner: registry.OwnerCDC, diff --git a/pkg/cmd/roachtest/tests/cluster_to_cluster.go b/pkg/cmd/roachtest/tests/cluster_to_cluster.go index b8d0fc709760..883e75673820 100644 --- a/pkg/cmd/roachtest/tests/cluster_to_cluster.go +++ b/pkg/cmd/roachtest/tests/cluster_to_cluster.go @@ -1122,7 +1122,7 @@ func registerClusterToCluster(r registry.Registry) { timeout: 3 * time.Hour, additionalDuration: 60 * time.Minute, cutover: 30 * time.Minute, - clouds: registry.AllExceptAzure, + clouds: registry.AllClouds, suites: registry.Suites(registry.Nightly), }, { diff --git a/pkg/cmd/roachtest/tests/copy.go b/pkg/cmd/roachtest/tests/copy.go index f4199a856909..43cca5c9e800 100644 --- a/pkg/cmd/roachtest/tests/copy.go +++ b/pkg/cmd/roachtest/tests/copy.go @@ -47,20 +47,9 @@ func registerCopy(r registry.Registry) { const rowOverheadEstimate = 160 const rowEstimate = rowOverheadEstimate + payload - // We run this without metamorphic constants as kv-batch-size = 1 makes - // this test take far too long to complete. - // TODO(DarrylWong): Use a metamorphic constants exclusion list instead. 
- // See: https://github.com/cockroachdb/cockroach/issues/113164 - settings := install.MakeClusterSettings() - settings.Env = append(settings.Env, "COCKROACH_INTERNAL_DISABLE_METAMORPHIC_TESTING=true") - c.Start(ctx, t.L(), option.DefaultStartOpts(), settings, c.All()) - - // Make sure the copy commands have sufficient time to finish when - // runtime assertions are enabled. + c.Start(ctx, t.L(), option.DefaultStartOpts(), install.MakeClusterSettings(), c.All()) + copyTimeout := 10 * time.Minute - if UsingRuntimeAssertions(t) { - copyTimeout = 20 * time.Minute - } m := c.NewMonitor(ctx, c.All()) m.Go(func(ctx context.Context) error { diff --git a/pkg/cmd/roachtest/tests/decommission.go b/pkg/cmd/roachtest/tests/decommission.go index 14ac63196e98..d657aac57873 100644 --- a/pkg/cmd/roachtest/tests/decommission.go +++ b/pkg/cmd/roachtest/tests/decommission.go @@ -37,9 +37,9 @@ import ( "golang.org/x/sync/errgroup" ) -// shudownMaxWait is the default maximum duration (in seconds) that we +// shutdownGracePeriod is the default grace period (in seconds) that we // will wait for a graceful shutdown. -const shutdownMaxWait = 300 +const shutdownGracePeriod = 300 func registerDecommission(r registry.Registry) { { @@ -300,7 +300,7 @@ func runDecommission( }) } - stopOpts := option.NewStopOpts(option.Graceful(shutdownMaxWait)) + stopOpts := option.NewStopOpts(option.Graceful(shutdownGracePeriod)) m.Go(func() error { tBegin, whileDown := timeutil.Now(), true diff --git a/pkg/cmd/roachtest/tests/decommissionbench.go b/pkg/cmd/roachtest/tests/decommissionbench.go index c17639083c4a..0f11dfac73c9 100644 --- a/pkg/cmd/roachtest/tests/decommissionbench.go +++ b/pkg/cmd/roachtest/tests/decommissionbench.go @@ -884,7 +884,7 @@ func runSingleDecommission( // stuck with replicas in purgatory, by pinning them to a node. // We stop nodes gracefully when needed. - stopOpts := option.NewStopOpts(option.Graceful(shutdownMaxWait)) + stopOpts := option.NewStopOpts(option.Graceful(shutdownGracePeriod)) // Gather metadata for logging purposes and wait for balance. 
var bytesUsed, rangeCount, totalRanges int64 diff --git a/pkg/cmd/roachtest/tests/disagg_rebalance.go b/pkg/cmd/roachtest/tests/disagg_rebalance.go index 948ef3452e11..263b46f05cda 100644 --- a/pkg/cmd/roachtest/tests/disagg_rebalance.go +++ b/pkg/cmd/roachtest/tests/disagg_rebalance.go @@ -136,25 +136,28 @@ func registerDisaggRebalance(r registry.Registry) { t.Fatalf("did not replicate to n4 quickly enough, only found %d replicas", count) } - var bytesInRanges int64 - if err := db.QueryRow( - "SELECT metrics['livebytes']::INT FROM crdb_internal.kv_store_status WHERE node_id = $1 LIMIT 1", - 4, - ).Scan(&bytesInRanges); err != nil { - t.Fatal(err) - } - var bytesSnapshotted int64 - if err := db.QueryRow( - "SELECT metrics['range.snapshots.rcvd-bytes']::INT FROM crdb_internal.kv_store_status WHERE node_id = $1 LIMIT 1", - 4, - ).Scan(&bytesSnapshotted); err != nil { - t.Fatal(err) - } + testutils.SucceedsWithin(t, func() error { + var bytesInRanges int64 + if err := db.QueryRow( + "SELECT metrics['livebytes']::INT FROM crdb_internal.kv_store_status WHERE node_id = $1 LIMIT 1", + 4, + ).Scan(&bytesInRanges); err != nil { + t.Fatal(err) + } + var bytesSnapshotted int64 + if err := db.QueryRow( + "SELECT metrics['range.snapshots.rcvd-bytes']::INT FROM crdb_internal.kv_store_status WHERE node_id = $1 LIMIT 1", + 4, + ).Scan(&bytesSnapshotted); err != nil { + t.Fatal(err) + } - t.L().PrintfCtx(ctx, "got snapshot received bytes = %s, logical bytes in ranges = %s", humanize.IBytes(uint64(bytesSnapshotted)), humanize.IBytes(uint64(bytesInRanges))) - if bytesSnapshotted > bytesInRanges { - t.Fatalf("unexpected snapshot received bytes %d > bytes in all replicas on n4 %d, did not do a disaggregated rebalance?", bytesSnapshotted, bytesInRanges) - } + t.L().PrintfCtx(ctx, "got snapshot received bytes = %s, logical bytes in ranges = %s", humanize.IBytes(uint64(bytesSnapshotted)), humanize.IBytes(uint64(bytesInRanges))) + if bytesSnapshotted > bytesInRanges { + return errors.Errorf("unexpected snapshot received bytes %d > bytes in all replicas on n4 %d, did not do a disaggregated rebalance?", bytesSnapshotted, bytesInRanges) + } + return nil + }, 5*time.Minute) t.Status("continue tpcc") diff --git a/pkg/cmd/roachtest/tests/disk_stall.go b/pkg/cmd/roachtest/tests/disk_stall.go index b160da88f27b..999ef47a6f54 100644 --- a/pkg/cmd/roachtest/tests/disk_stall.go +++ b/pkg/cmd/roachtest/tests/disk_stall.go @@ -159,7 +159,7 @@ func runDiskStalledWALFailover( t.Fatal("process exited unexectedly") } - data := mustGetMetrics(ctx, c, t, adminURL, + data := mustGetMetrics(ctx, c, t, adminURL, install.SystemInterfaceName, workloadStartAt.Add(5*time.Minute), timeutil.Now().Add(-time.Minute), []tsQuery{ @@ -308,7 +308,7 @@ func runDiskStalledDetection( } stallAt := timeutil.Now() - response := mustGetMetrics(ctx, c, t, adminURL, workloadStartAt, stallAt, []tsQuery{ + response := mustGetMetrics(ctx, c, t, adminURL, install.SystemInterfaceName, workloadStartAt, stallAt, []tsQuery{ {name: "cr.node.sql.query.count", queryType: total}, }) cum := response.Results[0].Datapoints @@ -360,7 +360,7 @@ func runDiskStalledDetection( { now := timeutil.Now() - response := mustGetMetrics(ctx, c, t, adminURL, workloadStartAt, now, []tsQuery{ + response := mustGetMetrics(ctx, c, t, adminURL, install.SystemInterfaceName, workloadStartAt, now, []tsQuery{ {name: "cr.node.sql.query.count", queryType: total}, }) cum := response.Results[0].Datapoints diff --git a/pkg/cmd/roachtest/tests/drop.go b/pkg/cmd/roachtest/tests/drop.go index 
be7b279eacfc..0aa178b6ce50 100644 --- a/pkg/cmd/roachtest/tests/drop.go +++ b/pkg/cmd/roachtest/tests/drop.go @@ -148,7 +148,7 @@ func registerDrop(r registry.Registry) { if !allNodesSpaceCleared { sizeReport += fmt.Sprintf("disk space usage has not dropped below %s on all nodes.", humanizeutil.IBytes(int64(maxSizeBytes))) - t.Fatalf(sizeReport) + t.Fatal(sizeReport) } return nil diff --git a/pkg/cmd/roachtest/tests/encryption.go b/pkg/cmd/roachtest/tests/encryption.go index efaffe3f2c8a..1085745c681d 100644 --- a/pkg/cmd/roachtest/tests/encryption.go +++ b/pkg/cmd/roachtest/tests/encryption.go @@ -85,7 +85,7 @@ func registerEncryption(r registry.Registry) { } for i := 1; i <= nodes; i++ { - c.Stop(ctx, t.L(), option.NewStopOpts(option.Graceful(shutdownMaxWait)), c.Node(i)) + c.Stop(ctx, t.L(), option.NewStopOpts(option.Graceful(shutdownGracePeriod)), c.Node(i)) } } } diff --git a/pkg/cmd/roachtest/tests/export_parquet.go b/pkg/cmd/roachtest/tests/export_parquet.go index 6f9d11e09d96..b36c237f865b 100644 --- a/pkg/cmd/roachtest/tests/export_parquet.go +++ b/pkg/cmd/roachtest/tests/export_parquet.go @@ -103,7 +103,7 @@ func registerExportParquet(r registry.Registry) { fmt.Sprintf("EXPORT INTO PARQUET 'nodelocal://1/outputfile%d' FROM SELECT * FROM %s", fileNum, target)) fileNum += 1 if err != nil { - t.Fatalf(err.Error()) + t.Fatal(err.Error()) } } t.Status(fmt.Sprintf("worker %d/%d terminated", i+1, numConcurrentExports)) @@ -161,7 +161,7 @@ func registerExportParquet(r registry.Registry) { _, err := db.Exec( fmt.Sprintf("EXPORT INTO PARQUET 'nodelocal://1/outputfile%d' FROM SELECT * FROM %s", i, target)) if err != nil { - t.Fatalf(err.Error()) + t.Fatal(err.Error()) } t.Status(fmt.Sprintf("worker %d/%d terminated", i+1, numWorkers)) wg.Done() diff --git a/pkg/cmd/roachtest/tests/failover.go b/pkg/cmd/roachtest/tests/failover.go index 1c98978a7b1c..2e30a1f24126 100644 --- a/pkg/cmd/roachtest/tests/failover.go +++ b/pkg/cmd/roachtest/tests/failover.go @@ -27,6 +27,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/test" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/roachprod/install" + "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/util/randutil" "github.com/cockroachdb/errors" "github.com/lib/pq" @@ -526,10 +527,11 @@ func runFailoverPartialLeaseGateway(ctx context.Context, t test.Test, c cluster. // n1-n3: system and liveness ranges, SQL gateway // n4-n6: user ranges // -// The cluster runs with COCKROACH_DISABLE_LEADER_FOLLOWS_LEASEHOLDER, which -// will place Raft leaders and leases independently of each other. We can then -// assume that some number of user ranges will randomly have split leader/lease, -// and simply create partial partitions between each of n4-n6 in sequence. +// We then create partial partitions where one of n4-n6 is unable to reach the +// other two nodes but is still able to reach the liveness range. This will +// cause split leader/leaseholder scenarios if raft leadership fails over from a +// partitioned node but the lease does not. We expect this to be a problem for +// epoch-based leases, but not for other types of leases. // // We run a kv50 workload on SQL gateways and collect pMax latency for graphing. 
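// A minimal sketch of the partial partitions described above, assuming an
// iptables-based failer: traffic is dropped only between one node and a
// subset of peers, so the node can still reach the liveness range through
// the rest of the cluster. failPartialSketch and the peerIPs plumbing are
// hypothetical (the package's actual Failer implementations are not shown
// here) and rely on the surrounding package's imports.
func failPartialSketch(ctx context.Context, c cluster.Cluster, node int, peerIPs []string) {
	for _, ip := range peerIPs {
		// Drop inbound and outbound CockroachDB traffic (port 26257) for each peer.
		c.Run(ctx, option.WithNodes(c.Node(node)), fmt.Sprintf(
			`sudo iptables -A INPUT -s %s -p tcp --dport 26257 -j DROP`, ip))
		c.Run(ctx, option.WithNodes(c.Node(node)), fmt.Sprintf(
			`sudo iptables -A OUTPUT -d %s -p tcp --dport 26257 -j DROP`, ip))
	}
}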
func runFailoverPartialLeaseLeader(ctx context.Context, t test.Test, c cluster.Cluster) { @@ -537,11 +539,9 @@ func runFailoverPartialLeaseLeader(ctx context.Context, t test.Test, c cluster.C rng, _ := randutil.NewTestRand() - // Create cluster, disabling leader/leaseholder colocation. We only start - // n1-n3, to precisely place system ranges, since we'll have to disable the - // replicate queue shortly. + // Create cluster. We only start n1-n3, to precisely place system ranges, + // since we'll have to disable the replicate queue shortly. settings := install.MakeClusterSettings() - settings.Env = append(settings.Env, "COCKROACH_DISABLE_LEADER_FOLLOWS_LEASEHOLDER=true") settings.Env = append(settings.Env, "COCKROACH_SCAN_MAX_IDLE_TIME=100ms") // speed up replication m := c.NewMonitor(ctx, c.CRDBNodes()) @@ -578,22 +578,6 @@ func runFailoverPartialLeaseLeader(ctx context.Context, t test.Test, c cluster.C relocateRanges(t, ctx, conn, `database_name = 'kv'`, []int{1, 2, 3}, []int{4, 5, 6}) relocateRanges(t, ctx, conn, `database_name != 'kv'`, []int{4, 5, 6}, []int{1, 2, 3}) - // Check that we have a few split leaders/leaseholders on n4-n6. We give - // it a few seconds, since metrics are updated every 10 seconds. - for i := 0; ; i++ { - var count float64 - for _, node := range []int{4, 5, 6} { - count += nodeMetric(ctx, t, c, node, "replicas.leaders_not_leaseholders") - } - t.L().Printf("%.0f split leaders/leaseholders", count) - if count >= 3 { - break - } else if i >= 10 { - t.Fatalf("timed out waiting for 3 split leaders/leaseholders") - } - time.Sleep(time.Second) - } - // Run workload on n7 via n1-n3 gateways until test ends (context cancels). t.L().Printf("running workload") cancelWorkload := m.GoWithCancel(func(ctx context.Context) error { @@ -606,13 +590,21 @@ func runFailoverPartialLeaseLeader(ctx context.Context, t test.Test, c cluster.C return err }) - // Start a worker to fail and recover partial partitions between each pair of - // n4-n6 for 3 cycles (9 failures total). + // Start a worker to fail and recover partial partitions between each of n4-n6 + // and the other two nodes for 3 cycles (9 failures total). m.Go(func(ctx context.Context) error { defer cancelWorkload() + nodes := []int{4, 5, 6} for i := 0; i < 3; i++ { - for _, node := range []int{4, 5, 6} { + for _, node := range nodes { + var peers []int + for _, peer := range nodes { + if peer != node { + peers = append(peers, peer) + } + } + sleepFor(ctx, t, time.Minute) // Ranges may occasionally escape their constraints. 
Move them to where @@ -626,17 +618,17 @@ func runFailoverPartialLeaseLeader(ctx context.Context, t test.Test, c cluster.C failer.Ready(ctx, node) - peer := node + 1 - if peer > 6 { - peer = 4 + for _, peer := range peers { + t.L().Printf("failing n%d to n%d (%s lease/leader)", node, peer, failer) + failer.FailPartial(ctx, node, []int{peer}) } - t.L().Printf("failing n%d to n%d (%s lease/leader)", node, peer, failer) - failer.FailPartial(ctx, node, []int{peer}) sleepFor(ctx, t, time.Minute) - t.L().Printf("recovering n%d to n%d (%s lease/leader)", node, peer, failer) - failer.Recover(ctx, node) + for _, peer := range peers { + t.L().Printf("recovering n%d to n%d (%s lease/leader)", node, peer, failer) + failer.Recover(ctx, node) + } } } @@ -1444,9 +1436,6 @@ func (f *deadlockFailer) Fail(ctx context.Context, nodeID int) { f.locks = map[int][]roachpb.RangeID{} } - ctx, cancel := context.WithTimeout(ctx, 20*time.Second) // can take a while to lock - defer cancel() - var ranges []roachpb.RangeID if f.onlyLeaseholders { ranges = append(ranges, f.leases[nodeID]...) @@ -1462,8 +1451,18 @@ func (f *deadlockFailer) Fail(ctx context.Context, nodeID int) { for i := 0; i < len(ranges) && len(f.locks[nodeID]) < f.numReplicas; i++ { rangeID := ranges[i] var locked bool - require.NoError(f.t, conn.QueryRowContext(ctx, - `SELECT crdb_internal.unsafe_lock_replica($1::int, true)`, rangeID).Scan(&locked)) + // Retry the lock acquisition for a bit. Transient errors are possible here + // if there is another failure in the system that hasn't cleared up yet; to + // run a SQL query we may need to run internal queries related to user auth. + // + // See: https://github.com/cockroachdb/cockroach/issues/129918 + testutils.SucceedsSoon(f.t, func() error { + ctx, cancel := context.WithTimeout(ctx, 20*time.Second) // can take a while to lock + defer cancel() + return conn.QueryRowContext(ctx, + `SELECT crdb_internal.unsafe_lock_replica($1::int, true)`, rangeID).Scan(&locked) + }) + // NB: `locked` is false if the replica moved off the node in the interim. if locked { f.locks[nodeID] = append(f.locks[nodeID], rangeID) f.t.L().Printf("locked r%d on n%d", rangeID, nodeID) diff --git a/pkg/cmd/roachtest/tests/follower_reads.go b/pkg/cmd/roachtest/tests/follower_reads.go index 834b685d2341..61ddd9bace3f 100644 --- a/pkg/cmd/roachtest/tests/follower_reads.go +++ b/pkg/cmd/roachtest/tests/follower_reads.go @@ -1017,12 +1017,9 @@ func runFollowerReadsMixedVersionTest( var data map[int]int64 runInit := func(ctx context.Context, l *logger.Logger, r *rand.Rand, h *mixedversion.Helper) error { - if topology.multiRegion && - h.IsMultitenant() && - !h.Context().FromVersion.AtLeast(mixedversion.TenantsAndSystemAlignedSettingsVersion) { - const setting = "sql.multi_region.allow_abstractions_for_secondary_tenants.enabled" - if err := setTenantSetting(l, r, h, setting, true); err != nil { - return errors.Wrapf(err, "setting %s", setting) + if topology.multiRegion { + if err := enableTenantMultiRegion(l, r, h); err != nil { + return err } } @@ -1040,3 +1037,15 @@ func runFollowerReadsMixedVersionTest( mvt.AfterUpgradeFinalized("run follower reads", runFollowerReads) mvt.Run() } + +// enableTenantMultiRegion enables multi-region features on the +// mixedversion tenant if necessary (no-op otherwise). 
+func enableTenantMultiRegion(l *logger.Logger, r *rand.Rand, h *mixedversion.Helper) error { + if !h.IsMultitenant() || h.Context().FromVersion.AtLeast(mixedversion.TenantsAndSystemAlignedSettingsVersion) { + return nil + } + + const setting = "sql.multi_region.allow_abstractions_for_secondary_tenants.enabled" + err := setTenantSetting(l, r, h, setting, true) + return errors.Wrapf(err, "setting %s", setting) +} diff --git a/pkg/cmd/roachtest/tests/gossip.go b/pkg/cmd/roachtest/tests/gossip.go index d90f688af99d..ad93b02f8589 100644 --- a/pkg/cmd/roachtest/tests/gossip.go +++ b/pkg/cmd/roachtest/tests/gossip.go @@ -160,7 +160,7 @@ SELECT node_id type gossipUtil struct { waitTime time.Duration urlMap map[int]string - conn func(ctx context.Context, l *logger.Logger, i int, opts ...func(*option.ConnOption)) *gosql.DB + conn func(ctx context.Context, l *logger.Logger, i int, opts ...option.OptionFunc) *gosql.DB httpClient *roachtestutil.RoachtestHTTPClient } @@ -467,7 +467,7 @@ SELECT count(replicas) // current infrastructure which doesn't know about cockroach nodes started on // non-standard ports. g := newGossipUtil(ctx, t, c) - g.conn = func(ctx context.Context, l *logger.Logger, i int, opts ...func(*option.ConnOption)) *gosql.DB { + g.conn = func(ctx context.Context, l *logger.Logger, i int, _ ...option.OptionFunc) *gosql.DB { if i != 1 { return c.Conn(ctx, l, i) } diff --git a/pkg/cmd/roachtest/tests/hibernate.go b/pkg/cmd/roachtest/tests/hibernate.go index cb8a29d96ac1..e8e292dffa83 100644 --- a/pkg/cmd/roachtest/tests/hibernate.go +++ b/pkg/cmd/roachtest/tests/hibernate.go @@ -28,7 +28,7 @@ var hibernateReleaseTagRegex = regexp.MustCompile(`^(?P<major>\d+)\.(?P<minor>\d // WARNING: DO NOT MODIFY the name of the below constant/variable without approval from the docs team. // This is used by docs automation to produce a list of supported versions for ORM's.
-var supportedHibernateTag = "6.3.1" +var supportedHibernateTag = "6.6.0" type hibernateOptions struct { testName string diff --git a/pkg/cmd/roachtest/tests/inconsistency.go b/pkg/cmd/roachtest/tests/inconsistency.go index 9c1e979b79d7..16c404bebec9 100644 --- a/pkg/cmd/roachtest/tests/inconsistency.go +++ b/pkg/cmd/roachtest/tests/inconsistency.go @@ -25,7 +25,7 @@ import ( func registerInconsistency(r registry.Registry) { r.Add(registry.TestSpec{ Name: "inconsistency", - Owner: registry.OwnerReplication, + Owner: registry.OwnerKV, Cluster: r.MakeClusterSpec(3), CompatibleClouds: registry.AllExceptAWS, Suites: registry.Suites(registry.Nightly), diff --git a/pkg/cmd/roachtest/tests/jobs_util.go b/pkg/cmd/roachtest/tests/jobs_util.go index a28b63b4c620..b723407f0c54 100644 --- a/pkg/cmd/roachtest/tests/jobs_util.go +++ b/pkg/cmd/roachtest/tests/jobs_util.go @@ -24,10 +24,10 @@ import ( "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/roachprod/install" "github.com/cockroachdb/cockroach/pkg/roachprod/logger" - "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" "github.com/cockroachdb/cockroach/pkg/util/protoutil" "github.com/cockroachdb/cockroach/pkg/util/randutil" + "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/errors" "github.com/stretchr/testify/require" ) @@ -152,7 +152,7 @@ func executeNodeShutdown( } else { t.L().Printf(`stopping node gracefully %s`, target) if err := c.StopE( - ctx, t.L(), option.NewStopOpts(option.Graceful(shutdownMaxWait)), c.Node(cfg.shutdownNode), + ctx, t.L(), option.NewStopOpts(option.Graceful(shutdownGracePeriod)), c.Node(cfg.shutdownNode), ); err != nil { return errors.Wrapf(err, "could not stop node %s", target) } @@ -174,23 +174,72 @@ func executeNodeShutdown( return nil } +type checkStatusFunc func(status jobs.Status) (success bool, unexpected bool) + +func WaitForStatus( + ctx context.Context, + db *gosql.DB, + jobID jobspb.JobID, + check checkStatusFunc, + maxWait time.Duration, +) error { + startTime := timeutil.Now() + ticker := time.NewTicker(5 * time.Second) + defer ticker.Stop() + var status string + for { + select { + case <-ticker.C: + err := db.QueryRowContext(ctx, `SELECT status FROM [SHOW JOB $1]`, jobID).Scan(&status) + if err != nil { + return errors.Wrapf(err, "getting the job status %s", status) + } + success, unexpected := check(jobs.Status(status)) + if unexpected { + return errors.Newf("unexpectedly found job %d in state %s", jobID, status) + } + if success { + return nil + } + if timeutil.Since(startTime) > maxWait { + return errors.Newf("job %d did not reach status %s after %s", jobID, status, maxWait) + } + case <-ctx.Done(): + return errors.Wrapf(ctx.Err(), "context canceled while waiting for job to reach status %s", status) + } + } +} + func WaitForRunning( ctx context.Context, db *gosql.DB, jobID jobspb.JobID, maxWait time.Duration, ) error { - return testutils.SucceedsWithinError(func() error { - var status jobs.Status - if err := db.QueryRowContext(ctx, "SELECT status FROM [SHOW JOB $1]", jobID).Scan(&status); err != nil { - return err - } - switch status { - case jobs.StatusPending: - case jobs.StatusRunning: - default: - return errors.Newf("job too fast! 
job got to state %s before the target node could be shutdown", - status) - } - return nil - }, maxWait) + return WaitForStatus(ctx, db, jobID, + func(status jobs.Status) (success bool, unexpected bool) { + switch status { + case jobs.StatusRunning: + return true, false + case jobs.StatusPending: + return false, false + default: + return false, true + } + }, maxWait) +} + +func WaitForSucceeded( + ctx context.Context, db *gosql.DB, jobID jobspb.JobID, maxWait time.Duration, +) error { + return WaitForStatus(ctx, db, jobID, + func(status jobs.Status) (success bool, unexpected bool) { + switch status { + case jobs.StatusSucceeded: + return true, false + case jobs.StatusRunning: + return false, false + default: + return false, true + } + }, maxWait) } type jobRecord struct { diff --git a/pkg/cmd/roachtest/tests/kv.go b/pkg/cmd/roachtest/tests/kv.go index f3265d632022..6fb88c9015c4 100644 --- a/pkg/cmd/roachtest/tests/kv.go +++ b/pkg/cmd/roachtest/tests/kv.go @@ -438,7 +438,7 @@ func registerKVContention(r registry.Registry) { func registerKVQuiescenceDead(r registry.Registry) { r.Add(registry.TestSpec{ Name: "kv/quiescence/nodes=3", - Owner: registry.OwnerReplication, + Owner: registry.OwnerKV, Cluster: r.MakeClusterSpec(4, spec.WorkloadNode()), CompatibleClouds: registry.AllExceptAWS, Suites: registry.Suites(registry.Nightly), @@ -488,13 +488,9 @@ func registerKVQuiescenceDead(r registry.Registry) { }) // Graceful shut down third node. m.ExpectDeath() - if err := c.StopE( + c.Stop( ctx, t.L(), option.NewStopOpts(option.Graceful(30)), c.Node(len(c.CRDBNodes())), - ); err != nil { - t.L().Printf("graceful shutdown failed: %v", err) - // If graceful shutdown fails within 30 seconds, proceed with hard shutdown. - c.Stop(ctx, t.L(), option.DefaultStopOpts(), c.Node(len(c.CRDBNodes()))) - } + ) // Measure qps with node down (i.e. without quiescence). qpsOneDown := qps(func() { // Use a different seed to make sure it's not just stepping into the @@ -534,6 +530,7 @@ func registerKVGracefulDraining(r registry.Registry) { dbs := make([]*gosql.DB, nodes-1) for i := range dbs { dbs[i] = c.Conn(ctx, t.L(), i+1) + defer dbs[i].Close() } err := WaitFor3XReplication(ctx, t, t.L(), dbs[0]) @@ -565,9 +562,11 @@ func registerKVGracefulDraining(r registry.Registry) { // Three iterations, each iteration has a 3-minute duration. desiredRunDuration := 10 * time.Minute m.Go(func(ctx context.Context) error { + // TODO(baptist): Remove --tolerate-errors once #129427 is addressed. + // Don't connect to the node we are going to shut down. cmd := fmt.Sprintf( - "./cockroach workload run kv --duration=%s --read-percent=50 --follower-read-percent=50 --concurrency=200 --max-rate=%d {pgurl%s}", - desiredRunDuration, specifiedQPS, c.CRDBNodes()) + "./cockroach workload run kv --tolerate-errors --duration=%s --read-percent=50 --follower-read-percent=50 --concurrency=200 --max-rate=%d {pgurl%s}", + desiredRunDuration, specifiedQPS, c.Range(1, nodes-1)) t.WorkerStatus(cmd) defer func() { t.WorkerStatus("workload command completed") @@ -616,37 +615,11 @@ func registerKVGracefulDraining(r registry.Registry) { t.Status("letting workload run with all nodes") select { case <-ctx.Done(): - return + t.Fatalf("context cancelled while waiting") case <-time.After(2 * time.Minute): } } - // Graceful drain and allow it to complete. The liveness record is - // updated at the beginning of the drain process, so by time the drain - // completes in ~5s all other nodes should "know" it is draining. 
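// A hypothetical call site for the WaitForStatus helper added in jobs_util.go
// above: treat PAUSED as success, RUNNING and PAUSE-REQUESTED as still in
// progress, and any other state as unexpected. The jobID variable and the
// five-minute budget are illustrative; ctx and db come from the caller.
if err := WaitForStatus(ctx, db, jobID,
	func(status jobs.Status) (success bool, unexpected bool) {
		switch status {
		case jobs.StatusPaused:
			return true, false
		case jobs.StatusRunning, jobs.StatusPauseRequested:
			return false, false
		default:
			return false, true
		}
	}, 5*time.Minute); err != nil {
	return err
}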
- cmd := fmt.Sprintf("./cockroach node drain --certs-dir=%s --port={pgport%s} --self", install.CockroachNodeCertsDir, restartNode) - c.Run(ctx, option.WithNodes(restartNode), cmd) - // Simulate a hard network drop to this node prior to shutting it down. - // This is what we see in some customer environments. As an example, a - // docker container shutdown will also disappear from the network and - // drop all packets in both directions. - // TODO(baptist): Convert this to use a network partitioning - // utility function. - if !c.IsLocal() { - c.Run(ctx, option.WithNodes(restartNode), `sudo iptables -A INPUT -p tcp --dport 26257 -j DROP`) - c.Run(ctx, option.WithNodes(restartNode), `sudo iptables -A OUTPUT -p tcp --dport 26257 -j DROP`) - } - c.Stop(ctx, t.L(), option.DefaultStopOpts(), restartNode) - t.Status("letting workload run with one node down") - select { - case <-ctx.Done(): - return - case <-time.After(1 * time.Minute): - } - // Clean up the iptables rule before restarting. - if !c.IsLocal() { - c.Run(ctx, option.WithNodes(restartNode), `sudo iptables -F`) - } - c.Start(ctx, t.L(), option.DefaultStartOpts(), install.MakeClusterSettings(), restartNode) + drainWithIpTables(ctx, restartNode, c, t) m.ResetDeaths() } @@ -668,6 +641,40 @@ }) } +// drainWithIpTables does a graceful drain and allows it to complete. The +// liveness record is updated at the beginning of the drain process, so by the +// time the drain completes in ~5s all other nodes should "know" it is draining. +func drainWithIpTables( + ctx context.Context, restartNode option.NodeListOption, c cluster.Cluster, t test.Test, +) { + cmd := fmt.Sprintf("./cockroach node drain --certs-dir=%s --port={pgport%s} --self", install.CockroachNodeCertsDir, restartNode) + c.Run(ctx, option.WithNodes(restartNode), cmd) + + // Simulate a hard network drop to this node prior to shutting it down. This + // is what we see in some customer environments. As an example, a docker + // container shutdown will also disappear from the network and drop all + // packets in both directions. + // TODO(baptist): Convert this to use a network partitioning utility. + if !c.IsLocal() { + c.Run(ctx, option.WithNodes(restartNode), `sudo iptables -A INPUT -p tcp --dport 26257 -j DROP`) + c.Run(ctx, option.WithNodes(restartNode), `sudo iptables -A OUTPUT -p tcp --dport 26257 -j DROP`) + // NB: We don't use the original context as it might be cancelled. + defer c.Run(context.Background(), option.WithNodes(restartNode), `sudo iptables -F`) + } + c.Stop(ctx, t.L(), option.DefaultStopOpts(), restartNode) + + t.Status("letting workload run with one node down") + select { + case <-ctx.Done(): + t.Fatalf("context cancelled while waiting") + case <-time.After(1 * time.Minute): + } + + startOpts := option.DefaultStartOpts() + startOpts.RoachprodOpts.SkipInit = true + c.Start(ctx, t.L(), startOpts, install.MakeClusterSettings(), restartNode) +} + func registerKVSplits(r registry.Registry) { for _, item := range []struct { quiesce bool diff --git a/pkg/cmd/roachtest/tests/large_schema_benchmark.go b/pkg/cmd/roachtest/tests/large_schema_benchmark.go index 0ddbb012531a..65ff6f03eb2b 100644 --- a/pkg/cmd/roachtest/tests/large_schema_benchmark.go +++ b/pkg/cmd/roachtest/tests/large_schema_benchmark.go @@ -163,6 +163,10 @@ func registerLargeSchemaBenchmark(r registry.Registry, numTables int, isMultiReg // completes in a reasonable amount of time.
_, err := conn.Exec("SET CLUSTER SETTING jobs.retention_time='1h'") require.NoError(t, err) + // Use a higher number of retries, since we hit retry errors when importing + // a large number of tables. + _, err = conn.Exec("SET CLUSTER SETTING kv.transaction.internal.max_auto_retries=500") + require.NoError(t, err) // Create a user that will be used for authentication for the REST // API calls. _, err = conn.Exec("CREATE USER roachadmin password 'roacher'") diff --git a/pkg/cmd/roachtest/tests/latency_verifier.go b/pkg/cmd/roachtest/tests/latency_verifier.go index 85b040668f73..586434a340f0 100644 --- a/pkg/cmd/roachtest/tests/latency_verifier.go +++ b/pkg/cmd/roachtest/tests/latency_verifier.go @@ -129,9 +129,10 @@ func (lv *latencyVerifier) noteHighwater(highwaterTime time.Time) { // that's less than the max allowed. Verify at the end // of the test that this happens at some point. if lv.maxSeenSteadyEveryN.ShouldLog() { - lv.setTestStatus(fmt.Sprintf( + update := fmt.Sprintf( "%s: end-to-end latency %s not yet below target steady latency %s", - lv.name, latency.Truncate(time.Millisecond), lv.targetSteadyLatency.Truncate(time.Millisecond))) + lv.name, latency.Truncate(time.Millisecond), lv.targetSteadyLatency.Truncate(time.Millisecond)) + lv.setTestStatus(update) } return } @@ -142,9 +143,10 @@ func (lv *latencyVerifier) noteHighwater(highwaterTime time.Time) { lv.maxSeenSteadyLatency = latency } if lv.maxSeenSteadyEveryN.ShouldLog() { - lv.setTestStatus(fmt.Sprintf( + update := fmt.Sprintf( "%s: end-to-end steady latency %s; max steady latency so far %s; highwater %s", - lv.name, latency.Truncate(time.Millisecond), lv.maxSeenSteadyLatency.Truncate(time.Millisecond), highwaterTime)) + lv.name, latency.Truncate(time.Millisecond), lv.maxSeenSteadyLatency.Truncate(time.Millisecond), highwaterTime) + lv.setTestStatus(update) } } diff --git a/pkg/cmd/roachtest/tests/libpq_blocklist.go b/pkg/cmd/roachtest/tests/libpq_blocklist.go index 64802963931d..f7bdf2e004fe 100644 --- a/pkg/cmd/roachtest/tests/libpq_blocklist.go +++ b/pkg/cmd/roachtest/tests/libpq_blocklist.go @@ -22,7 +22,6 @@ var libPQBlocklist = blocklist{ "pq.TestCopyInRaiseStmtTrigger": "5807", "pq.TestCopyInTypes": "5807", "pq.TestCopyRespLoopConnectionError": "5807", - "pq.TestInfinityTimestamp": "41564", "pq.TestIssue186": "41558", "pq.TestIssue196": "41689", "pq.TestIssue282": "12137", diff --git a/pkg/cmd/roachtest/tests/logical_data_replication.go b/pkg/cmd/roachtest/tests/logical_data_replication.go index 49a197295834..8c71395bf2b6 100644 --- a/pkg/cmd/roachtest/tests/logical_data_replication.go +++ b/pkg/cmd/roachtest/tests/logical_data_replication.go @@ -359,7 +359,7 @@ func TestLDROnNodeShutdown( // Graceful shutdown on both nodes // TODO(naveen.setlur): maybe switch this to a less graceful shutdown via SIGKILL - stopOpts := option.NewStopOpts(option.Graceful(shutdownMaxWait)) + stopOpts := option.NewStopOpts(option.Graceful(shutdownGracePeriod)) t.L().Printf("Shutting down node-left: %d", nodeToStopL) monitor.ExpectDeath() if err := c.StopE(ctx, t.L(), stopOpts, c.Node(nodeToStopL)); err != nil { diff --git a/pkg/cmd/roachtest/tests/loss_of_quorum_recovery.go b/pkg/cmd/roachtest/tests/loss_of_quorum_recovery.go index 2bcaa17ddecb..73c351902093 100644 --- a/pkg/cmd/roachtest/tests/loss_of_quorum_recovery.go +++ b/pkg/cmd/roachtest/tests/loss_of_quorum_recovery.go @@ -71,7 +71,7 @@ func registerLOQRecovery(r registry.Registry) { testSpec := s r.Add(registry.TestSpec{ Name: s.testName(""), - Owner: registry.OwnerReplication, +
Owner: registry.OwnerKV, Benchmark: true, CompatibleClouds: registry.AllExceptAWS, Suites: registry.Suites(registry.Nightly), @@ -86,7 +86,7 @@ func registerLOQRecovery(r registry.Registry) { }) r.Add(registry.TestSpec{ Name: s.testName("half-online"), - Owner: registry.OwnerReplication, + Owner: registry.OwnerKV, Benchmark: true, CompatibleClouds: registry.AllExceptAWS, Suites: registry.Suites(registry.Nightly), diff --git a/pkg/cmd/roachtest/tests/mixed_version_backup.go b/pkg/cmd/roachtest/tests/mixed_version_backup.go index 4421215f3ac6..fccf0b69c5dc 100644 --- a/pkg/cmd/roachtest/tests/mixed_version_backup.go +++ b/pkg/cmd/roachtest/tests/mixed_version_backup.go @@ -2661,8 +2661,6 @@ func registerBackupMixedVersion(r registry.Registry) { mixedversion.ClusterSettingMutator("storage.ingest_split.enabled"), mixedversion.ClusterSettingMutator("storage.sstable.compression_algorithm"), ), - // Multi-tenant deployments are currently unsupported. See #127378. - mixedversion.EnabledDeploymentModes(mixedversion.SystemOnlyDeployment), ) testRNG := mvt.RNG() @@ -2778,7 +2776,7 @@ func schemaChangeWorkloadCmd( // TODO (msbutler): ideally we'd use the `db` flag to explicitly set the // database, but it is currently broken: // https://github.com/cockroachdb/cockroach/issues/115545 - runCmd := roachtestutil.NewCommand(fmt.Sprintf("COCKROACH_RANDOM_SEED=%d ./workload run schemachange", testRNG.Int63())). + runCmd := roachtestutil.NewCommand("COCKROACH_RANDOM_SEED=%d ./workload run schemachange", testRNG.Int63()). Flag("verbose", 1). Flag("max-ops", maxOps). Flag("concurrency", concurrency). diff --git a/pkg/cmd/roachtest/tests/mixed_version_c2c.go b/pkg/cmd/roachtest/tests/mixed_version_c2c.go new file mode 100644 index 000000000000..162fcca49793 --- /dev/null +++ b/pkg/cmd/roachtest/tests/mixed_version_c2c.go @@ -0,0 +1,475 @@ +// Copyright 2024 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package tests + +import ( + "context" + "fmt" + "math/rand" + "net/url" + "sync" + "time" + + "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/cluster" + "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/registry" + "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/roachtestutil" + "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/roachtestutil/mixedversion" + "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/spec" + "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/test" + "github.com/cockroachdb/cockroach/pkg/roachprod" + "github.com/cockroachdb/cockroach/pkg/roachprod/install" + "github.com/cockroachdb/cockroach/pkg/roachprod/logger" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb" + "github.com/cockroachdb/cockroach/pkg/testutils" + "github.com/cockroachdb/cockroach/pkg/util/hlc" + "github.com/cockroachdb/cockroach/pkg/util/syncutil" + "github.com/cockroachdb/cockroach/pkg/util/timeutil" + "github.com/cockroachdb/errors" +) + +func registerC2CMixedVersions(r registry.Registry) { + + sp := replicationSpec{ + srcNodes: 4, + dstNodes: 4, + timeout: 30 * time.Minute, + additionalDuration: 0 * time.Minute, + cutover: 30 * time.Second, + skipNodeDistributionCheck: true, + clouds: registry.AllClouds, + suites: registry.Suites(registry.Nightly), + } + + r.Add(registry.TestSpec{ + Name: "c2c/mixed-version", + Owner: registry.OwnerDisasterRecovery, + Cluster: r.MakeClusterSpec(sp.dstNodes+sp.srcNodes+1, spec.WorkloadNode()), + CompatibleClouds: sp.clouds, + Suites: registry.Suites(registry.Nightly), + Run: func(ctx context.Context, t test.Test, c cluster.Cluster) { + runC2CMixedVersions(ctx, t, c, sp) + }, + }) +} + +const ( + expectedMajorUpgrades = 1 + destTenantName = "dest" + replicationJobType = "REPLICATION STREAM INGESTION" + fingerprintQuery = `SELECT fingerprint FROM [SHOW EXPERIMENTAL_FINGERPRINTS FROM VIRTUAL CLUSTER $1 WITH START TIMESTAMP = '%s'] AS OF SYSTEM TIME '%s'` +) + +func runC2CMixedVersions(ctx context.Context, t test.Test, c cluster.Cluster, sp replicationSpec) { + cm := InitC2CMixed(ctx, t, c, sp) + cm.SetupHook(ctx) + cm.WorkloadHook(ctx) + cm.LatencyHook(ctx) + cm.UpdateHook(ctx) + cm.Run(ctx, c) +} + +func InitC2CMixed( + ctx context.Context, t test.Test, c cluster.Cluster, sp replicationSpec, +) *c2cMixed { + // TODO(msbutler): allow for version skipping and multiple upgrades. 
+ sourceMvt := mixedversion.NewTest(ctx, t, t.L(), c, c.Range(1, sp.srcNodes), + mixedversion.AlwaysUseLatestPredecessors, + mixedversion.NumUpgrades(expectedMajorUpgrades), + mixedversion.EnabledDeploymentModes(mixedversion.SharedProcessDeployment), + mixedversion.WithTag("source"), + mixedversion.DisableSkipVersionUpgrades, + ) + + destMvt := mixedversion.NewTest(ctx, t, t.L(), c, c.Range(sp.srcNodes+1, sp.srcNodes+sp.dstNodes), + mixedversion.AlwaysUseLatestPredecessors, + mixedversion.NumUpgrades(expectedMajorUpgrades), + mixedversion.EnabledDeploymentModes(mixedversion.SystemOnlyDeployment), + mixedversion.WithTag("dest"), + mixedversion.DisableSkipVersionUpgrades, + ) + + return &c2cMixed{ + sourceMvt: sourceMvt, + destMvt: destMvt, + sourceStartedChan: make(chan struct{}), + destStartedChan: make(chan struct{}), + sp: sp, + t: t, + c: c, + fingerprintArgsChan: make(chan fingerprintArgs, 1), + fingerprintChan: make(chan int64, 1), + } +} + +type sourceTenantInfo struct { + name string + pgurl *url.URL +} + +type fingerprintArgs struct { + retainedTime hlc.Timestamp + cutoverTime hlc.Timestamp +} + +type c2cMixed struct { + sourceMvt, destMvt *mixedversion.Test + // sourceStartedChan ensures the source cluster is started before the + // destination cluster is started. The source must be created before the dest + // due to a limitation in roachprod #129318. + sourceStartedChan chan struct{} + // destStartedChan prevents the source from starting upgrading until PCR has + // completed its initial scan during dest startup. In the future, we may relax + // this guardrail. + destStartedChan chan struct{} + sp replicationSpec + t test.Test + c cluster.Cluster + // fingerprintArgsChan sends information from dest to source about the correct + // arguments to use for fingerprinting. This channel is buffered so the dest + // can begin fingerprinting if the source is not ready to fingerprint. + fingerprintArgsChan chan fingerprintArgs + fingerprintChan chan int64 + // midUpgradeCatchupMu _attempts_ to prevent the source from upgrading while + // the destination is waiting for the stream to catch up in some mixed version + // state. + midUpgradeCatchupMu syncutil.Mutex + + ingestionJobID catpb.JobID + workloadStopper mixedversion.StopFunc +} + +func (cm *c2cMixed) SetupHook(ctx context.Context) { + // sourceInfoChan provides the destination with source cluster info generated + // during source startup. The channel is buffered so the source runner can + // buffer the information and proceed with the upgrade process even if the + // destination is not ready to receive the information. + sourceInfoChan := make(chan sourceTenantInfo, 1) + + cm.sourceMvt.OnStartup( + "generate pgurl", + func(ctx context.Context, l *logger.Logger, r *rand.Rand, h *mixedversion.Helper) error { + // Enable rangefeeds, required for PCR to work. 
+ l.Printf("enabling rangefeeds") + if err := h.System.Exec(r, "SET CLUSTER SETTING kv.rangefeed.enabled = true"); err != nil { + return errors.Wrap(err, "failed to enable rangefeeds") + } + close(cm.sourceStartedChan) + + l.Printf("generating pgurl") + srcNode := cm.c.Node(1) + srcClusterSetting := install.MakeClusterSettings() + addr, err := cm.c.ExternalPGUrl(ctx, l, srcNode, roachprod.PGURLOptions{ + VirtualClusterName: install.SystemInterfaceName, + }) + if err != nil { + return err + } + + pgURL, err := copyPGCertsAndMakeURL(ctx, cm.t, cm.c, srcNode, srcClusterSetting.PGUrlCertsDir, addr[0]) + if err != nil { + return err + } + + sourceInfoChan <- sourceTenantInfo{name: h.Tenant.Descriptor.Name, pgurl: pgURL} + + // TODO(msbutler): once we allow upgrades during initial scan, remove the + // destStartedChan. + l.Printf("waiting for destination tenant to be created and replication stream to begin") + chanReadCtx(ctx, cm.destStartedChan) + l.Printf("done") + + return nil + }, + ) + + cm.destMvt.OnStartup("create destination tenant on standby", + func(ctx context.Context, l *logger.Logger, r *rand.Rand, h *mixedversion.Helper) error { + l.Printf("waiting to hear from source cluster") + sourceInfo := chanReadCtx(ctx, sourceInfoChan) + + if err := h.Exec(r, fmt.Sprintf( + "CREATE TENANT %q FROM REPLICATION OF %q ON $1", + destTenantName, sourceInfo.name, + ), sourceInfo.pgurl.String()); err != nil { + return errors.Wrap(err, "creating destination tenant") + } + + if err := h.QueryRow(r, + "SELECT job_id FROM [SHOW JOBS] WHERE job_type = $1", + replicationJobType, + ).Scan(&cm.ingestionJobID); err != nil { + return errors.Wrap(err, "querying ingestion job ID") + } + + l.Printf("replication job: %d. Let initial scan complete", cm.ingestionJobID) + // TODO(msbutler): relax requirement that initial scan completes before upgrades. + if err := cm.WaitForReplicatedTime(ctx, timeutil.Now(), h, r, 5*time.Minute); err != nil { + return err + } + close(cm.destStartedChan) + return nil + }) +} + +func (cm *c2cMixed) WorkloadHook(ctx context.Context) { + tpccInitCmd := roachtestutil.NewCommand("./cockroach workload init tpcc"). + Arg("{pgurl%s}", cm.c.Range(1, cm.sp.srcNodes)). + Flag("warehouses", 10) + tpccRunCmd := roachtestutil.NewCommand("./cockroach workload run tpcc"). + Arg("{pgurl%s}", cm.c.Range(1, cm.sp.srcNodes)). + Option("tolerate-errors"). + Flag("warehouses", 500) + cm.workloadStopper = cm.sourceMvt.Workload("tpcc", cm.c.WorkloadNode(), tpccInitCmd, tpccRunCmd) +} + +func (cm *c2cMixed) LatencyHook(ctx context.Context) { + cm.destMvt.BackgroundFunc("latency verifier", func(ctx context.Context, l *logger.Logger, r *rand.Rand, h *mixedversion.Helper) error { + lv := makeLatencyVerifier("stream-ingestion", 0, cm.sp.maxAcceptedLatency, l, + getStreamIngestionJobInfo, func(args ...interface{}) { l.Printf(fmt.Sprintln(args...)) }, true) + defer lv.maybeLogLatencyHist() + _, db := h.RandomDB(r) + + // The latency verify doesn't need a stopper, as ctx cancellation will stop it. + dummyCh := make(chan struct{}) + if err := lv.pollLatencyUntilJobSucceeds(ctx, db, int(cm.ingestionJobID), time.Second*5, dummyCh); ctx.Err() == nil { + // The ctx is cancelled when the background func is successfully stopped, + // therefore, don't return a context cancellation error. 
+ return errors.Wrapf(err, "latency verifier failed") + } + return nil + }) +} + +// UpdateHook registers a few mixed version hooks that ensure that the upgrading +// clusters obey several invariants, which include: +// - the destination cluster must be the same or at most one major version ahead +// of the source cluster. This implies that for a given major upgrade, the +// destination cluster finalizes before the source cluster. +// - the app tenant must finalize after the system tenant (baked into the mixed +// version framework). +// +// The hooks also conduct the following: +// - during random destination side mixed version states, the upgrade processes +// will wait for the PCR replicated time to catch up, validating the stream can +// advance in a mixed version state. +// - After the destination has upgraded to its final version, it will issue a +// cutover command and fingerprint the app tenant key space. +// - The source will also run a fingerprint command using the same timestamps +// and ensure the fingerprints are the same. +func (cm *c2cMixed) UpdateHook(ctx context.Context) { + destFinalized := make(chan struct{}, 1) + + cm.destMvt.InMixedVersion("maybe wait for replicated time", + func(ctx context.Context, l *logger.Logger, r *rand.Rand, h *mixedversion.Helper) error { + if r.Intn(3) == 0 { + cm.midUpgradeCatchupMu.Lock() + defer cm.midUpgradeCatchupMu.Unlock() + return cm.WaitForReplicatedTime(ctx, nowLess30Seconds(), h, r, 5*time.Minute) + } + return nil + }) + + cm.sourceMvt.InMixedVersion( + "wait for dest to finalize if source is ready to finalize upgrade", + func(ctx context.Context, l *logger.Logger, r *rand.Rand, h *mixedversion.Helper) error { + + // If we have to wait for the lock, that implies the destination is + // waiting for the replication time to catch up in some mixed version + // state. + // + // NB: this lock is a best effort attempt to prevent the source side upgrade + // process from proceeding while the dest is waiting for the replication + // time to catch up. Specifically, when the source side is restarting + // nodes, it prevents the replication stream from advancing. This lock is + // best effort because the source could acquire and release the lock right + // before the dest acquires it, allowing the source side upgrade step to + // proceed. Furthermore, the source does not acquire this lock on every + // node restart step-- rather it is acquired on each InMixedVersion call, + // which occurs up to 4 times for each major upgrade. + cm.midUpgradeCatchupMu.Lock() + l.Printf("acquired mid upgrade lock") // nolint:deferunlockcheck + cm.midUpgradeCatchupMu.Unlock() + if h.Context().Stage == mixedversion.LastUpgradeStage { + l.Printf("waiting for destination cluster to finalize upgrade") + chanReadCtx(ctx, destFinalized) + } else { + l.Printf("no need to wait for dest: not ready to finalize") + } + return nil + }, + ) + + destMajorUpgradeCount := 0 + cm.destMvt.AfterUpgradeFinalized( + "cutover and allow source to finalize", + func(ctx context.Context, l *logger.Logger, r *rand.Rand, h *mixedversion.Helper) error { + // Ensure the source always waits to finalize until after the dest finalizes. + // NB: the cutover may happen while the source is still in some mixed version state.
+ destFinalized <- struct{}{} + destMajorUpgradeCount++ + if destMajorUpgradeCount == expectedMajorUpgrades { + return cm.destCutoverAndFingerprint(ctx, l, r, h) + } + return nil + }, + ) + + sourceMajorUpgradeCount := 0 + cm.sourceMvt.AfterUpgradeFinalized( + "fingerprint source", + func(ctx context.Context, l *logger.Logger, r *rand.Rand, h *mixedversion.Helper) error { + sourceMajorUpgradeCount++ + if sourceMajorUpgradeCount == expectedMajorUpgrades { + return cm.sourceFingerprintAndCompare(ctx, l, r, h) + } + return nil + }, + ) +} + +func (cm *c2cMixed) destCutoverAndFingerprint( + ctx context.Context, l *logger.Logger, r *rand.Rand, h *mixedversion.Helper, +) error { + + // Wait for destination to catch up before cutover. + // + // TODO(msbutler): test cutting over to a time when the app tenant is a + // different version. + if err := cm.WaitForReplicatedTime(ctx, nowLess30Seconds(), h, r, 5*time.Minute); err != nil { + return err + } + + var retainedTime time.Time + if err := h.QueryRow(r, + `SELECT retained_time FROM [SHOW TENANT $1 WITH REPLICATION STATUS]`, destTenantName).Scan(&retainedTime); err != nil { + return err + } + retainedHLCTime := hlc.Timestamp{WallTime: retainedTime.UnixNano()} + + var cutoverStr string + if err := h.QueryRow(r, "ALTER TENANT $1 COMPLETE REPLICATION TO LATEST", destTenantName).Scan(&cutoverStr); err != nil { + return err + } + cutover, err := hlc.ParseHLC(cutoverStr) + if err != nil { + return err + } + _, db := h.RandomDB(r) + if err := WaitForSucceeded(ctx, db, cm.ingestionJobID, time.Minute); err != nil { + return err + } + + l.Printf("Retained time %s; cutover time %s", retainedHLCTime.GoTime(), cutover.GoTime()) + // The fingerprint args are sent over to the source before the dest begins + // fingerprinting merely so both clusters can run the fingerprint commands in + // parallel. + cm.fingerprintArgsChan <- fingerprintArgs{ + retainedTime: retainedHLCTime, + cutoverTime: cutover, + } + var destFingerprint int64 + if err := h.QueryRow(r, + fmt.Sprintf(fingerprintQuery, retainedHLCTime.AsOfSystemTime(), cutover.AsOfSystemTime()), + destTenantName, + ).Scan(&destFingerprint); err != nil { + return err + } + cm.fingerprintChan <- destFingerprint + // TODO(msbutler): we could spin up the workload for a bit on the destination, + // just to check that it works after cutover. 
+ return nil +} + +func (cm *c2cMixed) sourceFingerprintAndCompare( + ctx context.Context, l *logger.Logger, r *rand.Rand, h *mixedversion.Helper, +) error { + args := chanReadCtx(ctx, cm.fingerprintArgsChan) + cm.workloadStopper() + var sourceFingerprint int64 + if err := h.System.QueryRow(r, + fmt.Sprintf(fingerprintQuery, args.retainedTime.AsOfSystemTime(), args.cutoverTime.AsOfSystemTime()), + h.Tenant.Descriptor.Name, + ).Scan(&sourceFingerprint); err != nil { + return err + } + + destFingerprint := chanReadCtx(ctx, cm.fingerprintChan) + if sourceFingerprint != destFingerprint { + return errors.Newf("source fingerprint %d does not match dest fingerprint %d", sourceFingerprint, destFingerprint) + } + return nil +} + +func (cm *c2cMixed) Run(ctx context.Context, c cluster.Cluster) { + var wg sync.WaitGroup + wg.Add(2) + + go func() { + defer func() { + if r := recover(); r != nil { + cm.t.L().Printf("source cluster upgrade failed: %v", r) + } + }() + defer wg.Done() + cm.sourceMvt.Run() + }() + + go func() { + defer func() { + if r := recover(); r != nil { + cm.t.L().Printf("destination cluster upgrade failed: %v", r) + } + }() + defer wg.Done() + + chanReadCtx(ctx, cm.sourceStartedChan) + cm.destMvt.Run() + }() + + wg.Wait() +} + +func (cm *c2cMixed) WaitForReplicatedTime( + ctx context.Context, + targetTime time.Time, + h *mixedversion.Helper, + r *rand.Rand, + timeout time.Duration, +) error { + cm.t.L().Printf("waiting for replicated time to advance past %s", targetTime) + return testutils.SucceedsWithinError(func() error { + query := "SELECT replicated_time FROM [SHOW TENANT $1 WITH REPLICATION STATUS]" + var replicatedTime time.Time + _, db := h.RandomDB(r) + if err := db.QueryRowContext(ctx, query, destTenantName).Scan(&replicatedTime); err != nil { + return err + } + if !replicatedTime.After(targetTime) { + return errors.Newf("replicated time %s not yet at %s", replicatedTime, targetTime) + } + cm.t.L().Printf("replicated time is now %s, past %s", replicatedTime, targetTime) + return nil + }, timeout) +} + +func nowLess30Seconds() time.Time { + return timeutil.Now().Add(-30 * time.Second) +} + +func chanReadCtx[T any](ctx context.Context, ch <-chan T) T { + select { + case v := <-ch: + return v + case <-ctx.Done(): + var zero T + return zero + } +} diff --git a/pkg/cmd/roachtest/tests/mixed_version_change_replicas.go b/pkg/cmd/roachtest/tests/mixed_version_change_replicas.go index 176ff2b48c6a..685efe8068a1 100644 --- a/pkg/cmd/roachtest/tests/mixed_version_change_replicas.go +++ b/pkg/cmd/roachtest/tests/mixed_version_change_replicas.go @@ -31,7 +31,7 @@ import ( func registerChangeReplicasMixedVersion(r registry.Registry) { r.Add(registry.TestSpec{ Name: "change-replicas/mixed-version", - Owner: registry.OwnerReplication, + Owner: registry.OwnerKV, Cluster: r.MakeClusterSpec(4), CompatibleClouds: registry.AllExceptAWS, Suites: registry.Suites(registry.Nightly), diff --git a/pkg/cmd/roachtest/tests/mixed_version_import.go b/pkg/cmd/roachtest/tests/mixed_version_import.go index 14ceb5e5e185..3dcb1e4f48f7 100644 --- a/pkg/cmd/roachtest/tests/mixed_version_import.go +++ b/pkg/cmd/roachtest/tests/mixed_version_import.go @@ -50,6 +50,8 @@ func runImportMixedVersions(ctx context.Context, t test.Test, c cluster.Cluster, // the `workload fixtures import` command, which is only supported // reliably multi-tenant mode starting from that version. mixedversion.MinimumSupportedVersion("v23.2.0"), + // Only use the latest version of each release to work around #127029. 
+ mixedversion.AlwaysUseLatestPredecessors, ) runImport := func(ctx context.Context, l *logger.Logger, r *rand.Rand, h *mixedversion.Helper) error { if err := h.Exec(r, "DROP DATABASE IF EXISTS tpcc CASCADE;"); err != nil { diff --git a/pkg/cmd/roachtest/tests/mixed_version_multi_region.go b/pkg/cmd/roachtest/tests/mixed_version_multi_region.go index 445cfeede121..19f15043b6e7 100644 --- a/pkg/cmd/roachtest/tests/mixed_version_multi_region.go +++ b/pkg/cmd/roachtest/tests/mixed_version_multi_region.go @@ -122,6 +122,13 @@ func registerMultiRegionMixedVersion(r registry.Registry) { mvt.OnStartup( "setup tpcc", func(ctx context.Context, l *logger.Logger, rng *rand.Rand, h *mixedversion.Helper) error { + if err := enableTenantSplitScatter(l, rng, h); err != nil { + return err + } + if err := enableTenantMultiRegion(l, rng, h); err != nil { + return err + } + setupTPCC(ctx, t, l, c, backgroundTPCCOpts) // Update the `SetupType` so that the corresponding // `runTPCC` calls don't attempt to import data again. diff --git a/pkg/cmd/roachtest/tests/multi_region_system_database.go b/pkg/cmd/roachtest/tests/multi_region_system_database.go index f2681943709d..9b498de8e355 100644 --- a/pkg/cmd/roachtest/tests/multi_region_system_database.go +++ b/pkg/cmd/roachtest/tests/multi_region_system_database.go @@ -67,7 +67,7 @@ func registerMultiRegionSystemDatabase(r registry.Registry) { // Perform rolling restart to propagate region information to non-primary nodes for i := 2; i <= nodes; i++ { t.WorkerStatus("stop") - c.Stop(ctx, t.L(), option.NewStopOpts(option.Graceful(shutdownMaxWait)), c.Node(i)) + c.Stop(ctx, t.L(), option.NewStopOpts(option.Graceful(shutdownGracePeriod)), c.Node(i)) t.WorkerStatus("start") startOpts := option.DefaultStartOpts() c.Start(ctx, t.L(), startOpts, install.MakeClusterSettings(install.SecureOption(false)), c.Node(i)) diff --git a/pkg/cmd/roachtest/tests/multitenant_distsql.go b/pkg/cmd/roachtest/tests/multitenant_distsql.go index 78a500316d53..af566a9f3936 100644 --- a/pkg/cmd/roachtest/tests/multitenant_distsql.go +++ b/pkg/cmd/roachtest/tests/multitenant_distsql.go @@ -37,6 +37,8 @@ func registerMultiTenantDistSQL(r registry.Registry) { b := bundle to := timeout r.Add(registry.TestSpec{ + Skip: "https://github.com/cockroachdb/cockroach/issues/128366", + SkipDetails: "test is broken", Name: fmt.Sprintf("multitenant/distsql/instances=%d/bundle=%s/timeout=%d", numInstances, b, to), Owner: registry.OwnerSQLQueries, Cluster: r.MakeClusterSpec(4), diff --git a/pkg/cmd/roachtest/tests/mvcc_gc.go b/pkg/cmd/roachtest/tests/mvcc_gc.go index 545236882ab8..cf11ad637f73 100644 --- a/pkg/cmd/roachtest/tests/mvcc_gc.go +++ b/pkg/cmd/roachtest/tests/mvcc_gc.go @@ -637,7 +637,7 @@ func sendBatchRequest( debugEnv = fmt.Sprintf("COCKROACH_RANDOM_SEED=%d ", randomSeed) } cmd := roachtestutil.NewCommand("./cockroach debug send-kv-batch"). - Arg(requestFileName). + Arg("%s", requestFileName). Flag("certs-dir", install.CockroachNodeCertsDir). Flag("host", fmt.Sprintf("localhost:{pgport:%d}", node)). 
String() diff --git a/pkg/cmd/roachtest/tests/npgsql_blocklist.go b/pkg/cmd/roachtest/tests/npgsql_blocklist.go index 6850295a728b..a141b1fe75fe 100644 --- a/pkg/cmd/roachtest/tests/npgsql_blocklist.go +++ b/pkg/cmd/roachtest/tests/npgsql_blocklist.go @@ -493,12 +493,6 @@ var npgsqlBlocklist = blocklist{ `Npgsql.Tests.Types.CompositeTests(NonMultiplexing).Table_as_composite(True)`: "unknown", `Npgsql.Tests.Types.CompositeTests(NonMultiplexing).Table_as_composite_with_deleted_columns`: "unknown", `Npgsql.Tests.Types.CompositeTests(NonMultiplexing).With_schema`: "unknown", - `Npgsql.Tests.Types.DateTimeInfinityTests(False).TimestampTz_read`: "unknown", - `Npgsql.Tests.Types.DateTimeInfinityTests(False).TimestampTz_write`: "unknown", - `Npgsql.Tests.Types.DateTimeInfinityTests(False).Timestamp_read`: "unknown", - `Npgsql.Tests.Types.DateTimeInfinityTests(False).Timestamp_write`: "unknown", - `Npgsql.Tests.Types.DateTimeInfinityTests(True).TimestampTz_read`: "unknown", - `Npgsql.Tests.Types.DateTimeInfinityTests(True).Timestamp_read`: "unknown", `Npgsql.Tests.Types.DateTimeTests.Cannot_mix_DateTime_Kinds_in_range`: "unknown", `Npgsql.Tests.Types.DateTimeTests.Daterange_as_NpgsqlRange_of_DateOnly`: "unknown", `Npgsql.Tests.Types.DateTimeTests.Daterange_as_NpgsqlRange_of_DateTime`: "unknown", diff --git a/pkg/cmd/roachtest/tests/pgjdbc_blocklist.go b/pkg/cmd/roachtest/tests/pgjdbc_blocklist.go index 5024692c0020..bc2c19f9b68f 100644 --- a/pkg/cmd/roachtest/tests/pgjdbc_blocklist.go +++ b/pkg/cmd/roachtest/tests/pgjdbc_blocklist.go @@ -14,642 +14,620 @@ package tests // After a failed run, an updated version of this blocklist should be available // in the test log. var pgjdbcBlockList = blocklist{ - `org.postgresql.core.OidValuesCorrectnessTest.testValue[oidName=BOX, oidValue=603]`: "unknown", - `org.postgresql.core.OidValuesCorrectnessTest.testValue[oidName=BOX_ARRAY, oidValue=1,020]`: "unknown", - `org.postgresql.core.OidValuesCorrectnessTest.testValue[oidName=CIDR, oidValue=650]`: "unknown", - `org.postgresql.core.OidValuesCorrectnessTest.testValue[oidName=CIRCLE, oidValue=718]`: "unknown", - `org.postgresql.core.OidValuesCorrectnessTest.testValue[oidName=JSON, oidValue=114]`: "unknown", - `org.postgresql.core.OidValuesCorrectnessTest.testValue[oidName=JSON_ARRAY, oidValue=199]`: "unknown", - `org.postgresql.core.OidValuesCorrectnessTest.testValue[oidName=LINE, oidValue=628]`: "unknown", - `org.postgresql.core.OidValuesCorrectnessTest.testValue[oidName=LSEG, oidValue=601]`: "unknown", - `org.postgresql.core.OidValuesCorrectnessTest.testValue[oidName=MACADDR, oidValue=829]`: "unknown", - `org.postgresql.core.OidValuesCorrectnessTest.testValue[oidName=MACADDR8, oidValue=774]`: "unknown", - `org.postgresql.core.OidValuesCorrectnessTest.testValue[oidName=MONEY, oidValue=790]`: "unknown", - `org.postgresql.core.OidValuesCorrectnessTest.testValue[oidName=MONEY_ARRAY, oidValue=791]`: "unknown", - `org.postgresql.core.OidValuesCorrectnessTest.testValue[oidName=PATH, oidValue=602]`: "unknown", - `org.postgresql.core.OidValuesCorrectnessTest.testValue[oidName=POINT, oidValue=600]`: "unknown", - `org.postgresql.core.OidValuesCorrectnessTest.testValue[oidName=POINT_ARRAY, oidValue=1,017]`: "unknown", - `org.postgresql.core.OidValuesCorrectnessTest.testValue[oidName=POLYGON, oidValue=604]`: "unknown", - `org.postgresql.core.OidValuesCorrectnessTest.testValue[oidName=XML, oidValue=142]`: "unknown", - `org.postgresql.core.OidValuesCorrectnessTest.testValue[oidName=XML_ARRAY, oidValue=143]`: "unknown", - 
`org.postgresql.jdbc.BitFieldTest.TestGetObjectForBitFields`: "unknown", - `org.postgresql.jdbc.BitFieldTest.TestSetBitParameter`: "unknown", - `org.postgresql.jdbc.LargeObjectManagerTest.objectWriteThenRead()`: "unknown", - `org.postgresql.jdbc.LargeObjectManagerTest.openWithErrorAndSubsequentParameterStatusMessageShouldLeaveConnectionInUsableStateAndUpdateParameterStatus()`: "unknown", - `org.postgresql.jdbc.UUIDArrayTest.initializationError`: "32552", - `org.postgresql.replication.CopyBothResponseTest.keedAliveContainsCorrectLSN()`: "unknown", - `org.postgresql.replication.CopyBothResponseTest.openConnectByReplicationProtocol()`: "unknown", - `org.postgresql.replication.CopyBothResponseTest.receiveKeepAliveMessage()`: "unknown", - `org.postgresql.replication.CopyBothResponseTest.receiveXLogData()`: "unknown", - `org.postgresql.replication.LogicalReplicationStatusTest.applyLocationDoNotChangeDuringReceiveMessage()`: "unknown", - `org.postgresql.replication.LogicalReplicationStatusTest.applyLocationDoNotDependOnFlushLocation()`: "unknown", - `org.postgresql.replication.LogicalReplicationStatusTest.applyLocationEqualToSetLocation()`: "unknown", - `org.postgresql.replication.LogicalReplicationStatusTest.flushLocationDoNotChangeDuringReceiveMessage()`: "unknown", - `org.postgresql.replication.LogicalReplicationStatusTest.flushLocationEqualToSetLocation()`: "unknown", - `org.postgresql.replication.LogicalReplicationStatusTest.keepAliveServerLSNCanBeUsedToAdvanceFlushLSN()`: "unknown", - `org.postgresql.replication.LogicalReplicationStatusTest.lastReceiveLSNCorrectOnView()`: "unknown", - `org.postgresql.replication.LogicalReplicationStatusTest.receivedLSNDependentOnProcessMessage()`: "unknown", - `org.postgresql.replication.LogicalReplicationStatusTest.sentLocationEqualToLastReceiveLSN()`: "unknown", - `org.postgresql.replication.LogicalReplicationStatusTest.statusCanBeSentToBackendAsynchronously()`: "unknown", - `org.postgresql.replication.LogicalReplicationStatusTest.writeLocationCanBeLessThanSendLocation()`: "unknown", - `org.postgresql.replication.LogicalReplicationTest.afterCloseConnectionDBSLotStatusNotActive()`: "unknown", - `org.postgresql.replication.LogicalReplicationTest.afterCloseReplicationStreamDBSlotStatusNotActive()`: "unknown", - `org.postgresql.replication.LogicalReplicationTest.afterStartStreamingDBSlotStatusActive()`: "unknown", - `org.postgresql.replication.LogicalReplicationTest.avoidTimeoutDisconnectWithDefaultStatusInterval()`: "unknown", - `org.postgresql.replication.LogicalReplicationTest.doesNotHavePendingMessageWhenStartFromLastLSN()`: "unknown", - `org.postgresql.replication.LogicalReplicationTest.duringSendBigTransactionConnectionCloseSlotStatusNotActive()`: "unknown", - `org.postgresql.replication.LogicalReplicationTest.duringSendBigTransactionReplicationStreamCloseNotActive()`: "unknown", - `org.postgresql.replication.LogicalReplicationTest.notAvailableStartNotExistReplicationSlot()`: "unknown", - `org.postgresql.replication.LogicalReplicationTest.readActualChangesWithoutBlock()`: "unknown", - `org.postgresql.replication.LogicalReplicationTest.readPreviousChangesWithoutBlock()`: "unknown", - `org.postgresql.replication.LogicalReplicationTest.receiveChangesAfterStartReplication()`: "unknown", - `org.postgresql.replication.LogicalReplicationTest.receiveChangesOccursBeforeStartReplication()`: "unknown", - `org.postgresql.replication.LogicalReplicationTest.repeatWalPositionTwice()`: "unknown", - 
`org.postgresql.replication.LogicalReplicationTest.replicationRestartFromLastFeedbackPosition()`: "unknown", - `org.postgresql.replication.LogicalReplicationTest.replicationRestartFromLastFeedbackPositionParallelTransaction()`: "unknown", - `org.postgresql.replication.LogicalReplicationTest.restartReplicationFromRestartSlotLSNWhenFeedbackAbsent()`: "unknown", - `org.postgresql.replication.LogicalReplicationTest.startFromCurrentServerLSNWithoutSpecifyLSNExplicitly()`: "unknown", - `org.postgresql.replication.PhysicalReplicationTest.afterCloseReplicationStreamDBSlotStatusNotActive()`: "unknown", - `org.postgresql.replication.PhysicalReplicationTest.afterStartStreamingDBSlotStatusActive()`: "unknown", - `org.postgresql.replication.PhysicalReplicationTest.receiveChangesWithReplicationSlot()`: "unknown", - `org.postgresql.replication.PhysicalReplicationTest.receiveChangesWithoutReplicationSlot()`: "unknown", - `org.postgresql.replication.PhysicalReplicationTest.restartPhysicalReplicationWithoutRepeatMessage()`: "unknown", - `org.postgresql.replication.PhysicalReplicationTest.walRecordCanBeRepeatBeRestartReplication()`: "unknown", - `org.postgresql.replication.ReplicationConnectionTest.connectionNotValidWhenSessionTerminated()`: "unknown", - `org.postgresql.replication.ReplicationSlotTest.createLogicalSlot()`: "unknown", - `org.postgresql.replication.ReplicationSlotTest.createLogicalSlotReturnedInfo()`: "unknown", - `org.postgresql.replication.ReplicationSlotTest.createPhysicalSlot()`: "unknown", - `org.postgresql.replication.ReplicationSlotTest.createPhysicalSlotReturnedInfo()`: "unknown", - `org.postgresql.replication.ReplicationSlotTest.createTemporaryLogicalSlotPg10AndHigher()`: "unknown", - `org.postgresql.replication.ReplicationSlotTest.createTemporaryPhysicalSlotPg10AndHigher()`: "unknown", - `org.postgresql.replication.ReplicationSlotTest.dropLogicalSlot()`: "unknown", - `org.postgresql.replication.ReplicationSlotTest.dropPhysicalSlot()`: "unknown", - `org.postgresql.test.core.LogServerMessagePropertyTest.batchExplicitlyEnabled()`: "unknown", - `org.postgresql.test.core.LogServerMessagePropertyTest.batchWithDefaults()`: "unknown", - `org.postgresql.test.core.LogServerMessagePropertyTest.batchWithLogServerErrorDetailDisabled()`: "unknown", - `org.postgresql.test.core.LogServerMessagePropertyTest.withDefaults()`: "unknown", - `org.postgresql.test.core.LogServerMessagePropertyTest.withExplicitlyEnabled()`: "unknown", - `org.postgresql.test.core.LogServerMessagePropertyTest.withLogServerErrorDetailDisabled()`: "unknown", - `org.postgresql.test.jdbc2.ArrayTest.testEscaping[binary = FORCE]`: "32552", - `org.postgresql.test.jdbc2.ArrayTest.testEscaping[binary = REGULAR]`: "32552", - `org.postgresql.test.jdbc2.ArrayTest.testIndexAccess[binary = FORCE]`: "32552", - `org.postgresql.test.jdbc2.ArrayTest.testIndexAccess[binary = REGULAR]`: "32552", - `org.postgresql.test.jdbc2.ArrayTest.testMultiDimensionalArray[binary = FORCE]`: "32552", - `org.postgresql.test.jdbc2.ArrayTest.testMultiDimensionalArray[binary = REGULAR]`: "32552", - `org.postgresql.test.jdbc2.ArrayTest.testNonStandardBounds[binary = FORCE]`: "41405", - `org.postgresql.test.jdbc2.ArrayTest.testNonStandardBounds[binary = REGULAR]`: "41405", - `org.postgresql.test.jdbc2.ArrayTest.testNonStandardDelimiter[binary = FORCE]`: "21286", - `org.postgresql.test.jdbc2.ArrayTest.testNonStandardDelimiter[binary = REGULAR]`: "21286", - `org.postgresql.test.jdbc2.ArrayTest.testNullValues[binary = FORCE]`: "26925", - 
`org.postgresql.test.jdbc2.ArrayTest.testNullValues[binary = REGULAR]`: "26925", - `org.postgresql.test.jdbc2.ArrayTest.testRecursiveResultSets[binary = FORCE]`: "32552", - `org.postgresql.test.jdbc2.ArrayTest.testRecursiveResultSets[binary = REGULAR]`: "32552", - `org.postgresql.test.jdbc2.ArrayTest.testRetrieveArrays[binary = FORCE]`: "41405", - `org.postgresql.test.jdbc2.ArrayTest.testRetrieveArrays[binary = REGULAR]`: "41405", - `org.postgresql.test.jdbc2.ArrayTest.testRetrieveResultSets[binary = FORCE]`: "41405", - `org.postgresql.test.jdbc2.ArrayTest.testRetrieveResultSets[binary = REGULAR]`: "41405", - `org.postgresql.test.jdbc2.ArrayTest.testSetArray[binary = FORCE]`: "26925", - `org.postgresql.test.jdbc2.ArrayTest.testSetArray[binary = REGULAR]`: "26925", - `org.postgresql.test.jdbc2.ArrayTest.testSetPrimitiveArraysObjects[binary = FORCE]`: "26925", - `org.postgresql.test.jdbc2.ArrayTest.testSetPrimitiveArraysObjects[binary = REGULAR]`: "26925", - `org.postgresql.test.jdbc2.ArrayTest.testSetPrimitiveObjects[binary = FORCE]`: "26925", - `org.postgresql.test.jdbc2.ArrayTest.testSetPrimitiveObjects[binary = REGULAR]`: "26925", - `org.postgresql.test.jdbc2.ArrayTest.testStringEscaping[binary = FORCE]`: "unknown", - `org.postgresql.test.jdbc2.ArrayTest.testStringEscaping[binary = REGULAR]`: "unknown", - `org.postgresql.test.jdbc2.ArrayTest.testUnknownArrayType[binary = FORCE]`: "unknown", - `org.postgresql.test.jdbc2.ArrayTest.testUnknownArrayType[binary = REGULAR]`: "unknown", - `org.postgresql.test.jdbc2.ArrayTest.testWriteMultiDimensional[binary = FORCE]`: "32552", - `org.postgresql.test.jdbc2.ArrayTest.testWriteMultiDimensional[binary = REGULAR]`: "32552", - `org.postgresql.test.jdbc2.BatchExecuteTest.testBatchWithAlternatingTypes[binary = FORCE, insertRewrite = false]`: "41513", - `org.postgresql.test.jdbc2.BatchExecuteTest.testBatchWithAlternatingTypes[binary = FORCE, insertRewrite = true]`: "41513", - `org.postgresql.test.jdbc2.BatchExecuteTest.testBatchWithAlternatingTypes[binary = REGULAR, insertRewrite = false]`: "41513", - `org.postgresql.test.jdbc2.BatchExecuteTest.testBatchWithAlternatingTypes[binary = REGULAR, insertRewrite = true]`: "41513", - `org.postgresql.test.jdbc2.BatchExecuteTest.testBatchWithEmbeddedNulls[binary = FORCE, insertRewrite = false]`: "26366", - `org.postgresql.test.jdbc2.BatchExecuteTest.testBatchWithEmbeddedNulls[binary = FORCE, insertRewrite = true]`: "26366", - `org.postgresql.test.jdbc2.BatchExecuteTest.testBatchWithEmbeddedNulls[binary = REGULAR, insertRewrite = false]`: "26366", - `org.postgresql.test.jdbc2.BatchExecuteTest.testBatchWithEmbeddedNulls[binary = REGULAR, insertRewrite = true]`: "26366", - `org.postgresql.test.jdbc2.BlobTest.initializationError`: "unknown", - `org.postgresql.test.jdbc2.BlobTransactionTest.concurrentReplace()`: "102201", - `org.postgresql.test.jdbc2.CallableStmtTest.testBadStmt`: "17511", - `org.postgresql.test.jdbc2.CallableStmtTest.testBatchCall`: "17511", - `org.postgresql.test.jdbc2.CallableStmtTest.testFetchBeforeExecute`: "17511", - `org.postgresql.test.jdbc2.CallableStmtTest.testFetchWithNoResults`: "17511", - `org.postgresql.test.jdbc2.CallableStmtTest.testGetArray`: "17511", - `org.postgresql.test.jdbc2.CallableStmtTest.testGetDouble`: "17511", - `org.postgresql.test.jdbc2.CallableStmtTest.testGetInt`: "17511", - `org.postgresql.test.jdbc2.CallableStmtTest.testGetNumeric`: "17511", - `org.postgresql.test.jdbc2.CallableStmtTest.testGetNumericWithoutArg`: "17511", - 
`org.postgresql.test.jdbc2.CallableStmtTest.testGetShort`: "17511", - `org.postgresql.test.jdbc2.CallableStmtTest.testGetString`: "17511", - `org.postgresql.test.jdbc2.CallableStmtTest.testGetUpdateCount`: "17511", - `org.postgresql.test.jdbc2.CallableStmtTest.testRaiseNotice`: "17511", - `org.postgresql.test.jdbc2.CallableStmtTest.testWasNullBeforeFetch`: "17511", - `org.postgresql.test.jdbc2.ClientEncodingTest.setEncodingAscii[allowEncodingChanges=false]`: "37129", - `org.postgresql.test.jdbc2.ClientEncodingTest.setEncodingAscii[allowEncodingChanges=true]`: "37129", - `org.postgresql.test.jdbc2.ConnectionTest.createStatement()`: "41578", - `org.postgresql.test.jdbc2.ConnectionTest.doubleClose()`: "41578", - `org.postgresql.test.jdbc2.ConnectionTest.getTypeMapEmpty()`: "41578", - `org.postgresql.test.jdbc2.ConnectionTest.isClosed()`: "41578", - `org.postgresql.test.jdbc2.ConnectionTest.nativeSQL()`: "41578", - `org.postgresql.test.jdbc2.ConnectionTest.pGStreamSettings()`: "41578", - `org.postgresql.test.jdbc2.ConnectionTest.prepareCall()`: "41578", - `org.postgresql.test.jdbc2.ConnectionTest.prepareStatement()`: "41578", - `org.postgresql.test.jdbc2.ConnectionTest.readOnly_always()`: "41578", - `org.postgresql.test.jdbc2.ConnectionTest.readOnly_ignore()`: "41578", - `org.postgresql.test.jdbc2.ConnectionTest.readOnly_transaction()`: "41578", - `org.postgresql.test.jdbc2.ConnectionTest.transactionIsolation()`: "41578", - `org.postgresql.test.jdbc2.ConnectionTest.transactions()`: "41578", - `org.postgresql.test.jdbc2.ConnectionTest.typeMaps()`: "41578", - `org.postgresql.test.jdbc2.ConnectionTest.warnings()`: "41578", - `org.postgresql.test.jdbc2.CopyTest.changeDateStyle()`: "unknown", - `org.postgresql.test.jdbc2.CopyTest.copyMultiApi()`: "unknown", - `org.postgresql.test.jdbc2.CopyTest.lockReleaseOnCancelFailure()`: "unknown", - `org.postgresql.test.jdbc2.CustomTypeWithBinaryTransferTest.initializationError`: "21286", - `org.postgresql.test.jdbc2.DatabaseMetaDataCacheTest.getSQLTypeQueryCache()`: "unknown", - `org.postgresql.test.jdbc2.DatabaseMetaDataTest.binary = FORCE`: "32552", - `org.postgresql.test.jdbc2.DatabaseMetaDataTest.binary = REGULAR`: "32552", - `org.postgresql.test.jdbc2.DatabaseMetaDataTransactionIsolationTest.[1] 8`: "unknown", - `org.postgresql.test.jdbc2.DatabaseMetaDataTransactionIsolationTest.[1] read committed`: "unknown", - `org.postgresql.test.jdbc2.DatabaseMetaDataTransactionIsolationTest.[2] 4`: "unknown", - `org.postgresql.test.jdbc2.DatabaseMetaDataTransactionIsolationTest.[2] read uncommitted`: "unknown", - `org.postgresql.test.jdbc2.DatabaseMetaDataTransactionIsolationTest.[3] 2`: "unknown", - `org.postgresql.test.jdbc2.DatabaseMetaDataTransactionIsolationTest.[3] repeatable read`: "unknown", - `org.postgresql.test.jdbc2.DatabaseMetaDataTransactionIsolationTest.[4] 1`: "unknown", - `org.postgresql.test.jdbc2.DatabaseMetaDataTransactionIsolationTest.[4] serializable`: "unknown", - `org.postgresql.test.jdbc2.DatabaseMetaDataTransactionIsolationTest.connectionTransactionIsolation()`: "unknown", - `org.postgresql.test.jdbc2.DatabaseMetaDataTransactionIsolationTest.metadataDefaultTransactionIsolation()`: "unknown", - `org.postgresql.test.jdbc2.DateStyleTest.connect[dateStyle=PostgreSQL, shouldPass=false]`: "unknown", - `org.postgresql.test.jdbc2.EnumTest.enumArrayArray[binary = FORCE]`: "51480", - `org.postgresql.test.jdbc2.EnumTest.enumArrayArray[binary = REGULAR]`: "51480", - `org.postgresql.test.jdbc2.EnumTest.enumArray[binary = FORCE]`: "51480", - 
`org.postgresql.test.jdbc2.EnumTest.enumArray[binary = REGULAR]`: "51480", - `org.postgresql.test.jdbc2.GeometricTest.testPGbox[binary = FORCE]`: "21286", - `org.postgresql.test.jdbc2.GeometricTest.testPGbox[binary = REGULAR]`: "21286", - `org.postgresql.test.jdbc2.GeometricTest.testPGcircle[binary = FORCE]`: "21286", - `org.postgresql.test.jdbc2.GeometricTest.testPGcircle[binary = REGULAR]`: "21286", - `org.postgresql.test.jdbc2.GeometricTest.testPGline[binary = FORCE]`: "21286", - `org.postgresql.test.jdbc2.GeometricTest.testPGline[binary = REGULAR]`: "21286", - `org.postgresql.test.jdbc2.GeometricTest.testPGlseg[binary = FORCE]`: "21286", - `org.postgresql.test.jdbc2.GeometricTest.testPGlseg[binary = REGULAR]`: "21286", - `org.postgresql.test.jdbc2.GeometricTest.testPGpath[binary = FORCE]`: "21286", - `org.postgresql.test.jdbc2.GeometricTest.testPGpath[binary = REGULAR]`: "21286", - `org.postgresql.test.jdbc2.GeometricTest.testPGpoint[binary = FORCE]`: "21286", - `org.postgresql.test.jdbc2.GeometricTest.testPGpoint[binary = REGULAR]`: "21286", - `org.postgresql.test.jdbc2.GeometricTest.testPGpolygon[binary = FORCE]`: "21286", - `org.postgresql.test.jdbc2.GeometricTest.testPGpolygon[binary = REGULAR]`: "21286", - `org.postgresql.test.jdbc2.JBuilderTest.money()`: "41578", - `org.postgresql.test.jdbc2.NotifyTest.asyncNotify()`: "unknown", - `org.postgresql.test.jdbc2.NotifyTest.asyncNotifyWithEndlessTimeoutAndMessagesAvailableWhenStartingListening()`: "unknown", - `org.postgresql.test.jdbc2.NotifyTest.asyncNotifyWithEndlessTimeoutAndMessagesSendAfter()`: "unknown", - `org.postgresql.test.jdbc2.NotifyTest.asyncNotifyWithTimeout()`: "unknown", - `org.postgresql.test.jdbc2.NotifyTest.asyncNotifyWithTimeoutAndMessagesAvailableWhenStartingListening()`: "unknown", - `org.postgresql.test.jdbc2.NotifyTest.asyncNotifyWithTimeoutAndMessagesSendAfter()`: "unknown", - `org.postgresql.test.jdbc2.NotifyTest.asyncNotifyWithTimeoutAndSocketThatBecomesClosed()`: "unknown", - `org.postgresql.test.jdbc2.NotifyTest.notifyArgument()`: "unknown", - `org.postgresql.test.jdbc2.NotifyTest.testNotify()`: "unknown", - `org.postgresql.test.jdbc2.NumericTransferTest.receive100000[binary = REGULAR]`: "unknown", - `org.postgresql.test.jdbc2.NumericTransferTest.sendReceive100000[binary = REGULAR]`: "unknown", - `org.postgresql.test.jdbc2.NumericTransferTest2.sendReceiveValue[binary = FORCE, value = -1]`: "unknown", - `org.postgresql.test.jdbc2.NumericTransferTest2.sendReceiveValue[binary = FORCE, value = 0.1]`: "unknown", - `org.postgresql.test.jdbc2.NumericTransferTest2.sendReceiveValue[binary = FORCE, value = 0]`: "unknown", - `org.postgresql.test.jdbc2.NumericTransferTest2.sendReceiveValue[binary = FORCE, value = 1,000,000]`: "unknown", - `org.postgresql.test.jdbc2.NumericTransferTest2.sendReceiveValue[binary = FORCE, value = 1.1]`: "unknown", - `org.postgresql.test.jdbc2.NumericTransferTest2.sendReceiveValue[binary = FORCE, value = 10,000,000,000,000,000,000,000,000,000,000,000,000]`: "unknown", - `org.postgresql.test.jdbc2.NumericTransferTest2.sendReceiveValue[binary = FORCE, value = 1]`: "unknown", - `org.postgresql.test.jdbc2.NumericTransferTest2.sendReceiveValue[binary = FORCE, value = 20,000]`: "unknown", - `org.postgresql.test.jdbc2.NumericTransferTest2.sendReceiveValue[binary = FORCE, value = 40,000]`: "unknown", - `org.postgresql.test.jdbc2.NumericTransferTest2.sendReceiveValue[binary = FORCE, value = 9,990,000]`: "unknown", - `org.postgresql.test.jdbc2.NumericTransferTest2.sendReceiveValue[binary = REGULAR, 
value = -1]`: "unknown", - `org.postgresql.test.jdbc2.NumericTransferTest2.sendReceiveValue[binary = REGULAR, value = 0.1]`: "unknown", - `org.postgresql.test.jdbc2.NumericTransferTest2.sendReceiveValue[binary = REGULAR, value = 0]`: "unknown", - `org.postgresql.test.jdbc2.NumericTransferTest2.sendReceiveValue[binary = REGULAR, value = 1,000,000]`: "unknown", - `org.postgresql.test.jdbc2.NumericTransferTest2.sendReceiveValue[binary = REGULAR, value = 1.1]`: "unknown", - `org.postgresql.test.jdbc2.NumericTransferTest2.sendReceiveValue[binary = REGULAR, value = 10,000,000,000,000,000,000,000,000,000,000,000,000]`: "unknown", - `org.postgresql.test.jdbc2.NumericTransferTest2.sendReceiveValue[binary = REGULAR, value = 1]`: "unknown", - `org.postgresql.test.jdbc2.NumericTransferTest2.sendReceiveValue[binary = REGULAR, value = 20,000]`: "unknown", - `org.postgresql.test.jdbc2.NumericTransferTest2.sendReceiveValue[binary = REGULAR, value = 40,000]`: "unknown", - `org.postgresql.test.jdbc2.NumericTransferTest2.sendReceiveValue[binary = REGULAR, value = 9,990,000]`: "unknown", - `org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobjectSubtype[binary = FORCE, sql = null::box, type = class org.postgresql.geometric.PGbox]`: "21286", - `org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobjectSubtype[binary = FORCE, sql = null::circle, type = class org.postgresql.geometric.PGcircle]`: "21286", - `org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobjectSubtype[binary = FORCE, sql = null::line, type = class org.postgresql.geometric.PGline]`: "21286", - `org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobjectSubtype[binary = FORCE, sql = null::lseg, type = class org.postgresql.geometric.PGlseg]`: "21286", - `org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobjectSubtype[binary = FORCE, sql = null::money, type = class org.postgresql.util.PGmoney]`: "41578", - `org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobjectSubtype[binary = FORCE, sql = null::path, type = class org.postgresql.geometric.PGpath]`: "21286", - `org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobjectSubtype[binary = FORCE, sql = null::point, type = class org.postgresql.geometric.PGpoint]`: "21286", - `org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobjectSubtype[binary = FORCE, sql = null::polygon, type = class org.postgresql.geometric.PGpolygon]`: "21286", - `org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobjectSubtype[binary = REGULAR, sql = null::box, type = class org.postgresql.geometric.PGbox]`: "21286", - `org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobjectSubtype[binary = REGULAR, sql = null::circle, type = class org.postgresql.geometric.PGcircle]`: "21286", - `org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobjectSubtype[binary = REGULAR, sql = null::line, type = class org.postgresql.geometric.PGline]`: "21286", - `org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobjectSubtype[binary = REGULAR, sql = null::lseg, type = class org.postgresql.geometric.PGlseg]`: "21286", - `org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobjectSubtype[binary = REGULAR, sql = null::money, type = class org.postgresql.util.PGmoney]`: "41578", - `org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobjectSubtype[binary = REGULAR, sql = null::path, type = class org.postgresql.geometric.PGpath]`: "21286", - `org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobjectSubtype[binary = REGULAR, sql = null::point, type = class org.postgresql.geometric.PGpoint]`: "21286", - `org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobjectSubtype[binary = REGULAR, sql = 
null::polygon, type = class org.postgresql.geometric.PGpolygon]`: "21286", - `org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobject[binary = FORCE, sql = null::box, type = class org.postgresql.geometric.PGbox]`: "21286", - `org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobject[binary = FORCE, sql = null::circle, type = class org.postgresql.geometric.PGcircle]`: "21286", - `org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobject[binary = FORCE, sql = null::line, type = class org.postgresql.geometric.PGline]`: "21286", - `org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobject[binary = FORCE, sql = null::lseg, type = class org.postgresql.geometric.PGlseg]`: "21286", - `org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobject[binary = FORCE, sql = null::money, type = class org.postgresql.util.PGmoney]`: "41578", - `org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobject[binary = FORCE, sql = null::path, type = class org.postgresql.geometric.PGpath]`: "21286", - `org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobject[binary = FORCE, sql = null::point, type = class org.postgresql.geometric.PGpoint]`: "21286", - `org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobject[binary = FORCE, sql = null::polygon, type = class org.postgresql.geometric.PGpolygon]`: "21286", - `org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobject[binary = REGULAR, sql = null::box, type = class org.postgresql.geometric.PGbox]`: "21286", - `org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobject[binary = REGULAR, sql = null::circle, type = class org.postgresql.geometric.PGcircle]`: "21286", - `org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobject[binary = REGULAR, sql = null::line, type = class org.postgresql.geometric.PGline]`: "21286", - `org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobject[binary = REGULAR, sql = null::lseg, type = class org.postgresql.geometric.PGlseg]`: "21286", - `org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobject[binary = REGULAR, sql = null::money, type = class org.postgresql.util.PGmoney]`: "41578", - `org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobject[binary = REGULAR, sql = null::path, type = class org.postgresql.geometric.PGpath]`: "21286", - `org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobject[binary = REGULAR, sql = null::point, type = class org.postgresql.geometric.PGpoint]`: "21286", - `org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobject[binary = REGULAR, sql = null::polygon, type = class org.postgresql.geometric.PGpolygon]`: "21286", - `org.postgresql.test.jdbc2.PGObjectGetTest.getAsString[binary = FORCE, sql = null::box, type = class org.postgresql.geometric.PGbox]`: "21286", - `org.postgresql.test.jdbc2.PGObjectGetTest.getAsString[binary = FORCE, sql = null::circle, type = class org.postgresql.geometric.PGcircle]`: "21286", - `org.postgresql.test.jdbc2.PGObjectGetTest.getAsString[binary = FORCE, sql = null::line, type = class org.postgresql.geometric.PGline]`: "21286", - `org.postgresql.test.jdbc2.PGObjectGetTest.getAsString[binary = FORCE, sql = null::lseg, type = class org.postgresql.geometric.PGlseg]`: "21286", - `org.postgresql.test.jdbc2.PGObjectGetTest.getAsString[binary = FORCE, sql = null::money, type = class org.postgresql.util.PGmoney]`: "41578", - `org.postgresql.test.jdbc2.PGObjectGetTest.getAsString[binary = FORCE, sql = null::path, type = class org.postgresql.geometric.PGpath]`: "21286", - `org.postgresql.test.jdbc2.PGObjectGetTest.getAsString[binary = FORCE, sql = null::point, type = class org.postgresql.geometric.PGpoint]`: "21286", - 
`org.postgresql.test.jdbc2.PGObjectGetTest.getAsString[binary = FORCE, sql = null::polygon, type = class org.postgresql.geometric.PGpolygon]`: "21286", - `org.postgresql.test.jdbc2.PGObjectGetTest.getAsString[binary = REGULAR, sql = null::box, type = class org.postgresql.geometric.PGbox]`: "21286", - `org.postgresql.test.jdbc2.PGObjectGetTest.getAsString[binary = REGULAR, sql = null::circle, type = class org.postgresql.geometric.PGcircle]`: "21286", - `org.postgresql.test.jdbc2.PGObjectGetTest.getAsString[binary = REGULAR, sql = null::line, type = class org.postgresql.geometric.PGline]`: "21286", - `org.postgresql.test.jdbc2.PGObjectGetTest.getAsString[binary = REGULAR, sql = null::lseg, type = class org.postgresql.geometric.PGlseg]`: "21286", - `org.postgresql.test.jdbc2.PGObjectGetTest.getAsString[binary = REGULAR, sql = null::money, type = class org.postgresql.util.PGmoney]`: "41578", - `org.postgresql.test.jdbc2.PGObjectGetTest.getAsString[binary = REGULAR, sql = null::path, type = class org.postgresql.geometric.PGpath]`: "21286", - `org.postgresql.test.jdbc2.PGObjectGetTest.getAsString[binary = REGULAR, sql = null::point, type = class org.postgresql.geometric.PGpoint]`: "21286", - `org.postgresql.test.jdbc2.PGObjectGetTest.getAsString[binary = REGULAR, sql = null::polygon, type = class org.postgresql.geometric.PGpolygon]`: "21286", - `org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobjectSubtype[binary = FORCE, sql = box, type = class org.postgresql.geometric.PGbox]`: "21286", - `org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobjectSubtype[binary = FORCE, sql = circle, type = class org.postgresql.geometric.PGcircle]`: "21286", - `org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobjectSubtype[binary = FORCE, sql = line, type = class org.postgresql.geometric.PGline]`: "21286", - `org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobjectSubtype[binary = FORCE, sql = lseg, type = class org.postgresql.geometric.PGlseg]`: "21286", - `org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobjectSubtype[binary = FORCE, sql = money, type = class org.postgresql.util.PGmoney]`: "41578", - `org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobjectSubtype[binary = FORCE, sql = path, type = class org.postgresql.geometric.PGpath]`: "21286", - `org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobjectSubtype[binary = FORCE, sql = point, type = class org.postgresql.geometric.PGpoint]`: "21286", - `org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobjectSubtype[binary = FORCE, sql = polygon, type = class org.postgresql.geometric.PGpolygon]`: "21286", - `org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobjectSubtype[binary = REGULAR, sql = box, type = class org.postgresql.geometric.PGbox]`: "21286", - `org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobjectSubtype[binary = REGULAR, sql = circle, type = class org.postgresql.geometric.PGcircle]`: "21286", - `org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobjectSubtype[binary = REGULAR, sql = line, type = class org.postgresql.geometric.PGline]`: "21286", - `org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobjectSubtype[binary = REGULAR, sql = lseg, type = class org.postgresql.geometric.PGlseg]`: "21286", - `org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobjectSubtype[binary = REGULAR, sql = money, type = class org.postgresql.util.PGmoney]`: "41578", - `org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobjectSubtype[binary = REGULAR, sql = path, type = class org.postgresql.geometric.PGpath]`: "21286", - 
`org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobjectSubtype[binary = REGULAR, sql = point, type = class org.postgresql.geometric.PGpoint]`: "21286", - `org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobjectSubtype[binary = REGULAR, sql = polygon, type = class org.postgresql.geometric.PGpolygon]`: "21286", - `org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobject[binary = FORCE, sql = box, type = class org.postgresql.geometric.PGbox]`: "21286", - `org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobject[binary = FORCE, sql = circle, type = class org.postgresql.geometric.PGcircle]`: "21286", - `org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobject[binary = FORCE, sql = line, type = class org.postgresql.geometric.PGline]`: "21286", - `org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobject[binary = FORCE, sql = lseg, type = class org.postgresql.geometric.PGlseg]`: "21286", - `org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobject[binary = FORCE, sql = money, type = class org.postgresql.util.PGmoney]`: "41578", - `org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobject[binary = FORCE, sql = path, type = class org.postgresql.geometric.PGpath]`: "21286", - `org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobject[binary = FORCE, sql = point, type = class org.postgresql.geometric.PGpoint]`: "21286", - `org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobject[binary = FORCE, sql = polygon, type = class org.postgresql.geometric.PGpolygon]`: "21286", - `org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobject[binary = REGULAR, sql = box, type = class org.postgresql.geometric.PGbox]`: "21286", - `org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobject[binary = REGULAR, sql = circle, type = class org.postgresql.geometric.PGcircle]`: "21286", - `org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobject[binary = REGULAR, sql = line, type = class org.postgresql.geometric.PGline]`: "21286", - `org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobject[binary = REGULAR, sql = lseg, type = class org.postgresql.geometric.PGlseg]`: "21286", - `org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobject[binary = REGULAR, sql = money, type = class org.postgresql.util.PGmoney]`: "41578", - `org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobject[binary = REGULAR, sql = path, type = class org.postgresql.geometric.PGpath]`: "21286", - `org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobject[binary = REGULAR, sql = point, type = class org.postgresql.geometric.PGpoint]`: "21286", - `org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobject[binary = REGULAR, sql = polygon, type = class org.postgresql.geometric.PGpolygon]`: "21286", - `org.postgresql.test.jdbc2.ParameterStatusTest.transactionalParametersRollback`: "69396", - `org.postgresql.test.jdbc2.PreparedStatementTest.testBatchWithPrepareThreshold5[binary = REGULAR]`: "5807", - `org.postgresql.test.jdbc2.PreparedStatementTest.testDoubleQuestionMark[binary = FORCE]`: "21286", - `org.postgresql.test.jdbc2.PreparedStatementTest.testDoubleQuestionMark[binary = REGULAR]`: "21286", - `org.postgresql.test.jdbc2.PreparedStatementTest.testSingleQuotes[binary = FORCE]`: "36215", - `org.postgresql.test.jdbc2.PreparedStatementTest.testSingleQuotes[binary = REGULAR]`: "36215", - `org.postgresql.test.jdbc2.PreparedStatementTest.testTrailingSpaces[binary = FORCE]`: "unknown", - `org.postgresql.test.jdbc2.PreparedStatementTest.testTrailingSpaces[binary = REGULAR]`: "unknown", - 
`org.postgresql.test.jdbc2.PreparedStatementTest.testUnknownSetObject[binary = FORCE]`: "41779", - `org.postgresql.test.jdbc2.PreparedStatementTest.testUnknownSetObject[binary = REGULAR]`: "41779", - `org.postgresql.test.jdbc2.RefCursorFetchTest.initializationError`: "unknown", - `org.postgresql.test.jdbc2.RefCursorTest.testEmptyResult[typeName = OTHER, cursorType = 1,111]`: "17511", - `org.postgresql.test.jdbc2.RefCursorTest.testEmptyResult[typeName = REF_CURSOR, cursorType = 2,012]`: "17511", - `org.postgresql.test.jdbc2.RefCursorTest.testMetaData[typeName = OTHER, cursorType = 1,111]`: "17511", - `org.postgresql.test.jdbc2.RefCursorTest.testMetaData[typeName = REF_CURSOR, cursorType = 2,012]`: "17511", - `org.postgresql.test.jdbc2.RefCursorTest.testResultType[typeName = OTHER, cursorType = 1,111]`: "17511", - `org.postgresql.test.jdbc2.RefCursorTest.testResultType[typeName = REF_CURSOR, cursorType = 2,012]`: "17511", - `org.postgresql.test.jdbc2.RefCursorTest.testResult[typeName = OTHER, cursorType = 1,111]`: "17511", - `org.postgresql.test.jdbc2.RefCursorTest.testResult[typeName = REF_CURSOR, cursorType = 2,012]`: "17511", - `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testCache[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = 0]`: "32565", - `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testCache[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = null]`: "32565", - `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testCache[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = 0]`: "32565", - `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testCache[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = null]`: "32565", - `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testClassesMatch[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = 0]`: "32565", - `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testClassesMatch[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = null]`: "32565", - `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testClassesMatch[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = 0]`: "32565", - `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testClassesMatch[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = null]`: "32565", - `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testClosedResultSet[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = 0]`: "32565", - `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testClosedResultSet[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = null]`: "32565", - `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testClosedResultSet[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = 0]`: "32565", - `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testClosedResultSet[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = null]`: "32565", - `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testColumnDisplaySize[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = 0]`: "32565", - `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testColumnDisplaySize[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = null]`: "32565", - `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testColumnDisplaySize[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = 0]`: "32565", - `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testColumnDisplaySize[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = 
null]`: "32565", - `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testComposite[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = 0]`: "32565", - `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testComposite[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = null]`: "32565", - `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testComposite[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = 0]`: "32565", - `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testComposite[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = null]`: "32565", - `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testDatabaseMetaDataNames[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = 0]`: "32565", - `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testDatabaseMetaDataNames[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = null]`: "32565", - `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testDatabaseMetaDataNames[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = 0]`: "32565", - `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testDatabaseMetaDataNames[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = null]`: "32565", - `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testIdentityColumn[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = 0]`: "32565", - `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testIdentityColumn[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = null]`: "32565", - `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testIdentityColumn[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = 0]`: "32565", - `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testIdentityColumn[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = null]`: "32565", - `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testIsAutoIncrement[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = 0]`: "32565", - `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testIsAutoIncrement[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = null]`: "32565", - `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testIsAutoIncrement[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = 0]`: "32565", - `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testIsAutoIncrement[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = null]`: "32565", - `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testPreparedResultSet[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = 0]`: "32565", - `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testPreparedResultSet[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = null]`: "32565", - `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testPreparedResultSet[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = 0]`: "32565", - `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testPreparedResultSet[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = null]`: "32565", - `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testPreparedUpdate[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = 0]`: "32565", - `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testPreparedUpdate[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = null]`: "32565", - `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testPreparedUpdate[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = 0]`: 
"32565", - `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testPreparedUpdate[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = null]`: "32565", - `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testSmallSerialColumns[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = 0]`: "48532", - `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testSmallSerialColumns[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = null]`: "48532", - `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testSmallSerialColumns[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = 0]`: "48532", - `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testSmallSerialColumns[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = null]`: "48532", - `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testSmallSerialSequenceLikeColumns[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = 0]`: "48532", - `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testSmallSerialSequenceLikeColumns[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = null]`: "48532", - `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testSmallSerialSequenceLikeColumns[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = 0]`: "48532", - `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testSmallSerialSequenceLikeColumns[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = null]`: "48532", - `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testStandardResultSet[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = 0]`: "32565", - `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testStandardResultSet[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = null]`: "32565", - `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testStandardResultSet[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = 0]`: "32565", - `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testStandardResultSet[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = null]`: "32565", - `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testTimestampInfo[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = 0]`: "32565", - `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testTimestampInfo[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = null]`: "32565", - `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testTimestampInfo[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = 0]`: "32565", - `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testTimestampInfo[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = null]`: "32565", - `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testUnexecutedStatement[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = 0]`: "32565", - `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testUnexecutedStatement[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = null]`: "32565", - `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testUnexecutedStatement[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = 0]`: "32565", - `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testUnexecutedStatement[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = null]`: "32565", - `org.postgresql.test.jdbc2.ResultSetTest.testRowResultPositioning[binary = FORCE]`: "unknown", - `org.postgresql.test.jdbc2.ResultSetTest.testRowResultPositioning[binary = REGULAR]`: "unknown", - 
`org.postgresql.test.jdbc2.ResultSetTest.testTimestamp[binary = FORCE]`: "unknown", - `org.postgresql.test.jdbc2.ResultSetTest.testTimestamp[binary = REGULAR]`: "unknown", - `org.postgresql.test.jdbc2.ResultSetTest.testgetBadBoolean[binary = FORCE]`: "32552", - `org.postgresql.test.jdbc2.ResultSetTest.testgetBadBoolean[binary = REGULAR]`: "unknown", - `org.postgresql.test.jdbc2.SearchPathLookupTest.searchPathNormalLookup()`: "unknown", - `org.postgresql.test.jdbc2.ServerCursorTest.testBinaryFetch`: "41412", - `org.postgresql.test.jdbc2.ServerErrorTest.testCheckConstraint`: "27796", - `org.postgresql.test.jdbc2.ServerErrorTest.testColumn`: "27796", - `org.postgresql.test.jdbc2.ServerErrorTest.testDatatype`: "27796", - `org.postgresql.test.jdbc2.ServerErrorTest.testExclusionConstraint`: "27796", - `org.postgresql.test.jdbc2.ServerErrorTest.testForeignKeyConstraint`: "27796", - `org.postgresql.test.jdbc2.ServerErrorTest.testNotNullConstraint`: "27796", - `org.postgresql.test.jdbc2.ServerErrorTest.testPrimaryKey`: "27796", - `org.postgresql.test.jdbc2.StatementTest.closeInProgressStatement()`: "unknown", - `org.postgresql.test.jdbc2.StatementTest.concurrentWarningReadAndClear()`: "unknown", - `org.postgresql.test.jdbc2.StatementTest.fastCloses()`: "unknown", - `org.postgresql.test.jdbc2.StatementTest.parsingSemiColons()`: "unknown", - `org.postgresql.test.jdbc2.StatementTest.updateCount()`: "unknown", - `org.postgresql.test.jdbc2.StatementTest.warningsAreAvailableAsap()`: "unknown", - `org.postgresql.test.jdbc2.TimeTest.getTimeZone()`: "unknown", - `org.postgresql.test.jdbc2.TimestampTest.testGetTimestampWOTZ[binary = FORCE]`: "41786", - `org.postgresql.test.jdbc2.TimestampTest.testGetTimestampWOTZ[binary = REGULAR]`: "41786", - `org.postgresql.test.jdbc2.TimestampTest.testInfinity[binary = FORCE]`: "41786", - `org.postgresql.test.jdbc2.TimestampTest.testInfinity[binary = REGULAR]`: "41786", - `org.postgresql.test.jdbc2.TimestampTest.testSetTimestampWOTZ[binary = FORCE]`: "41786", - `org.postgresql.test.jdbc2.TimestampTest.testSetTimestampWOTZ[binary = REGULAR]`: "41786", - `org.postgresql.test.jdbc2.UpdateableResultTest.testArray`: "26925", - `org.postgresql.test.jdbc2.UpdateableResultTest.testOidUpdatable`: "unknown", - `org.postgresql.test.jdbc3.CompositeTest.complexArgumentSelect()`: "51480", - `org.postgresql.test.jdbc3.CompositeTest.complexSelect()`: "51480", - `org.postgresql.test.jdbc3.CompositeTest.complexTableNameMetadata()`: "51480", - `org.postgresql.test.jdbc3.CompositeTest.compositeFromTable()`: "51480", - `org.postgresql.test.jdbc3.CompositeTest.nullArrayElement()`: "51480", - `org.postgresql.test.jdbc3.CompositeTest.simpleArgumentSelect()`: "51480", - `org.postgresql.test.jdbc3.CompositeTest.simpleSelect()`: "51480", - `org.postgresql.test.jdbc3.CompositeTest.tableMetadata()`: "51480", - `org.postgresql.test.jdbc3.DatabaseMetaDataTest.getColumnsForDomain()`: "27796", - `org.postgresql.test.jdbc3.EscapeSyntaxCallModeCallIfNoReturnTest.testInvokeFunction`: "17511", - `org.postgresql.test.jdbc3.EscapeSyntaxCallModeCallIfNoReturnTest.testInvokeFunctionHavingReturnParameter`: "17511", - `org.postgresql.test.jdbc3.EscapeSyntaxCallModeCallIfNoReturnTest.testInvokeProcedure`: "17511", - `org.postgresql.test.jdbc3.EscapeSyntaxCallModeCallTest.testInvokeFunction`: "17511", - `org.postgresql.test.jdbc3.EscapeSyntaxCallModeCallTest.testInvokeFunctionHavingReturnParameter`: "17511", - `org.postgresql.test.jdbc3.EscapeSyntaxCallModeCallTest.testInvokeProcedure`: "17511", - 
`org.postgresql.test.jdbc3.EscapeSyntaxCallModeSelectTest.testInvokeFunction`: "17511", - `org.postgresql.test.jdbc3.EscapeSyntaxCallModeSelectTest.testInvokeFunctionHavingReturnParameter`: "17511", - `org.postgresql.test.jdbc3.Jdbc3BlobTest.allBytes()`: "unknown", - `org.postgresql.test.jdbc3.Jdbc3BlobTest.allBytesStream()`: "unknown", - `org.postgresql.test.jdbc3.Jdbc3BlobTest.manyBytes()`: "unknown", - `org.postgresql.test.jdbc3.Jdbc3BlobTest.manyBytesOffset()`: "unknown", - `org.postgresql.test.jdbc3.Jdbc3BlobTest.manyBytesOffsetStream()`: "unknown", - `org.postgresql.test.jdbc3.Jdbc3BlobTest.manyBytesStream()`: "unknown", - `org.postgresql.test.jdbc3.Jdbc3BlobTest.pattern()`: "unknown", - `org.postgresql.test.jdbc3.Jdbc3BlobTest.test1Byte()`: "unknown", - `org.postgresql.test.jdbc3.Jdbc3BlobTest.test1ByteOffset()`: "unknown", - `org.postgresql.test.jdbc3.Jdbc3BlobTest.test1ByteOffsetStream()`: "unknown", - `org.postgresql.test.jdbc3.Jdbc3BlobTest.test1ByteStream()`: "unknown", - `org.postgresql.test.jdbc3.Jdbc3BlobTest.truncate()`: "unknown", - `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testAllInOut`: "121375", - `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testCall5Times`: "unknown", - `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testFunctionNoParametersWithParentheses`: "121375", - `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testFunctionNoParametersWithoutParentheses`: "121375", - `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testGetBit1WithoutArg`: "121375", - `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testGetBit2WithoutArg`: "121375", - `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testGetBoolean01`: "121375", - `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testGetBooleanWithoutArg`: "121375", - `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testGetByte01`: "121375", - `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testGetBytes01`: "121375", - `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testGetBytes02`: "121375", - `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testGetDouble01`: "121375", - `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testGetDoubleAsReal`: "121375", - `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testGetInt01`: "121375", - `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testGetLong01`: "121375", - `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testGetObjectDecimal`: "121375", - `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testGetObjectFloat`: "121375", - `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testGetObjectLongVarchar`: "121375", - `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testGetShort01`: "121375", - `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testInOut`: "121375", - `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testMultipleOutExecutions`: "121375", - `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testNotEnoughParameters`: "121375", - `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testNumeric`: "121375", - `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testProcedureInOnlyNativeCall`: "121375", - `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testProcedureInOutNativeCall`: "121375", - `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testProcedureNoParametersWithParentheses`: "121375", - `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testProcedureNoParametersWithoutParentheses`: "121375", - 
`org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testSetObjectBit`: "121375", - `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testSomeInOut`: "121375", - `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testSum`: "121375", - `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testTooManyParameters`: "121375", - `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testUpdateDecimal`: "121375", - `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testUpdateReal`: "121375", - `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testVarcharBool`: "121375", - `org.postgresql.test.jdbc3.ParameterMetaDataTest.testFailsOnBadIndex[binary = FORCE]`: "21286", - `org.postgresql.test.jdbc3.ParameterMetaDataTest.testFailsOnBadIndex[binary = REGULAR]`: "21286", - `org.postgresql.test.jdbc3.ParameterMetaDataTest.testMultiStatement[binary = FORCE]`: "21286", - `org.postgresql.test.jdbc3.ParameterMetaDataTest.testMultiStatement[binary = REGULAR]`: "21286", - `org.postgresql.test.jdbc3.ParameterMetaDataTest.testParameterMD[binary = FORCE]`: "21286", - `org.postgresql.test.jdbc3.ParameterMetaDataTest.testParameterMD[binary = REGULAR]`: "21286", - `org.postgresql.test.jdbc3.ParameterMetaDataTest.testTypeChangeWithUnknown[binary = FORCE]`: "21286", - `org.postgresql.test.jdbc3.ParameterMetaDataTest.testTypeChangeWithUnknown[binary = REGULAR]`: "21286", - `org.postgresql.test.jdbc3.ProcedureTransactionTest.testProcAutoCommitFalse`: "121375", - `org.postgresql.test.jdbc3.ProcedureTransactionTest.testProcAutoCommitTrue`: "121375", - `org.postgresql.test.jdbc3.ProcedureTransactionTest.testProcWithCommitInside`: "121375", - `org.postgresql.test.jdbc3.ProcedureTransactionTest.testProcWithNoTxnControl`: "121375", - `org.postgresql.test.jdbc3.ProcedureTransactionTest.testProcWithRollbackInside`: "121375", - `org.postgresql.test.jdbc3.StringTypeParameterTest.testMultipleEnumBinds[stringType = null]`: "27793", - `org.postgresql.test.jdbc3.StringTypeParameterTest.testMultipleEnumBinds[stringType = unspecified]`: "27793", - `org.postgresql.test.jdbc3.StringTypeParameterTest.testMultipleEnumBinds[stringType = varchar]`: "27793", - `org.postgresql.test.jdbc3.StringTypeParameterTest.testOtherAsEnum[stringType = null]`: "27793", - `org.postgresql.test.jdbc3.StringTypeParameterTest.testOtherAsEnum[stringType = unspecified]`: "27793", - `org.postgresql.test.jdbc3.StringTypeParameterTest.testOtherAsEnum[stringType = varchar]`: "27793", - `org.postgresql.test.jdbc3.StringTypeParameterTest.testParameterUnspecified[stringType = null]`: "27793", - `org.postgresql.test.jdbc3.StringTypeParameterTest.testParameterUnspecified[stringType = unspecified]`: "27793", - `org.postgresql.test.jdbc3.StringTypeParameterTest.testParameterUnspecified[stringType = varchar]`: "27793", - `org.postgresql.test.jdbc3.StringTypeParameterTest.testVarcharAsEnum[stringType = null]`: "27793", - `org.postgresql.test.jdbc3.StringTypeParameterTest.testVarcharAsEnum[stringType = unspecified]`: "27793", - `org.postgresql.test.jdbc3.StringTypeParameterTest.testVarcharAsEnum[stringType = varchar]`: "27793", - `org.postgresql.test.jdbc3.TypesTest.testCallableBoolean`: "17511", - `org.postgresql.test.jdbc4.ArrayTest.createNullArray[binary = FORCE]`: "32552", - `org.postgresql.test.jdbc4.ArrayTest.createNullArray[binary = REGULAR]`: "32552", - `org.postgresql.test.jdbc4.ArrayTest.insertAndQueryMultiDimArray[binary = FORCE]`: "32552", - `org.postgresql.test.jdbc4.ArrayTest.insertAndQueryMultiDimArray[binary = REGULAR]`: "32552", - 
`org.postgresql.test.jdbc4.ArrayTest.multiDimIntArray[binary = FORCE]`: "32552", - `org.postgresql.test.jdbc4.ArrayTest.multiDimIntArray[binary = REGULAR]`: "32552", - `org.postgresql.test.jdbc4.ArrayTest.nullArray[binary = FORCE]`: "32552", - `org.postgresql.test.jdbc4.ArrayTest.nullArray[binary = REGULAR]`: "32552", - `org.postgresql.test.jdbc4.ArrayTest.testCasingBuiltinAlias[binary = FORCE]`: "32552", - `org.postgresql.test.jdbc4.ArrayTest.testCasingBuiltinAlias[binary = REGULAR]`: "32552", - `org.postgresql.test.jdbc4.ArrayTest.testCasingBuiltinNonAlias[binary = FORCE]`: "32552", - `org.postgresql.test.jdbc4.ArrayTest.testCasingBuiltinNonAlias[binary = REGULAR]`: "32552", - `org.postgresql.test.jdbc4.ArrayTest.testCasingComposite[binary = FORCE]`: "32552", - `org.postgresql.test.jdbc4.ArrayTest.testCasingComposite[binary = REGULAR]`: "32552", - `org.postgresql.test.jdbc4.ArrayTest.testCreateArrayOfBool[binary = FORCE]`: "32552", - `org.postgresql.test.jdbc4.ArrayTest.testCreateArrayOfBool[binary = REGULAR]`: "32552", - `org.postgresql.test.jdbc4.ArrayTest.testCreateArrayOfBytesFromString[binary = FORCE]`: "32552", - `org.postgresql.test.jdbc4.ArrayTest.testCreateArrayOfBytesFromString[binary = REGULAR]`: "32552", - `org.postgresql.test.jdbc4.ArrayTest.testCreateArrayOfBytes[binary = FORCE]`: "32552", - `org.postgresql.test.jdbc4.ArrayTest.testCreateArrayOfBytes[binary = REGULAR]`: "32552", - `org.postgresql.test.jdbc4.ArrayTest.testCreateArrayOfInt[binary = FORCE]`: "32552", - `org.postgresql.test.jdbc4.ArrayTest.testCreateArrayOfInt[binary = REGULAR]`: "32552", - `org.postgresql.test.jdbc4.ArrayTest.testCreateArrayOfMultiJson[binary = FORCE]`: "32552", - `org.postgresql.test.jdbc4.ArrayTest.testCreateArrayOfMultiJson[binary = REGULAR]`: "32552", - `org.postgresql.test.jdbc4.ArrayTest.testCreateArrayOfMultiString[binary = FORCE]`: "32552", - `org.postgresql.test.jdbc4.ArrayTest.testCreateArrayOfMultiString[binary = REGULAR]`: "32552", - `org.postgresql.test.jdbc4.ArrayTest.testCreateArrayOfNull[binary = FORCE]`: "32552", - `org.postgresql.test.jdbc4.ArrayTest.testCreateArrayOfNull[binary = REGULAR]`: "32552", - `org.postgresql.test.jdbc4.ArrayTest.testCreateArrayOfSmallInt[binary = FORCE]`: "32552", - `org.postgresql.test.jdbc4.ArrayTest.testCreateArrayOfSmallInt[binary = REGULAR]`: "32552", - `org.postgresql.test.jdbc4.ArrayTest.testCreateArrayWithNonStandardDelimiter[binary = FORCE]`: "32552", - `org.postgresql.test.jdbc4.ArrayTest.testCreateArrayWithNonStandardDelimiter[binary = REGULAR]`: "32552", - `org.postgresql.test.jdbc4.ArrayTest.testCreateArrayWithoutServer[binary = FORCE]`: "32552", - `org.postgresql.test.jdbc4.ArrayTest.testCreateArrayWithoutServer[binary = REGULAR]`: "32552", - `org.postgresql.test.jdbc4.ArrayTest.testCreateEmptyArrayOfIntViaAlias[binary = FORCE]`: "32552", - `org.postgresql.test.jdbc4.ArrayTest.testCreateEmptyArrayOfIntViaAlias[binary = REGULAR]`: "32552", - `org.postgresql.test.jdbc4.ArrayTest.testCreatePrimitiveArray[binary = FORCE]`: "32552", - `org.postgresql.test.jdbc4.ArrayTest.testCreatePrimitiveArray[binary = REGULAR]`: "32552", - `org.postgresql.test.jdbc4.ArrayTest.testEvilCasing[binary = FORCE]`: "32552", - `org.postgresql.test.jdbc4.ArrayTest.testEvilCasing[binary = REGULAR]`: "32552", - `org.postgresql.test.jdbc4.ArrayTest.testGetArrayOfComposites[binary = FORCE]`: "32552", - `org.postgresql.test.jdbc4.ArrayTest.testGetArrayOfComposites[binary = REGULAR]`: "32552", - `org.postgresql.test.jdbc4.ArrayTest.testJsonbArray[binary = FORCE]`: 
"32552", - `org.postgresql.test.jdbc4.ArrayTest.testJsonbArray[binary = REGULAR]`: "32552", - `org.postgresql.test.jdbc4.ArrayTest.testSetObjectFromJavaArray[binary = FORCE]`: "32552", - `org.postgresql.test.jdbc4.ArrayTest.testSetObjectFromJavaArray[binary = REGULAR]`: "32552", - `org.postgresql.test.jdbc4.ArrayTest.testToString[binary = FORCE]`: "32552", - `org.postgresql.test.jdbc4.ArrayTest.testToString[binary = REGULAR]`: "32552", - `org.postgresql.test.jdbc4.ArrayTest.testUUIDArray[binary = FORCE]`: "32552", - `org.postgresql.test.jdbc4.ArrayTest.testUUIDArray[binary = REGULAR]`: "32552", - `org.postgresql.test.jdbc4.BlobTest.free()`: "unknown", - `org.postgresql.test.jdbc4.BlobTest.getBinaryStreamWithBoundaries()`: "unknown", - `org.postgresql.test.jdbc4.BlobTest.getBinaryStreamWithBoundaries2()`: "unknown", - `org.postgresql.test.jdbc4.BlobTest.setBlobWithStream()`: "unknown", - `org.postgresql.test.jdbc4.BlobTest.setBlobWithStreamAndLength()`: "unknown", - `org.postgresql.test.jdbc4.DatabaseMetaDataHideUnprivilegedObjectsTest.initializationError`: "17511", - `org.postgresql.test.jdbc4.DatabaseMetaDataTest.getColumnsForAutoIncrement()`: "unknown", - `org.postgresql.test.jdbc4.DatabaseMetaDataTest.getFunctionsWithSpecificTypes()`: "100226", - `org.postgresql.test.jdbc4.DatabaseMetaDataTest.getSqlTypes()`: "unknown", - `org.postgresql.test.jdbc4.IsValidTest.testIsValidRemoteClose`: "35897", - `org.postgresql.test.jdbc4.UUIDTest.testUUIDString[binary=FORCE, stringType=VARCHAR]`: "5807", - `org.postgresql.test.jdbc4.UUIDTest.testUUIDString[binary=REGULAR, stringType=VARCHAR]`: "5807", - `org.postgresql.test.jdbc4.jdbc41.DriverSupportsClassUnloadingTest.driverUnloadsWhenConnectionClosedExplicitly()`: "unknown", - `org.postgresql.test.jdbc4.jdbc41.DriverSupportsClassUnloadingTest.driverUnloadsWhenConnectionLeaks()`: "unknown", - `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getArray()`: "21286", - `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getBigDecimal()`: "21286", - `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getBigInteger()`: "21286", - `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getBlob()`: "21286", - `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getBoolean()`: "21286", - `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getBooleanNull()`: "21286", - `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getBox()`: "21286", - `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getCalendar()`: "21286", - `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getCircle()`: "21286", - `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getClob()`: "21286", - `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getDate()`: "21286", - `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getDouble()`: "21286", - `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getDoubleNull()`: "21286", - `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getFloat()`: "21286", - `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getFloatNull()`: "21286", - `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getInet4Address()`: "21286", - `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getInet6Address()`: "21286", - `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getInetAddressNull()`: "21286", - `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getInteger()`: "21286", - `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getIntegerNull()`: "21286", - `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getInterval()`: "21286", - `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getJavaUtilDate()`: "21286", - 
`org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getLine()`: "21286", - `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getLineseg()`: "21286", - `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getLong()`: "21286", - `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getLongNull()`: "21286", - `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getNullDate()`: "21286", - `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getNullTimestamp()`: "21286", - `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getPath()`: "21286", - `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getPoint()`: "21286", - `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getPolygon()`: "21286", - `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getSerial()`: "21286", - `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getShort()`: "21286", - `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getShortNull()`: "21286", - `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getString()`: "21286", - `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getTime()`: "21286", - `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getTimestamp()`: "21286", - `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getTimestampWithTimeZone()`: "21286", - `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getUuid()`: "21286", - `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getXml()`: "21286", - `org.postgresql.test.jdbc4.jdbc41.SchemaTest.searchPathPreparedStatement()`: "unknown", - `org.postgresql.test.jdbc4.jdbc41.SchemaTest.searchPathPreparedStatementAutoCommitFalse()`: "unknown", - `org.postgresql.test.jdbc4.jdbc41.SchemaTest.searchPathPreparedStatementAutoCommitTrue()`: "unknown", - `org.postgresql.test.jdbc42.DatabaseMetaDataTest.getColumnsForNullScale()`: "51480", - `org.postgresql.test.jdbc42.DatabaseMetaDataTest.getCorrectSQLTypeForOffPathTypes()`: "51480", - `org.postgresql.test.jdbc42.DatabaseMetaDataTest.getCorrectSQLTypeForShadowedTypes()`: "51480", - `org.postgresql.test.jdbc42.DatabaseMetaDataTest.largeOidIsHandledCorrectly()`: "51480", - `org.postgresql.test.jdbc42.DatabaseMetaDataTest.oidConversion()`: "51480", - `org.postgresql.test.jdbc42.DatabaseMetaDataTest.oidConversionThrowsForNegativeLongValues()`: "51480", - `org.postgresql.test.jdbc42.DatabaseMetaDataTest.oidConversionThrowsForTooLargeLongValues()`: "51480", - `org.postgresql.test.jdbc42.GetObject310InfinityTests.test[binary = FORCE, expr = -infinity, pgType = timestamp with time zone, klass = class java.time.OffsetDateTime]`: "41786", - `org.postgresql.test.jdbc42.GetObject310InfinityTests.test[binary = FORCE, expr = -infinity, pgType = timestamp, klass = class java.time.LocalDateTime]`: "41786", - `org.postgresql.test.jdbc42.GetObject310InfinityTests.test[binary = FORCE, expr = -infinity, pgType = timestamp, klass = class java.time.OffsetDateTime]`: "41786", - `org.postgresql.test.jdbc42.GetObject310InfinityTests.test[binary = FORCE, expr = infinity, pgType = timestamp with time zone, klass = class java.time.OffsetDateTime]`: "41786", - `org.postgresql.test.jdbc42.GetObject310InfinityTests.test[binary = FORCE, expr = infinity, pgType = timestamp, klass = class java.time.LocalDateTime]`: "41786", - `org.postgresql.test.jdbc42.GetObject310InfinityTests.test[binary = FORCE, expr = infinity, pgType = timestamp, klass = class java.time.OffsetDateTime]`: "41786", - `org.postgresql.test.jdbc42.GetObject310InfinityTests.test[binary = REGULAR, expr = -infinity, pgType = timestamp with time zone, klass = class java.time.OffsetDateTime]`: "41786", - 
`org.postgresql.test.jdbc42.GetObject310InfinityTests.test[binary = REGULAR, expr = -infinity, pgType = timestamp, klass = class java.time.LocalDateTime]`: "41786", - `org.postgresql.test.jdbc42.GetObject310InfinityTests.test[binary = REGULAR, expr = -infinity, pgType = timestamp, klass = class java.time.OffsetDateTime]`: "41786", - `org.postgresql.test.jdbc42.GetObject310InfinityTests.test[binary = REGULAR, expr = infinity, pgType = timestamp with time zone, klass = class java.time.OffsetDateTime]`: "41786", - `org.postgresql.test.jdbc42.GetObject310InfinityTests.test[binary = REGULAR, expr = infinity, pgType = timestamp, klass = class java.time.LocalDateTime]`: "41786", - `org.postgresql.test.jdbc42.GetObject310InfinityTests.test[binary = REGULAR, expr = infinity, pgType = timestamp, klass = class java.time.OffsetDateTime]`: "41786", - `org.postgresql.test.jdbc42.GetObject310Test.testGetOffsetTime[binary = FORCE]`: "unknown", - `org.postgresql.test.jdbc42.GetObject310Test.testGetOffsetTime[binary = REGULAR]`: "unknown", - `org.postgresql.test.jdbc42.Jdbc42CallableStatementTest.testGetResultSetWithoutArg`: "17511", - `org.postgresql.test.jdbc42.Jdbc42CallableStatementTest.testGetResultSetWithoutArgUnsupportedConversion`: "17511", - `org.postgresql.test.jdbc42.SetObject310InfinityTests.testTimestamp[binary = FORCE]`: "41564", - `org.postgresql.test.jdbc42.SetObject310InfinityTests.testTimestamp[binary = REGULAR]`: "41564", - `org.postgresql.test.jdbc42.SetObject310InfinityTests.testTimestamptz[binary = FORCE]`: "41564", - `org.postgresql.test.jdbc42.SetObject310InfinityTests.testTimestamptz[binary = REGULAR]`: "41564", - `org.postgresql.test.plugin.AuthenticationPluginTest.authPluginMD5()`: "unknown", - `org.postgresql.test.util.PasswordUtilTest.alterUserPasswordSupportsNullEncoding()`: "73337", - `org.postgresql.test.util.PasswordUtilTest.customScramParams()`: "unknown", - `org.postgresql.test.util.PasswordUtilTest.encodePasswordWithServersPasswordEncryption()`: "unknown", - `org.postgresql.test.util.PasswordUtilTest.encryptionTypeValueOfOff()`: "73337", - `org.postgresql.test.util.PasswordUtilTest.encryptionTypeValueOfOn()`: "73337", - `org.postgresql.test.util.PasswordUtilTest.mD5()`: "73337", - `org.postgresql.test.util.PasswordUtilTest.scramSha256()`: "unknown", - `org.postgresql.test.xa.XADataSourceTest.initializationError`: "unknown", + `org.postgresql.core.OidValuesCorrectnessTest.testValue[oidName=BOX, oidValue=603]`: "unknown", + `org.postgresql.core.OidValuesCorrectnessTest.testValue[oidName=BOX_ARRAY, oidValue=1,020]`: "unknown", + `org.postgresql.core.OidValuesCorrectnessTest.testValue[oidName=CIDR, oidValue=650]`: "unknown", + `org.postgresql.core.OidValuesCorrectnessTest.testValue[oidName=CIRCLE, oidValue=718]`: "unknown", + `org.postgresql.core.OidValuesCorrectnessTest.testValue[oidName=JSON, oidValue=114]`: "unknown", + `org.postgresql.core.OidValuesCorrectnessTest.testValue[oidName=JSON_ARRAY, oidValue=199]`: "unknown", + `org.postgresql.core.OidValuesCorrectnessTest.testValue[oidName=LINE, oidValue=628]`: "unknown", + `org.postgresql.core.OidValuesCorrectnessTest.testValue[oidName=LSEG, oidValue=601]`: "unknown", + `org.postgresql.core.OidValuesCorrectnessTest.testValue[oidName=MACADDR, oidValue=829]`: "unknown", + `org.postgresql.core.OidValuesCorrectnessTest.testValue[oidName=MACADDR8, oidValue=774]`: "unknown", + `org.postgresql.core.OidValuesCorrectnessTest.testValue[oidName=MONEY, oidValue=790]`: "unknown", + 
`org.postgresql.core.OidValuesCorrectnessTest.testValue[oidName=MONEY_ARRAY, oidValue=791]`: "unknown", + `org.postgresql.core.OidValuesCorrectnessTest.testValue[oidName=PATH, oidValue=602]`: "unknown", + `org.postgresql.core.OidValuesCorrectnessTest.testValue[oidName=POINT, oidValue=600]`: "unknown", + `org.postgresql.core.OidValuesCorrectnessTest.testValue[oidName=POINT_ARRAY, oidValue=1,017]`: "unknown", + `org.postgresql.core.OidValuesCorrectnessTest.testValue[oidName=POLYGON, oidValue=604]`: "unknown", + `org.postgresql.core.OidValuesCorrectnessTest.testValue[oidName=XML, oidValue=142]`: "unknown", + `org.postgresql.core.OidValuesCorrectnessTest.testValue[oidName=XML_ARRAY, oidValue=143]`: "unknown", + `org.postgresql.jdbc.BitFieldTest.TestGetObjectForBitFields`: "unknown", + `org.postgresql.jdbc.BitFieldTest.TestSetBitParameter`: "unknown", + `org.postgresql.jdbc.LargeObjectManagerTest.objectWriteThenRead()`: "unknown", + `org.postgresql.jdbc.LargeObjectManagerTest.openWithErrorAndSubsequentParameterStatusMessageShouldLeaveConnectionInUsableStateAndUpdateParameterStatus()`: "unknown", + `org.postgresql.jdbc.UUIDArrayTest.initializationError`: "32552", + `org.postgresql.replication.CopyBothResponseTest.keedAliveContainsCorrectLSN()`: "unknown", + `org.postgresql.replication.CopyBothResponseTest.openConnectByReplicationProtocol()`: "unknown", + `org.postgresql.replication.CopyBothResponseTest.receiveKeepAliveMessage()`: "unknown", + `org.postgresql.replication.CopyBothResponseTest.receiveXLogData()`: "unknown", + `org.postgresql.replication.LogicalReplicationStatusTest.applyLocationDoNotChangeDuringReceiveMessage()`: "unknown", + `org.postgresql.replication.LogicalReplicationStatusTest.applyLocationDoNotDependOnFlushLocation()`: "unknown", + `org.postgresql.replication.LogicalReplicationStatusTest.applyLocationEqualToSetLocation()`: "unknown", + `org.postgresql.replication.LogicalReplicationStatusTest.flushLocationDoNotChangeDuringReceiveMessage()`: "unknown", + `org.postgresql.replication.LogicalReplicationStatusTest.flushLocationEqualToSetLocation()`: "unknown", + `org.postgresql.replication.LogicalReplicationStatusTest.keepAliveServerLSNCanBeUsedToAdvanceFlushLSN()`: "unknown", + `org.postgresql.replication.LogicalReplicationStatusTest.lastReceiveLSNCorrectOnView()`: "unknown", + `org.postgresql.replication.LogicalReplicationStatusTest.receivedLSNDependentOnProcessMessage()`: "unknown", + `org.postgresql.replication.LogicalReplicationStatusTest.sentLocationEqualToLastReceiveLSN()`: "unknown", + `org.postgresql.replication.LogicalReplicationStatusTest.statusCanBeSentToBackendAsynchronously()`: "unknown", + `org.postgresql.replication.LogicalReplicationStatusTest.writeLocationCanBeLessThanSendLocation()`: "unknown", + `org.postgresql.replication.LogicalReplicationTest.afterCloseConnectionDBSLotStatusNotActive()`: "unknown", + `org.postgresql.replication.LogicalReplicationTest.afterCloseReplicationStreamDBSlotStatusNotActive()`: "unknown", + `org.postgresql.replication.LogicalReplicationTest.afterStartStreamingDBSlotStatusActive()`: "unknown", + `org.postgresql.replication.LogicalReplicationTest.avoidTimeoutDisconnectWithDefaultStatusInterval()`: "unknown", + `org.postgresql.replication.LogicalReplicationTest.doesNotHavePendingMessageWhenStartFromLastLSN()`: "unknown", + `org.postgresql.replication.LogicalReplicationTest.duringSendBigTransactionConnectionCloseSlotStatusNotActive()`: "unknown", + 
`org.postgresql.replication.LogicalReplicationTest.duringSendBigTransactionReplicationStreamCloseNotActive()`: "unknown", + `org.postgresql.replication.LogicalReplicationTest.notAvailableStartNotExistReplicationSlot()`: "unknown", + `org.postgresql.replication.LogicalReplicationTest.readActualChangesWithoutBlock()`: "unknown", + `org.postgresql.replication.LogicalReplicationTest.readPreviousChangesWithoutBlock()`: "unknown", + `org.postgresql.replication.LogicalReplicationTest.receiveChangesAfterStartReplication()`: "unknown", + `org.postgresql.replication.LogicalReplicationTest.receiveChangesOccursBeforeStartReplication()`: "unknown", + `org.postgresql.replication.LogicalReplicationTest.repeatWalPositionTwice()`: "unknown", + `org.postgresql.replication.LogicalReplicationTest.replicationRestartFromLastFeedbackPosition()`: "unknown", + `org.postgresql.replication.LogicalReplicationTest.replicationRestartFromLastFeedbackPositionParallelTransaction()`: "unknown", + `org.postgresql.replication.LogicalReplicationTest.restartReplicationFromRestartSlotLSNWhenFeedbackAbsent()`: "unknown", + `org.postgresql.replication.LogicalReplicationTest.startFromCurrentServerLSNWithoutSpecifyLSNExplicitly()`: "unknown", + `org.postgresql.replication.PhysicalReplicationTest.afterCloseReplicationStreamDBSlotStatusNotActive()`: "unknown", + `org.postgresql.replication.PhysicalReplicationTest.afterStartStreamingDBSlotStatusActive()`: "unknown", + `org.postgresql.replication.PhysicalReplicationTest.receiveChangesWithReplicationSlot()`: "unknown", + `org.postgresql.replication.PhysicalReplicationTest.receiveChangesWithoutReplicationSlot()`: "unknown", + `org.postgresql.replication.PhysicalReplicationTest.restartPhysicalReplicationWithoutRepeatMessage()`: "unknown", + `org.postgresql.replication.PhysicalReplicationTest.walRecordCanBeRepeatBeRestartReplication()`: "unknown", + `org.postgresql.replication.ReplicationConnectionTest.connectionNotValidWhenSessionTerminated()`: "unknown", + `org.postgresql.replication.ReplicationSlotTest.createLogicalSlot()`: "unknown", + `org.postgresql.replication.ReplicationSlotTest.createLogicalSlotReturnedInfo()`: "unknown", + `org.postgresql.replication.ReplicationSlotTest.createPhysicalSlot()`: "unknown", + `org.postgresql.replication.ReplicationSlotTest.createPhysicalSlotReturnedInfo()`: "unknown", + `org.postgresql.replication.ReplicationSlotTest.createTemporaryLogicalSlotPg10AndHigher()`: "unknown", + `org.postgresql.replication.ReplicationSlotTest.createTemporaryPhysicalSlotPg10AndHigher()`: "unknown", + `org.postgresql.replication.ReplicationSlotTest.dropLogicalSlot()`: "unknown", + `org.postgresql.replication.ReplicationSlotTest.dropPhysicalSlot()`: "unknown", + `org.postgresql.test.core.LogServerMessagePropertyTest.batchExplicitlyEnabled()`: "unknown", + `org.postgresql.test.core.LogServerMessagePropertyTest.batchWithDefaults()`: "unknown", + `org.postgresql.test.core.LogServerMessagePropertyTest.batchWithLogServerErrorDetailDisabled()`: "unknown", + `org.postgresql.test.core.LogServerMessagePropertyTest.withDefaults()`: "unknown", + `org.postgresql.test.core.LogServerMessagePropertyTest.withExplicitlyEnabled()`: "unknown", + `org.postgresql.test.core.LogServerMessagePropertyTest.withLogServerErrorDetailDisabled()`: "unknown", + `org.postgresql.test.jdbc2.ArrayTest.testEscaping[binary = FORCE]`: "32552", + `org.postgresql.test.jdbc2.ArrayTest.testEscaping[binary = REGULAR]`: "32552", + `org.postgresql.test.jdbc2.ArrayTest.testIndexAccess[binary = FORCE]`: "32552", + 
`org.postgresql.test.jdbc2.ArrayTest.testIndexAccess[binary = REGULAR]`: "32552", + `org.postgresql.test.jdbc2.ArrayTest.testMultiDimensionalArray[binary = FORCE]`: "32552", + `org.postgresql.test.jdbc2.ArrayTest.testMultiDimensionalArray[binary = REGULAR]`: "32552", + `org.postgresql.test.jdbc2.ArrayTest.testNonStandardBounds[binary = FORCE]`: "41405", + `org.postgresql.test.jdbc2.ArrayTest.testNonStandardBounds[binary = REGULAR]`: "41405", + `org.postgresql.test.jdbc2.ArrayTest.testNonStandardDelimiter[binary = FORCE]`: "21286", + `org.postgresql.test.jdbc2.ArrayTest.testNonStandardDelimiter[binary = REGULAR]`: "21286", + `org.postgresql.test.jdbc2.ArrayTest.testNullValues[binary = FORCE]`: "26925", + `org.postgresql.test.jdbc2.ArrayTest.testNullValues[binary = REGULAR]`: "26925", + `org.postgresql.test.jdbc2.ArrayTest.testRecursiveResultSets[binary = FORCE]`: "32552", + `org.postgresql.test.jdbc2.ArrayTest.testRecursiveResultSets[binary = REGULAR]`: "32552", + `org.postgresql.test.jdbc2.ArrayTest.testRetrieveArrays[binary = FORCE]`: "41405", + `org.postgresql.test.jdbc2.ArrayTest.testRetrieveArrays[binary = REGULAR]`: "41405", + `org.postgresql.test.jdbc2.ArrayTest.testRetrieveResultSets[binary = FORCE]`: "41405", + `org.postgresql.test.jdbc2.ArrayTest.testRetrieveResultSets[binary = REGULAR]`: "41405", + `org.postgresql.test.jdbc2.ArrayTest.testSetArray[binary = FORCE]`: "26925", + `org.postgresql.test.jdbc2.ArrayTest.testSetArray[binary = REGULAR]`: "26925", + `org.postgresql.test.jdbc2.ArrayTest.testSetPrimitiveArraysObjects[binary = FORCE]`: "26925", + `org.postgresql.test.jdbc2.ArrayTest.testSetPrimitiveArraysObjects[binary = REGULAR]`: "26925", + `org.postgresql.test.jdbc2.ArrayTest.testSetPrimitiveObjects[binary = FORCE]`: "26925", + `org.postgresql.test.jdbc2.ArrayTest.testSetPrimitiveObjects[binary = REGULAR]`: "26925", + `org.postgresql.test.jdbc2.ArrayTest.testStringEscaping[binary = FORCE]`: "unknown", + `org.postgresql.test.jdbc2.ArrayTest.testStringEscaping[binary = REGULAR]`: "unknown", + `org.postgresql.test.jdbc2.ArrayTest.testUnknownArrayType[binary = FORCE]`: "unknown", + `org.postgresql.test.jdbc2.ArrayTest.testUnknownArrayType[binary = REGULAR]`: "unknown", + `org.postgresql.test.jdbc2.ArrayTest.testWriteMultiDimensional[binary = FORCE]`: "32552", + `org.postgresql.test.jdbc2.ArrayTest.testWriteMultiDimensional[binary = REGULAR]`: "32552", + `org.postgresql.test.jdbc2.BatchExecuteTest.testBatchWithAlternatingTypes[binary = FORCE, insertRewrite = false]`: "41513", + `org.postgresql.test.jdbc2.BatchExecuteTest.testBatchWithAlternatingTypes[binary = FORCE, insertRewrite = true]`: "41513", + `org.postgresql.test.jdbc2.BatchExecuteTest.testBatchWithAlternatingTypes[binary = REGULAR, insertRewrite = false]`: "41513", + `org.postgresql.test.jdbc2.BatchExecuteTest.testBatchWithAlternatingTypes[binary = REGULAR, insertRewrite = true]`: "41513", + `org.postgresql.test.jdbc2.BatchExecuteTest.testBatchWithEmbeddedNulls[binary = FORCE, insertRewrite = false]`: "26366", + `org.postgresql.test.jdbc2.BatchExecuteTest.testBatchWithEmbeddedNulls[binary = FORCE, insertRewrite = true]`: "26366", + `org.postgresql.test.jdbc2.BatchExecuteTest.testBatchWithEmbeddedNulls[binary = REGULAR, insertRewrite = false]`: "26366", + `org.postgresql.test.jdbc2.BatchExecuteTest.testBatchWithEmbeddedNulls[binary = REGULAR, insertRewrite = true]`: "26366", + `org.postgresql.test.jdbc2.BlobTest.initializationError`: "unknown", + `org.postgresql.test.jdbc2.BlobTransactionTest.concurrentReplace()`: 
"102201", + `org.postgresql.test.jdbc2.CallableStmtTest.testBadStmt`: "17511", + `org.postgresql.test.jdbc2.CallableStmtTest.testBatchCall`: "17511", + `org.postgresql.test.jdbc2.CallableStmtTest.testFetchBeforeExecute`: "17511", + `org.postgresql.test.jdbc2.CallableStmtTest.testFetchWithNoResults`: "17511", + `org.postgresql.test.jdbc2.CallableStmtTest.testGetArray`: "17511", + `org.postgresql.test.jdbc2.CallableStmtTest.testGetDouble`: "17511", + `org.postgresql.test.jdbc2.CallableStmtTest.testGetInt`: "17511", + `org.postgresql.test.jdbc2.CallableStmtTest.testGetNumeric`: "17511", + `org.postgresql.test.jdbc2.CallableStmtTest.testGetNumericWithoutArg`: "17511", + `org.postgresql.test.jdbc2.CallableStmtTest.testGetShort`: "17511", + `org.postgresql.test.jdbc2.CallableStmtTest.testGetString`: "17511", + `org.postgresql.test.jdbc2.CallableStmtTest.testGetUpdateCount`: "17511", + `org.postgresql.test.jdbc2.CallableStmtTest.testRaiseNotice`: "17511", + `org.postgresql.test.jdbc2.CallableStmtTest.testWasNullBeforeFetch`: "17511", + `org.postgresql.test.jdbc2.ClientEncodingTest.setEncodingAscii[allowEncodingChanges=false]`: "37129", + `org.postgresql.test.jdbc2.ClientEncodingTest.setEncodingAscii[allowEncodingChanges=true]`: "37129", + `org.postgresql.test.jdbc2.ConnectionTest.createStatement()`: "41578", + `org.postgresql.test.jdbc2.ConnectionTest.doubleClose()`: "41578", + `org.postgresql.test.jdbc2.ConnectionTest.getTypeMapEmpty()`: "41578", + `org.postgresql.test.jdbc2.ConnectionTest.isClosed()`: "41578", + `org.postgresql.test.jdbc2.ConnectionTest.nativeSQL()`: "41578", + `org.postgresql.test.jdbc2.ConnectionTest.pGStreamSettings()`: "41578", + `org.postgresql.test.jdbc2.ConnectionTest.prepareCall()`: "41578", + `org.postgresql.test.jdbc2.ConnectionTest.prepareStatement()`: "41578", + `org.postgresql.test.jdbc2.ConnectionTest.readOnly_always()`: "41578", + `org.postgresql.test.jdbc2.ConnectionTest.readOnly_ignore()`: "41578", + `org.postgresql.test.jdbc2.ConnectionTest.readOnly_transaction()`: "41578", + `org.postgresql.test.jdbc2.ConnectionTest.transactionIsolation()`: "41578", + `org.postgresql.test.jdbc2.ConnectionTest.transactions()`: "41578", + `org.postgresql.test.jdbc2.ConnectionTest.typeMaps()`: "41578", + `org.postgresql.test.jdbc2.ConnectionTest.warnings()`: "41578", + `org.postgresql.test.jdbc2.CopyTest.changeDateStyle()`: "unknown", + `org.postgresql.test.jdbc2.CopyTest.copyMultiApi()`: "unknown", + `org.postgresql.test.jdbc2.CopyTest.lockReleaseOnCancelFailure()`: "unknown", + `org.postgresql.test.jdbc2.CustomTypeWithBinaryTransferTest.initializationError`: "21286", + `org.postgresql.test.jdbc2.DatabaseMetaDataCacheTest.getSQLTypeQueryCache()`: "unknown", + `org.postgresql.test.jdbc2.DatabaseMetaDataTest.binary = FORCE`: "32552", + `org.postgresql.test.jdbc2.DatabaseMetaDataTest.binary = REGULAR`: "32552", + `org.postgresql.test.jdbc2.DatabaseMetaDataTransactionIsolationTest.[1] 8`: "unknown", + `org.postgresql.test.jdbc2.DatabaseMetaDataTransactionIsolationTest.[1] read committed`: "unknown", + `org.postgresql.test.jdbc2.DatabaseMetaDataTransactionIsolationTest.[2] 4`: "unknown", + `org.postgresql.test.jdbc2.DatabaseMetaDataTransactionIsolationTest.[2] read uncommitted`: "unknown", + `org.postgresql.test.jdbc2.DatabaseMetaDataTransactionIsolationTest.[3] 2`: "unknown", + `org.postgresql.test.jdbc2.DatabaseMetaDataTransactionIsolationTest.[3] repeatable read`: "unknown", + `org.postgresql.test.jdbc2.DatabaseMetaDataTransactionIsolationTest.[4] 1`: "unknown", + 
`org.postgresql.test.jdbc2.DatabaseMetaDataTransactionIsolationTest.[4] serializable`: "unknown", + `org.postgresql.test.jdbc2.DatabaseMetaDataTransactionIsolationTest.connectionTransactionIsolation()`: "unknown", + `org.postgresql.test.jdbc2.DatabaseMetaDataTransactionIsolationTest.metadataDefaultTransactionIsolation()`: "unknown", + `org.postgresql.test.jdbc2.DateStyleTest.connect[dateStyle=PostgreSQL, shouldPass=false]`: "unknown", + `org.postgresql.test.jdbc2.EnumTest.enumArrayArray[binary = FORCE]`: "51480", + `org.postgresql.test.jdbc2.EnumTest.enumArrayArray[binary = REGULAR]`: "51480", + `org.postgresql.test.jdbc2.EnumTest.enumArray[binary = FORCE]`: "51480", + `org.postgresql.test.jdbc2.EnumTest.enumArray[binary = REGULAR]`: "51480", + `org.postgresql.test.jdbc2.GeometricTest.testPGbox[binary = FORCE]`: "21286", + `org.postgresql.test.jdbc2.GeometricTest.testPGbox[binary = REGULAR]`: "21286", + `org.postgresql.test.jdbc2.GeometricTest.testPGcircle[binary = FORCE]`: "21286", + `org.postgresql.test.jdbc2.GeometricTest.testPGcircle[binary = REGULAR]`: "21286", + `org.postgresql.test.jdbc2.GeometricTest.testPGline[binary = FORCE]`: "21286", + `org.postgresql.test.jdbc2.GeometricTest.testPGline[binary = REGULAR]`: "21286", + `org.postgresql.test.jdbc2.GeometricTest.testPGlseg[binary = FORCE]`: "21286", + `org.postgresql.test.jdbc2.GeometricTest.testPGlseg[binary = REGULAR]`: "21286", + `org.postgresql.test.jdbc2.GeometricTest.testPGpath[binary = FORCE]`: "21286", + `org.postgresql.test.jdbc2.GeometricTest.testPGpath[binary = REGULAR]`: "21286", + `org.postgresql.test.jdbc2.GeometricTest.testPGpoint[binary = FORCE]`: "21286", + `org.postgresql.test.jdbc2.GeometricTest.testPGpoint[binary = REGULAR]`: "21286", + `org.postgresql.test.jdbc2.GeometricTest.testPGpolygon[binary = FORCE]`: "21286", + `org.postgresql.test.jdbc2.GeometricTest.testPGpolygon[binary = REGULAR]`: "21286", + `org.postgresql.test.jdbc2.JBuilderTest.money()`: "41578", + `org.postgresql.test.jdbc2.NotifyTest.asyncNotify()`: "unknown", + `org.postgresql.test.jdbc2.NotifyTest.asyncNotifyWithEndlessTimeoutAndMessagesAvailableWhenStartingListening()`: "unknown", + `org.postgresql.test.jdbc2.NotifyTest.asyncNotifyWithEndlessTimeoutAndMessagesSendAfter()`: "unknown", + `org.postgresql.test.jdbc2.NotifyTest.asyncNotifyWithTimeout()`: "unknown", + `org.postgresql.test.jdbc2.NotifyTest.asyncNotifyWithTimeoutAndMessagesAvailableWhenStartingListening()`: "unknown", + `org.postgresql.test.jdbc2.NotifyTest.asyncNotifyWithTimeoutAndMessagesSendAfter()`: "unknown", + `org.postgresql.test.jdbc2.NotifyTest.asyncNotifyWithTimeoutAndSocketThatBecomesClosed()`: "unknown", + `org.postgresql.test.jdbc2.NotifyTest.notifyArgument()`: "unknown", + `org.postgresql.test.jdbc2.NotifyTest.testNotify()`: "unknown", + `org.postgresql.test.jdbc2.NumericTransferTest.receive100000[binary = REGULAR]`: "unknown", + `org.postgresql.test.jdbc2.NumericTransferTest.sendReceive100000[binary = REGULAR]`: "unknown", + `org.postgresql.test.jdbc2.NumericTransferTest2.sendReceiveValue[binary = FORCE, value = -1]`: "unknown", + `org.postgresql.test.jdbc2.NumericTransferTest2.sendReceiveValue[binary = FORCE, value = 0.1]`: "unknown", + `org.postgresql.test.jdbc2.NumericTransferTest2.sendReceiveValue[binary = FORCE, value = 0]`: "unknown", + `org.postgresql.test.jdbc2.NumericTransferTest2.sendReceiveValue[binary = FORCE, value = 1,000,000]`: "unknown", + `org.postgresql.test.jdbc2.NumericTransferTest2.sendReceiveValue[binary = FORCE, value = 1.1]`: "unknown", + 
`org.postgresql.test.jdbc2.NumericTransferTest2.sendReceiveValue[binary = FORCE, value = 10,000,000,000,000,000,000,000,000,000,000,000,000]`: "unknown", + `org.postgresql.test.jdbc2.NumericTransferTest2.sendReceiveValue[binary = FORCE, value = 1]`: "unknown", + `org.postgresql.test.jdbc2.NumericTransferTest2.sendReceiveValue[binary = FORCE, value = 20,000]`: "unknown", + `org.postgresql.test.jdbc2.NumericTransferTest2.sendReceiveValue[binary = FORCE, value = 40,000]`: "unknown", + `org.postgresql.test.jdbc2.NumericTransferTest2.sendReceiveValue[binary = FORCE, value = 9,990,000]`: "unknown", + `org.postgresql.test.jdbc2.NumericTransferTest2.sendReceiveValue[binary = REGULAR, value = -1]`: "unknown", + `org.postgresql.test.jdbc2.NumericTransferTest2.sendReceiveValue[binary = REGULAR, value = 0.1]`: "unknown", + `org.postgresql.test.jdbc2.NumericTransferTest2.sendReceiveValue[binary = REGULAR, value = 0]`: "unknown", + `org.postgresql.test.jdbc2.NumericTransferTest2.sendReceiveValue[binary = REGULAR, value = 1,000,000]`: "unknown", + `org.postgresql.test.jdbc2.NumericTransferTest2.sendReceiveValue[binary = REGULAR, value = 1.1]`: "unknown", + `org.postgresql.test.jdbc2.NumericTransferTest2.sendReceiveValue[binary = REGULAR, value = 10,000,000,000,000,000,000,000,000,000,000,000,000]`: "unknown", + `org.postgresql.test.jdbc2.NumericTransferTest2.sendReceiveValue[binary = REGULAR, value = 1]`: "unknown", + `org.postgresql.test.jdbc2.NumericTransferTest2.sendReceiveValue[binary = REGULAR, value = 20,000]`: "unknown", + `org.postgresql.test.jdbc2.NumericTransferTest2.sendReceiveValue[binary = REGULAR, value = 40,000]`: "unknown", + `org.postgresql.test.jdbc2.NumericTransferTest2.sendReceiveValue[binary = REGULAR, value = 9,990,000]`: "unknown", + `org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobjectSubtype[binary = FORCE, sql = null::box, type = class org.postgresql.geometric.PGbox]`: "21286", + `org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobjectSubtype[binary = FORCE, sql = null::circle, type = class org.postgresql.geometric.PGcircle]`: "21286", + `org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobjectSubtype[binary = FORCE, sql = null::line, type = class org.postgresql.geometric.PGline]`: "21286", + `org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobjectSubtype[binary = FORCE, sql = null::lseg, type = class org.postgresql.geometric.PGlseg]`: "21286", + `org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobjectSubtype[binary = FORCE, sql = null::money, type = class org.postgresql.util.PGmoney]`: "41578", + `org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobjectSubtype[binary = FORCE, sql = null::path, type = class org.postgresql.geometric.PGpath]`: "21286", + `org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobjectSubtype[binary = FORCE, sql = null::point, type = class org.postgresql.geometric.PGpoint]`: "21286", + `org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobjectSubtype[binary = FORCE, sql = null::polygon, type = class org.postgresql.geometric.PGpolygon]`: "21286", + `org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobjectSubtype[binary = REGULAR, sql = null::box, type = class org.postgresql.geometric.PGbox]`: "21286", + `org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobjectSubtype[binary = REGULAR, sql = null::circle, type = class org.postgresql.geometric.PGcircle]`: "21286", + `org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobjectSubtype[binary = REGULAR, sql = null::line, type = class org.postgresql.geometric.PGline]`: "21286", + 
`org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobjectSubtype[binary = REGULAR, sql = null::lseg, type = class org.postgresql.geometric.PGlseg]`: "21286", + `org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobjectSubtype[binary = REGULAR, sql = null::money, type = class org.postgresql.util.PGmoney]`: "41578", + `org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobjectSubtype[binary = REGULAR, sql = null::path, type = class org.postgresql.geometric.PGpath]`: "21286", + `org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobjectSubtype[binary = REGULAR, sql = null::point, type = class org.postgresql.geometric.PGpoint]`: "21286", + `org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobjectSubtype[binary = REGULAR, sql = null::polygon, type = class org.postgresql.geometric.PGpolygon]`: "21286", + `org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobject[binary = FORCE, sql = null::box, type = class org.postgresql.geometric.PGbox]`: "21286", + `org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobject[binary = FORCE, sql = null::circle, type = class org.postgresql.geometric.PGcircle]`: "21286", + `org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobject[binary = FORCE, sql = null::line, type = class org.postgresql.geometric.PGline]`: "21286", + `org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobject[binary = FORCE, sql = null::lseg, type = class org.postgresql.geometric.PGlseg]`: "21286", + `org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobject[binary = FORCE, sql = null::money, type = class org.postgresql.util.PGmoney]`: "41578", + `org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobject[binary = FORCE, sql = null::path, type = class org.postgresql.geometric.PGpath]`: "21286", + `org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobject[binary = FORCE, sql = null::point, type = class org.postgresql.geometric.PGpoint]`: "21286", + `org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobject[binary = FORCE, sql = null::polygon, type = class org.postgresql.geometric.PGpolygon]`: "21286", + `org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobject[binary = REGULAR, sql = null::box, type = class org.postgresql.geometric.PGbox]`: "21286", + `org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobject[binary = REGULAR, sql = null::circle, type = class org.postgresql.geometric.PGcircle]`: "21286", + `org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobject[binary = REGULAR, sql = null::line, type = class org.postgresql.geometric.PGline]`: "21286", + `org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobject[binary = REGULAR, sql = null::lseg, type = class org.postgresql.geometric.PGlseg]`: "21286", + `org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobject[binary = REGULAR, sql = null::money, type = class org.postgresql.util.PGmoney]`: "41578", + `org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobject[binary = REGULAR, sql = null::path, type = class org.postgresql.geometric.PGpath]`: "21286", + `org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobject[binary = REGULAR, sql = null::point, type = class org.postgresql.geometric.PGpoint]`: "21286", + `org.postgresql.test.jdbc2.PGObjectGetTest.getAsPGobject[binary = REGULAR, sql = null::polygon, type = class org.postgresql.geometric.PGpolygon]`: "21286", + `org.postgresql.test.jdbc2.PGObjectGetTest.getAsString[binary = FORCE, sql = null::box, type = class org.postgresql.geometric.PGbox]`: "21286", + `org.postgresql.test.jdbc2.PGObjectGetTest.getAsString[binary = FORCE, sql = null::circle, type = class org.postgresql.geometric.PGcircle]`: "21286", + 
`org.postgresql.test.jdbc2.PGObjectGetTest.getAsString[binary = FORCE, sql = null::line, type = class org.postgresql.geometric.PGline]`: "21286", + `org.postgresql.test.jdbc2.PGObjectGetTest.getAsString[binary = FORCE, sql = null::lseg, type = class org.postgresql.geometric.PGlseg]`: "21286", + `org.postgresql.test.jdbc2.PGObjectGetTest.getAsString[binary = FORCE, sql = null::money, type = class org.postgresql.util.PGmoney]`: "41578", + `org.postgresql.test.jdbc2.PGObjectGetTest.getAsString[binary = FORCE, sql = null::path, type = class org.postgresql.geometric.PGpath]`: "21286", + `org.postgresql.test.jdbc2.PGObjectGetTest.getAsString[binary = FORCE, sql = null::point, type = class org.postgresql.geometric.PGpoint]`: "21286", + `org.postgresql.test.jdbc2.PGObjectGetTest.getAsString[binary = FORCE, sql = null::polygon, type = class org.postgresql.geometric.PGpolygon]`: "21286", + `org.postgresql.test.jdbc2.PGObjectGetTest.getAsString[binary = REGULAR, sql = null::box, type = class org.postgresql.geometric.PGbox]`: "21286", + `org.postgresql.test.jdbc2.PGObjectGetTest.getAsString[binary = REGULAR, sql = null::circle, type = class org.postgresql.geometric.PGcircle]`: "21286", + `org.postgresql.test.jdbc2.PGObjectGetTest.getAsString[binary = REGULAR, sql = null::line, type = class org.postgresql.geometric.PGline]`: "21286", + `org.postgresql.test.jdbc2.PGObjectGetTest.getAsString[binary = REGULAR, sql = null::lseg, type = class org.postgresql.geometric.PGlseg]`: "21286", + `org.postgresql.test.jdbc2.PGObjectGetTest.getAsString[binary = REGULAR, sql = null::money, type = class org.postgresql.util.PGmoney]`: "41578", + `org.postgresql.test.jdbc2.PGObjectGetTest.getAsString[binary = REGULAR, sql = null::path, type = class org.postgresql.geometric.PGpath]`: "21286", + `org.postgresql.test.jdbc2.PGObjectGetTest.getAsString[binary = REGULAR, sql = null::point, type = class org.postgresql.geometric.PGpoint]`: "21286", + `org.postgresql.test.jdbc2.PGObjectGetTest.getAsString[binary = REGULAR, sql = null::polygon, type = class org.postgresql.geometric.PGpolygon]`: "21286", + `org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobjectSubtype[binary = FORCE, sql = box, type = class org.postgresql.geometric.PGbox]`: "21286", + `org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobjectSubtype[binary = FORCE, sql = circle, type = class org.postgresql.geometric.PGcircle]`: "21286", + `org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobjectSubtype[binary = FORCE, sql = line, type = class org.postgresql.geometric.PGline]`: "21286", + `org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobjectSubtype[binary = FORCE, sql = lseg, type = class org.postgresql.geometric.PGlseg]`: "21286", + `org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobjectSubtype[binary = FORCE, sql = money, type = class org.postgresql.util.PGmoney]`: "41578", + `org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobjectSubtype[binary = FORCE, sql = path, type = class org.postgresql.geometric.PGpath]`: "21286", + `org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobjectSubtype[binary = FORCE, sql = point, type = class org.postgresql.geometric.PGpoint]`: "21286", + `org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobjectSubtype[binary = FORCE, sql = polygon, type = class org.postgresql.geometric.PGpolygon]`: "21286", + `org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobjectSubtype[binary = REGULAR, sql = box, type = class org.postgresql.geometric.PGbox]`: "21286", + 
`org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobjectSubtype[binary = REGULAR, sql = circle, type = class org.postgresql.geometric.PGcircle]`: "21286", + `org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobjectSubtype[binary = REGULAR, sql = line, type = class org.postgresql.geometric.PGline]`: "21286", + `org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobjectSubtype[binary = REGULAR, sql = lseg, type = class org.postgresql.geometric.PGlseg]`: "21286", + `org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobjectSubtype[binary = REGULAR, sql = money, type = class org.postgresql.util.PGmoney]`: "41578", + `org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobjectSubtype[binary = REGULAR, sql = path, type = class org.postgresql.geometric.PGpath]`: "21286", + `org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobjectSubtype[binary = REGULAR, sql = point, type = class org.postgresql.geometric.PGpoint]`: "21286", + `org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobjectSubtype[binary = REGULAR, sql = polygon, type = class org.postgresql.geometric.PGpolygon]`: "21286", + `org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobject[binary = FORCE, sql = box, type = class org.postgresql.geometric.PGbox]`: "21286", + `org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobject[binary = FORCE, sql = circle, type = class org.postgresql.geometric.PGcircle]`: "21286", + `org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobject[binary = FORCE, sql = line, type = class org.postgresql.geometric.PGline]`: "21286", + `org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobject[binary = FORCE, sql = lseg, type = class org.postgresql.geometric.PGlseg]`: "21286", + `org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobject[binary = FORCE, sql = money, type = class org.postgresql.util.PGmoney]`: "41578", + `org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobject[binary = FORCE, sql = path, type = class org.postgresql.geometric.PGpath]`: "21286", + `org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobject[binary = FORCE, sql = point, type = class org.postgresql.geometric.PGpoint]`: "21286", + `org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobject[binary = FORCE, sql = polygon, type = class org.postgresql.geometric.PGpolygon]`: "21286", + `org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobject[binary = REGULAR, sql = box, type = class org.postgresql.geometric.PGbox]`: "21286", + `org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobject[binary = REGULAR, sql = circle, type = class org.postgresql.geometric.PGcircle]`: "21286", + `org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobject[binary = REGULAR, sql = line, type = class org.postgresql.geometric.PGline]`: "21286", + `org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobject[binary = REGULAR, sql = lseg, type = class org.postgresql.geometric.PGlseg]`: "21286", + `org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobject[binary = REGULAR, sql = money, type = class org.postgresql.util.PGmoney]`: "41578", + `org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobject[binary = REGULAR, sql = path, type = class org.postgresql.geometric.PGpath]`: "21286", + `org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobject[binary = REGULAR, sql = point, type = class org.postgresql.geometric.PGpoint]`: "21286", + `org.postgresql.test.jdbc2.PGObjectSetTest.setNullAsPGobject[binary = REGULAR, sql = polygon, type = class org.postgresql.geometric.PGpolygon]`: "21286", + 
`org.postgresql.test.jdbc2.ParameterStatusTest.transactionalParametersRollback`: "69396", + `org.postgresql.test.jdbc2.PreparedStatementTest.testBatchWithPrepareThreshold5[binary = REGULAR]`: "5807", + `org.postgresql.test.jdbc2.PreparedStatementTest.testDoubleQuestionMark[binary = FORCE]`: "21286", + `org.postgresql.test.jdbc2.PreparedStatementTest.testDoubleQuestionMark[binary = REGULAR]`: "21286", + `org.postgresql.test.jdbc2.PreparedStatementTest.testSingleQuotes[binary = FORCE]`: "36215", + `org.postgresql.test.jdbc2.PreparedStatementTest.testSingleQuotes[binary = REGULAR]`: "36215", + `org.postgresql.test.jdbc2.PreparedStatementTest.testTrailingSpaces[binary = FORCE]`: "unknown", + `org.postgresql.test.jdbc2.PreparedStatementTest.testTrailingSpaces[binary = REGULAR]`: "unknown", + `org.postgresql.test.jdbc2.PreparedStatementTest.testUnknownSetObject[binary = FORCE]`: "41779", + `org.postgresql.test.jdbc2.PreparedStatementTest.testUnknownSetObject[binary = REGULAR]`: "41779", + `org.postgresql.test.jdbc2.RefCursorFetchTest.initializationError`: "unknown", + `org.postgresql.test.jdbc2.RefCursorTest.testEmptyResult[typeName = OTHER, cursorType = 1,111]`: "17511", + `org.postgresql.test.jdbc2.RefCursorTest.testEmptyResult[typeName = REF_CURSOR, cursorType = 2,012]`: "17511", + `org.postgresql.test.jdbc2.RefCursorTest.testMetaData[typeName = OTHER, cursorType = 1,111]`: "17511", + `org.postgresql.test.jdbc2.RefCursorTest.testMetaData[typeName = REF_CURSOR, cursorType = 2,012]`: "17511", + `org.postgresql.test.jdbc2.RefCursorTest.testResultType[typeName = OTHER, cursorType = 1,111]`: "17511", + `org.postgresql.test.jdbc2.RefCursorTest.testResultType[typeName = REF_CURSOR, cursorType = 2,012]`: "17511", + `org.postgresql.test.jdbc2.RefCursorTest.testResult[typeName = OTHER, cursorType = 1,111]`: "17511", + `org.postgresql.test.jdbc2.RefCursorTest.testResult[typeName = REF_CURSOR, cursorType = 2,012]`: "17511", + `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testCache[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = 0]`: "32565", + `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testCache[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = null]`: "32565", + `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testCache[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = 0]`: "32565", + `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testCache[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = null]`: "32565", + `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testClassesMatch[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = 0]`: "32565", + `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testClassesMatch[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = null]`: "32565", + `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testClassesMatch[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = 0]`: "32565", + `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testClassesMatch[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = null]`: "32565", + `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testClosedResultSet[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = 0]`: "32565", + `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testClosedResultSet[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = null]`: "32565", + `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testClosedResultSet[databaseMetadataCacheFields = null, 
databaseMetadataCacheFieldsMib = 0]`: "32565", + `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testClosedResultSet[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = null]`: "32565", + `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testColumnDisplaySize[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = 0]`: "32565", + `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testColumnDisplaySize[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = null]`: "32565", + `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testColumnDisplaySize[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = 0]`: "32565", + `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testColumnDisplaySize[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = null]`: "32565", + `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testComposite[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = 0]`: "32565", + `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testComposite[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = null]`: "32565", + `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testComposite[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = 0]`: "32565", + `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testComposite[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = null]`: "32565", + `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testDatabaseMetaDataNames[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = 0]`: "32565", + `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testDatabaseMetaDataNames[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = null]`: "32565", + `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testDatabaseMetaDataNames[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = 0]`: "32565", + `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testDatabaseMetaDataNames[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = null]`: "32565", + `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testIdentityColumn[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = 0]`: "32565", + `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testIdentityColumn[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = null]`: "32565", + `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testIdentityColumn[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = 0]`: "32565", + `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testIdentityColumn[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = null]`: "32565", + `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testIsAutoIncrement[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = 0]`: "32565", + `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testIsAutoIncrement[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = null]`: "32565", + `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testIsAutoIncrement[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = 0]`: "32565", + `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testIsAutoIncrement[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = null]`: "32565", + `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testPreparedResultSet[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = 0]`: "32565", + `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testPreparedResultSet[databaseMetadataCacheFields = 0, 
databaseMetadataCacheFieldsMib = null]`: "32565", + `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testPreparedResultSet[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = 0]`: "32565", + `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testPreparedResultSet[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = null]`: "32565", + `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testPreparedUpdate[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = 0]`: "32565", + `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testPreparedUpdate[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = null]`: "32565", + `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testPreparedUpdate[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = 0]`: "32565", + `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testPreparedUpdate[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = null]`: "32565", + `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testSmallSerialColumns[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = 0]`: "48532", + `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testSmallSerialColumns[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = null]`: "48532", + `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testSmallSerialColumns[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = 0]`: "48532", + `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testSmallSerialColumns[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = null]`: "48532", + `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testSmallSerialSequenceLikeColumns[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = 0]`: "48532", + `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testSmallSerialSequenceLikeColumns[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = null]`: "48532", + `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testSmallSerialSequenceLikeColumns[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = 0]`: "48532", + `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testSmallSerialSequenceLikeColumns[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = null]`: "48532", + `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testStandardResultSet[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = 0]`: "32565", + `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testStandardResultSet[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = null]`: "32565", + `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testStandardResultSet[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = 0]`: "32565", + `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testStandardResultSet[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = null]`: "32565", + `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testTimestampInfo[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = 0]`: "32565", + `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testTimestampInfo[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = null]`: "32565", + `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testTimestampInfo[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = 0]`: "32565", + `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testTimestampInfo[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = null]`: "32565", + 
`org.postgresql.test.jdbc2.ResultSetMetaDataTest.testUnexecutedStatement[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = 0]`: "32565", + `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testUnexecutedStatement[databaseMetadataCacheFields = 0, databaseMetadataCacheFieldsMib = null]`: "32565", + `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testUnexecutedStatement[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = 0]`: "32565", + `org.postgresql.test.jdbc2.ResultSetMetaDataTest.testUnexecutedStatement[databaseMetadataCacheFields = null, databaseMetadataCacheFieldsMib = null]`: "32565", + `org.postgresql.test.jdbc2.ResultSetTest.testRowResultPositioning[binary = FORCE]`: "unknown", + `org.postgresql.test.jdbc2.ResultSetTest.testRowResultPositioning[binary = REGULAR]`: "unknown", + `org.postgresql.test.jdbc2.ResultSetTest.testTimestamp[binary = FORCE]`: "unknown", + `org.postgresql.test.jdbc2.ResultSetTest.testTimestamp[binary = REGULAR]`: "unknown", + `org.postgresql.test.jdbc2.ResultSetTest.testgetBadBoolean[binary = FORCE]`: "32552", + `org.postgresql.test.jdbc2.ResultSetTest.testgetBadBoolean[binary = REGULAR]`: "unknown", + `org.postgresql.test.jdbc2.SearchPathLookupTest.searchPathNormalLookup()`: "unknown", + `org.postgresql.test.jdbc2.ServerCursorTest.testBinaryFetch`: "41412", + `org.postgresql.test.jdbc2.ServerErrorTest.testCheckConstraint`: "27796", + `org.postgresql.test.jdbc2.ServerErrorTest.testColumn`: "27796", + `org.postgresql.test.jdbc2.ServerErrorTest.testDatatype`: "27796", + `org.postgresql.test.jdbc2.ServerErrorTest.testExclusionConstraint`: "27796", + `org.postgresql.test.jdbc2.ServerErrorTest.testForeignKeyConstraint`: "27796", + `org.postgresql.test.jdbc2.ServerErrorTest.testNotNullConstraint`: "27796", + `org.postgresql.test.jdbc2.ServerErrorTest.testPrimaryKey`: "27796", + `org.postgresql.test.jdbc2.StatementTest.closeInProgressStatement()`: "unknown", + `org.postgresql.test.jdbc2.StatementTest.concurrentWarningReadAndClear()`: "unknown", + `org.postgresql.test.jdbc2.StatementTest.fastCloses()`: "unknown", + `org.postgresql.test.jdbc2.StatementTest.parsingSemiColons()`: "unknown", + `org.postgresql.test.jdbc2.StatementTest.updateCount()`: "unknown", + `org.postgresql.test.jdbc2.StatementTest.warningsAreAvailableAsap()`: "unknown", + `org.postgresql.test.jdbc2.TimeTest.getTimeZone()`: "unknown", + `org.postgresql.test.jdbc2.UpdateableResultTest.testArray`: "26925", + `org.postgresql.test.jdbc2.UpdateableResultTest.testOidUpdatable`: "unknown", + `org.postgresql.test.jdbc3.CompositeTest.complexArgumentSelect()`: "51480", + `org.postgresql.test.jdbc3.CompositeTest.complexSelect()`: "51480", + `org.postgresql.test.jdbc3.CompositeTest.complexTableNameMetadata()`: "51480", + `org.postgresql.test.jdbc3.CompositeTest.compositeFromTable()`: "51480", + `org.postgresql.test.jdbc3.CompositeTest.nullArrayElement()`: "51480", + `org.postgresql.test.jdbc3.CompositeTest.simpleArgumentSelect()`: "51480", + `org.postgresql.test.jdbc3.CompositeTest.simpleSelect()`: "51480", + `org.postgresql.test.jdbc3.CompositeTest.tableMetadata()`: "51480", + `org.postgresql.test.jdbc3.DatabaseMetaDataTest.getColumnsForDomain()`: "27796", + `org.postgresql.test.jdbc3.EscapeSyntaxCallModeCallIfNoReturnTest.testInvokeFunction`: "17511", + `org.postgresql.test.jdbc3.EscapeSyntaxCallModeCallIfNoReturnTest.testInvokeFunctionHavingReturnParameter`: "17511", + `org.postgresql.test.jdbc3.EscapeSyntaxCallModeCallIfNoReturnTest.testInvokeProcedure`: "17511", + 
`org.postgresql.test.jdbc3.EscapeSyntaxCallModeCallTest.testInvokeFunction`: "17511", + `org.postgresql.test.jdbc3.EscapeSyntaxCallModeCallTest.testInvokeFunctionHavingReturnParameter`: "17511", + `org.postgresql.test.jdbc3.EscapeSyntaxCallModeCallTest.testInvokeProcedure`: "17511", + `org.postgresql.test.jdbc3.EscapeSyntaxCallModeSelectTest.testInvokeFunction`: "17511", + `org.postgresql.test.jdbc3.EscapeSyntaxCallModeSelectTest.testInvokeFunctionHavingReturnParameter`: "17511", + `org.postgresql.test.jdbc3.Jdbc3BlobTest.allBytes()`: "unknown", + `org.postgresql.test.jdbc3.Jdbc3BlobTest.allBytesStream()`: "unknown", + `org.postgresql.test.jdbc3.Jdbc3BlobTest.manyBytes()`: "unknown", + `org.postgresql.test.jdbc3.Jdbc3BlobTest.manyBytesOffset()`: "unknown", + `org.postgresql.test.jdbc3.Jdbc3BlobTest.manyBytesOffsetStream()`: "unknown", + `org.postgresql.test.jdbc3.Jdbc3BlobTest.manyBytesStream()`: "unknown", + `org.postgresql.test.jdbc3.Jdbc3BlobTest.pattern()`: "unknown", + `org.postgresql.test.jdbc3.Jdbc3BlobTest.test1Byte()`: "unknown", + `org.postgresql.test.jdbc3.Jdbc3BlobTest.test1ByteOffset()`: "unknown", + `org.postgresql.test.jdbc3.Jdbc3BlobTest.test1ByteOffsetStream()`: "unknown", + `org.postgresql.test.jdbc3.Jdbc3BlobTest.test1ByteStream()`: "unknown", + `org.postgresql.test.jdbc3.Jdbc3BlobTest.truncate()`: "unknown", + `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testAllInOut`: "121375", + `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testCall5Times`: "unknown", + `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testFunctionNoParametersWithParentheses`: "121375", + `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testFunctionNoParametersWithoutParentheses`: "121375", + `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testGetBit1WithoutArg`: "121375", + `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testGetBit2WithoutArg`: "121375", + `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testGetBoolean01`: "121375", + `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testGetBooleanWithoutArg`: "121375", + `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testGetByte01`: "121375", + `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testGetBytes01`: "121375", + `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testGetBytes02`: "121375", + `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testGetDouble01`: "121375", + `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testGetDoubleAsReal`: "121375", + `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testGetInt01`: "121375", + `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testGetLong01`: "121375", + `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testGetObjectDecimal`: "121375", + `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testGetObjectFloat`: "121375", + `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testGetObjectLongVarchar`: "121375", + `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testGetShort01`: "121375", + `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testInOut`: "121375", + `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testMultipleOutExecutions`: "121375", + `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testNotEnoughParameters`: "121375", + `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testNumeric`: "121375", + `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testProcedureInOnlyNativeCall`: "121375", + 
`org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testProcedureInOutNativeCall`: "121375", + `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testProcedureNoParametersWithParentheses`: "121375", + `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testProcedureNoParametersWithoutParentheses`: "121375", + `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testSetObjectBit`: "121375", + `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testSomeInOut`: "121375", + `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testSum`: "121375", + `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testTooManyParameters`: "121375", + `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testUpdateDecimal`: "121375", + `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testUpdateReal`: "121375", + `org.postgresql.test.jdbc3.Jdbc3CallableStatementTest.testVarcharBool`: "121375", + `org.postgresql.test.jdbc3.ParameterMetaDataTest.testFailsOnBadIndex[binary = FORCE]`: "21286", + `org.postgresql.test.jdbc3.ParameterMetaDataTest.testFailsOnBadIndex[binary = REGULAR]`: "21286", + `org.postgresql.test.jdbc3.ParameterMetaDataTest.testMultiStatement[binary = FORCE]`: "21286", + `org.postgresql.test.jdbc3.ParameterMetaDataTest.testMultiStatement[binary = REGULAR]`: "21286", + `org.postgresql.test.jdbc3.ParameterMetaDataTest.testParameterMD[binary = FORCE]`: "21286", + `org.postgresql.test.jdbc3.ParameterMetaDataTest.testParameterMD[binary = REGULAR]`: "21286", + `org.postgresql.test.jdbc3.ParameterMetaDataTest.testTypeChangeWithUnknown[binary = FORCE]`: "21286", + `org.postgresql.test.jdbc3.ParameterMetaDataTest.testTypeChangeWithUnknown[binary = REGULAR]`: "21286", + `org.postgresql.test.jdbc3.ProcedureTransactionTest.testProcAutoCommitFalse`: "121375", + `org.postgresql.test.jdbc3.ProcedureTransactionTest.testProcAutoCommitTrue`: "121375", + `org.postgresql.test.jdbc3.ProcedureTransactionTest.testProcWithCommitInside`: "121375", + `org.postgresql.test.jdbc3.ProcedureTransactionTest.testProcWithNoTxnControl`: "121375", + `org.postgresql.test.jdbc3.ProcedureTransactionTest.testProcWithRollbackInside`: "121375", + `org.postgresql.test.jdbc3.StringTypeParameterTest.testMultipleEnumBinds[stringType = null]`: "27793", + `org.postgresql.test.jdbc3.StringTypeParameterTest.testMultipleEnumBinds[stringType = unspecified]`: "27793", + `org.postgresql.test.jdbc3.StringTypeParameterTest.testMultipleEnumBinds[stringType = varchar]`: "27793", + `org.postgresql.test.jdbc3.StringTypeParameterTest.testOtherAsEnum[stringType = null]`: "27793", + `org.postgresql.test.jdbc3.StringTypeParameterTest.testOtherAsEnum[stringType = unspecified]`: "27793", + `org.postgresql.test.jdbc3.StringTypeParameterTest.testOtherAsEnum[stringType = varchar]`: "27793", + `org.postgresql.test.jdbc3.StringTypeParameterTest.testParameterUnspecified[stringType = null]`: "27793", + `org.postgresql.test.jdbc3.StringTypeParameterTest.testParameterUnspecified[stringType = unspecified]`: "27793", + `org.postgresql.test.jdbc3.StringTypeParameterTest.testParameterUnspecified[stringType = varchar]`: "27793", + `org.postgresql.test.jdbc3.StringTypeParameterTest.testVarcharAsEnum[stringType = null]`: "27793", + `org.postgresql.test.jdbc3.StringTypeParameterTest.testVarcharAsEnum[stringType = unspecified]`: "27793", + `org.postgresql.test.jdbc3.StringTypeParameterTest.testVarcharAsEnum[stringType = varchar]`: "27793", + `org.postgresql.test.jdbc3.TypesTest.testCallableBoolean`: "17511", + 
`org.postgresql.test.jdbc4.ArrayTest.createNullArray[binary = FORCE]`: "32552", + `org.postgresql.test.jdbc4.ArrayTest.createNullArray[binary = REGULAR]`: "32552", + `org.postgresql.test.jdbc4.ArrayTest.insertAndQueryMultiDimArray[binary = FORCE]`: "32552", + `org.postgresql.test.jdbc4.ArrayTest.insertAndQueryMultiDimArray[binary = REGULAR]`: "32552", + `org.postgresql.test.jdbc4.ArrayTest.multiDimIntArray[binary = FORCE]`: "32552", + `org.postgresql.test.jdbc4.ArrayTest.multiDimIntArray[binary = REGULAR]`: "32552", + `org.postgresql.test.jdbc4.ArrayTest.nullArray[binary = FORCE]`: "32552", + `org.postgresql.test.jdbc4.ArrayTest.nullArray[binary = REGULAR]`: "32552", + `org.postgresql.test.jdbc4.ArrayTest.testCasingBuiltinAlias[binary = FORCE]`: "32552", + `org.postgresql.test.jdbc4.ArrayTest.testCasingBuiltinAlias[binary = REGULAR]`: "32552", + `org.postgresql.test.jdbc4.ArrayTest.testCasingBuiltinNonAlias[binary = FORCE]`: "32552", + `org.postgresql.test.jdbc4.ArrayTest.testCasingBuiltinNonAlias[binary = REGULAR]`: "32552", + `org.postgresql.test.jdbc4.ArrayTest.testCasingComposite[binary = FORCE]`: "32552", + `org.postgresql.test.jdbc4.ArrayTest.testCasingComposite[binary = REGULAR]`: "32552", + `org.postgresql.test.jdbc4.ArrayTest.testCreateArrayOfBool[binary = FORCE]`: "32552", + `org.postgresql.test.jdbc4.ArrayTest.testCreateArrayOfBool[binary = REGULAR]`: "32552", + `org.postgresql.test.jdbc4.ArrayTest.testCreateArrayOfBytesFromString[binary = FORCE]`: "32552", + `org.postgresql.test.jdbc4.ArrayTest.testCreateArrayOfBytesFromString[binary = REGULAR]`: "32552", + `org.postgresql.test.jdbc4.ArrayTest.testCreateArrayOfBytes[binary = FORCE]`: "32552", + `org.postgresql.test.jdbc4.ArrayTest.testCreateArrayOfBytes[binary = REGULAR]`: "32552", + `org.postgresql.test.jdbc4.ArrayTest.testCreateArrayOfInt[binary = FORCE]`: "32552", + `org.postgresql.test.jdbc4.ArrayTest.testCreateArrayOfInt[binary = REGULAR]`: "32552", + `org.postgresql.test.jdbc4.ArrayTest.testCreateArrayOfMultiJson[binary = FORCE]`: "32552", + `org.postgresql.test.jdbc4.ArrayTest.testCreateArrayOfMultiJson[binary = REGULAR]`: "32552", + `org.postgresql.test.jdbc4.ArrayTest.testCreateArrayOfMultiString[binary = FORCE]`: "32552", + `org.postgresql.test.jdbc4.ArrayTest.testCreateArrayOfMultiString[binary = REGULAR]`: "32552", + `org.postgresql.test.jdbc4.ArrayTest.testCreateArrayOfNull[binary = FORCE]`: "32552", + `org.postgresql.test.jdbc4.ArrayTest.testCreateArrayOfNull[binary = REGULAR]`: "32552", + `org.postgresql.test.jdbc4.ArrayTest.testCreateArrayOfSmallInt[binary = FORCE]`: "32552", + `org.postgresql.test.jdbc4.ArrayTest.testCreateArrayOfSmallInt[binary = REGULAR]`: "32552", + `org.postgresql.test.jdbc4.ArrayTest.testCreateArrayWithNonStandardDelimiter[binary = FORCE]`: "32552", + `org.postgresql.test.jdbc4.ArrayTest.testCreateArrayWithNonStandardDelimiter[binary = REGULAR]`: "32552", + `org.postgresql.test.jdbc4.ArrayTest.testCreateArrayWithoutServer[binary = FORCE]`: "32552", + `org.postgresql.test.jdbc4.ArrayTest.testCreateArrayWithoutServer[binary = REGULAR]`: "32552", + `org.postgresql.test.jdbc4.ArrayTest.testCreateEmptyArrayOfIntViaAlias[binary = FORCE]`: "32552", + `org.postgresql.test.jdbc4.ArrayTest.testCreateEmptyArrayOfIntViaAlias[binary = REGULAR]`: "32552", + `org.postgresql.test.jdbc4.ArrayTest.testCreatePrimitiveArray[binary = FORCE]`: "32552", + `org.postgresql.test.jdbc4.ArrayTest.testCreatePrimitiveArray[binary = REGULAR]`: "32552", + `org.postgresql.test.jdbc4.ArrayTest.testEvilCasing[binary = 
FORCE]`: "32552", + `org.postgresql.test.jdbc4.ArrayTest.testEvilCasing[binary = REGULAR]`: "32552", + `org.postgresql.test.jdbc4.ArrayTest.testGetArrayOfComposites[binary = FORCE]`: "32552", + `org.postgresql.test.jdbc4.ArrayTest.testGetArrayOfComposites[binary = REGULAR]`: "32552", + `org.postgresql.test.jdbc4.ArrayTest.testJsonbArray[binary = FORCE]`: "32552", + `org.postgresql.test.jdbc4.ArrayTest.testJsonbArray[binary = REGULAR]`: "32552", + `org.postgresql.test.jdbc4.ArrayTest.testSetObjectFromJavaArray[binary = FORCE]`: "32552", + `org.postgresql.test.jdbc4.ArrayTest.testSetObjectFromJavaArray[binary = REGULAR]`: "32552", + `org.postgresql.test.jdbc4.ArrayTest.testToString[binary = FORCE]`: "32552", + `org.postgresql.test.jdbc4.ArrayTest.testToString[binary = REGULAR]`: "32552", + `org.postgresql.test.jdbc4.ArrayTest.testUUIDArray[binary = FORCE]`: "32552", + `org.postgresql.test.jdbc4.ArrayTest.testUUIDArray[binary = REGULAR]`: "32552", + `org.postgresql.test.jdbc4.BlobTest.free()`: "unknown", + `org.postgresql.test.jdbc4.BlobTest.getBinaryStreamWithBoundaries()`: "unknown", + `org.postgresql.test.jdbc4.BlobTest.getBinaryStreamWithBoundaries2()`: "unknown", + `org.postgresql.test.jdbc4.BlobTest.setBlobWithStream()`: "unknown", + `org.postgresql.test.jdbc4.BlobTest.setBlobWithStreamAndLength()`: "unknown", + `org.postgresql.test.jdbc4.DatabaseMetaDataHideUnprivilegedObjectsTest.initializationError`: "17511", + `org.postgresql.test.jdbc4.DatabaseMetaDataTest.getColumnsForAutoIncrement()`: "unknown", + `org.postgresql.test.jdbc4.DatabaseMetaDataTest.getFunctionsWithSpecificTypes()`: "100226", + `org.postgresql.test.jdbc4.DatabaseMetaDataTest.getSqlTypes()`: "unknown", + `org.postgresql.test.jdbc4.IsValidTest.testIsValidRemoteClose`: "35897", + `org.postgresql.test.jdbc4.UUIDTest.testUUIDString[binary=FORCE, stringType=VARCHAR]`: "5807", + `org.postgresql.test.jdbc4.UUIDTest.testUUIDString[binary=REGULAR, stringType=VARCHAR]`: "5807", + `org.postgresql.test.jdbc4.jdbc41.DriverSupportsClassUnloadingTest.driverUnloadsWhenConnectionClosedExplicitly()`: "unknown", + `org.postgresql.test.jdbc4.jdbc41.DriverSupportsClassUnloadingTest.driverUnloadsWhenConnectionLeaks()`: "unknown", + `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getArray()`: "21286", + `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getBigDecimal()`: "21286", + `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getBigInteger()`: "21286", + `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getBlob()`: "21286", + `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getBoolean()`: "21286", + `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getBooleanNull()`: "21286", + `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getBox()`: "21286", + `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getCalendar()`: "21286", + `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getCircle()`: "21286", + `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getClob()`: "21286", + `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getDate()`: "21286", + `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getDouble()`: "21286", + `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getDoubleNull()`: "21286", + `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getFloat()`: "21286", + `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getFloatNull()`: "21286", + `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getInet4Address()`: "21286", + `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getInet6Address()`: "21286", + 
`org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getInetAddressNull()`: "21286", + `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getInteger()`: "21286", + `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getIntegerNull()`: "21286", + `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getInterval()`: "21286", + `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getJavaUtilDate()`: "21286", + `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getLine()`: "21286", + `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getLineseg()`: "21286", + `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getLong()`: "21286", + `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getLongNull()`: "21286", + `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getNullDate()`: "21286", + `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getNullTimestamp()`: "21286", + `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getPath()`: "21286", + `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getPoint()`: "21286", + `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getPolygon()`: "21286", + `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getSerial()`: "21286", + `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getShort()`: "21286", + `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getShortNull()`: "21286", + `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getString()`: "21286", + `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getTime()`: "21286", + `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getTimestamp()`: "21286", + `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getTimestampWithTimeZone()`: "21286", + `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getUuid()`: "21286", + `org.postgresql.test.jdbc4.jdbc41.GetObjectTest.getXml()`: "21286", + `org.postgresql.test.jdbc4.jdbc41.SchemaTest.searchPathPreparedStatement()`: "unknown", + `org.postgresql.test.jdbc4.jdbc41.SchemaTest.searchPathPreparedStatementAutoCommitFalse()`: "unknown", + `org.postgresql.test.jdbc4.jdbc41.SchemaTest.searchPathPreparedStatementAutoCommitTrue()`: "unknown", + `org.postgresql.test.jdbc42.DatabaseMetaDataTest.getColumnsForNullScale()`: "51480", + `org.postgresql.test.jdbc42.DatabaseMetaDataTest.getCorrectSQLTypeForOffPathTypes()`: "51480", + `org.postgresql.test.jdbc42.DatabaseMetaDataTest.getCorrectSQLTypeForShadowedTypes()`: "51480", + `org.postgresql.test.jdbc42.DatabaseMetaDataTest.largeOidIsHandledCorrectly()`: "51480", + `org.postgresql.test.jdbc42.DatabaseMetaDataTest.oidConversion()`: "51480", + `org.postgresql.test.jdbc42.DatabaseMetaDataTest.oidConversionThrowsForNegativeLongValues()`: "51480", + `org.postgresql.test.jdbc42.DatabaseMetaDataTest.oidConversionThrowsForTooLargeLongValues()`: "51480", + `org.postgresql.test.jdbc42.GetObject310Test.testGetOffsetTime[binary = FORCE]`: "unknown", + `org.postgresql.test.jdbc42.GetObject310Test.testGetOffsetTime[binary = REGULAR]`: "unknown", + `org.postgresql.test.jdbc42.Jdbc42CallableStatementTest.testGetResultSetWithoutArg`: "17511", + `org.postgresql.test.jdbc42.Jdbc42CallableStatementTest.testGetResultSetWithoutArgUnsupportedConversion`: "17511", + `org.postgresql.test.plugin.AuthenticationPluginTest.authPluginMD5()`: "unknown", + `org.postgresql.test.util.PasswordUtilTest.alterUserPasswordSupportsNullEncoding()`: "73337", + `org.postgresql.test.util.PasswordUtilTest.customScramParams()`: "unknown", + `org.postgresql.test.util.PasswordUtilTest.encodePasswordWithServersPasswordEncryption()`: "unknown", + `org.postgresql.test.util.PasswordUtilTest.encryptionTypeValueOfOff()`: "73337", + 
`org.postgresql.test.util.PasswordUtilTest.encryptionTypeValueOfOn()`: "73337", + `org.postgresql.test.util.PasswordUtilTest.mD5()`: "73337", + `org.postgresql.test.util.PasswordUtilTest.scramSha256()`: "unknown", + `org.postgresql.test.xa.XADataSourceTest.initializationError`: "unknown", } var pgjdbcIgnoreList = blocklist{ diff --git a/pkg/cmd/roachtest/tests/rebalance_load.go b/pkg/cmd/roachtest/tests/rebalance_load.go index 36343751aa73..dc2558108ae7 100644 --- a/pkg/cmd/roachtest/tests/rebalance_load.go +++ b/pkg/cmd/roachtest/tests/rebalance_load.go @@ -99,13 +99,15 @@ func registerRebalanceLoad(r registry.Registry) { mixedversion.ClusterSettingOption( install.ClusterSettingsOption(settings.ClusterSettings), ), - // Multi-tenant deployments are currently unsupported. See #127378. - mixedversion.EnabledDeploymentModes(mixedversion.SystemOnlyDeployment), ) + mvt.OnStartup("maybe enable split/scatter on tenant", + func(ctx context.Context, l *logger.Logger, r *rand.Rand, h *mixedversion.Helper) error { + return enableTenantSplitScatter(l, r, h) + }) mvt.InMixedVersion("rebalance load run", func(ctx context.Context, l *logger.Logger, r *rand.Rand, h *mixedversion.Helper) error { return rebalanceByLoad( - ctx, t, c, rebalanceMode, maxDuration, concurrency, appNode, numStores, numNodes) + ctx, t, l, c, rebalanceMode, maxDuration, concurrency, appNode, numStores, numNodes) }) mvt.Run() } else { @@ -122,7 +124,7 @@ func registerRebalanceLoad(r registry.Registry) { settings.ClusterSettings["server.cpu_profile.cpu_usage_combined_threshold"] = "90" c.Start(ctx, t.L(), startOpts, settings, roachNodes) require.NoError(t, rebalanceByLoad( - ctx, t, c, rebalanceMode, maxDuration, + ctx, t, t.L(), c, rebalanceMode, maxDuration, concurrency, appNode, numStores, numNodes, )) } @@ -193,7 +195,7 @@ func registerRebalanceLoad(r registry.Registry) { Run: func(ctx context.Context, t test.Test, c cluster.Cluster) { if c.IsLocal() { concurrency = 32 - fmt.Printf("lowering concurrency to %d in local testing\n", concurrency) + t.L().Printf("lowering concurrency to %d in local testing", concurrency) } rebalanceLoadRun( ctx, t, c, "leases and replicas", 10*time.Minute, concurrency, true, /* mixedVersion */ @@ -231,6 +233,7 @@ func registerRebalanceLoad(r registry.Registry) { func rebalanceByLoad( ctx context.Context, t test.Test, + l *logger.Logger, c cluster.Cluster, rebalanceMode string, maxDuration time.Duration, @@ -246,10 +249,10 @@ func rebalanceByLoad( splits := (numStores * storeToRangeFactor) - 1 c.Run(ctx, option.WithNodes(appNode), fmt.Sprintf("./cockroach workload init kv --drop --splits=%d {pgurl:1}", splits)) - db := c.Conn(ctx, t.L(), 1) + db := c.Conn(ctx, l, 1) defer db.Close() - require.NoError(t, WaitFor3XReplication(ctx, t, t.L(), db)) + require.NoError(t, WaitFor3XReplication(ctx, t, l, db)) var m *errgroup.Group m, ctx = errgroup.WithContext(ctx) @@ -260,7 +263,7 @@ func rebalanceByLoad( ctx, cancel := context.WithCancel(ctx) m.Go(func() error { - t.L().Printf("starting load generator\n") + l.Printf("starting load generator") err := c.RunE(ctx, option.WithNodes(appNode), fmt.Sprintf( "./cockroach workload run kv --read-percent=95 --tolerate-errors --concurrency=%d "+ "--duration=%v {pgurl:1-%d}", @@ -275,9 +278,9 @@ func rebalanceByLoad( }) m.Go(func() error { - t.Status("checking for CPU balance") + l.Printf("checking for CPU balance") - storeCPUFn, err := makeStoreCPUFn(ctx, c, t, numNodes, numStores) + storeCPUFn, err := makeStoreCPUFn(ctx, t, l, c, numNodes, numStores) if err != nil { 
return err } @@ -297,18 +300,18 @@ func rebalanceByLoad( now := timeutil.Now() clusterStoresCPU, err := storeCPUFn(ctx) if err != nil { - t.L().Printf("unable to get the cluster stores CPU %s\n", err.Error()) + l.Printf("unable to get the cluster stores CPU: %v", err) continue } var curIsBalanced bool curIsBalanced, reason = isLoadEvenlyDistributed(clusterStoresCPU, meanCPUTolerance) - t.L().Printf("cpu %s", reason) + l.Printf("cpu %s", reason) if !prevIsBalanced && curIsBalanced { balancedStartTime = now } prevIsBalanced = curIsBalanced if prevIsBalanced && now.Sub(balancedStartTime) > stableDuration { - t.Status("successfully achieved CPU balance; waiting for kv to finish running") + l.Printf("successfully achieved CPU balance; waiting for kv to finish running") cancel() return nil } @@ -322,9 +325,9 @@ func rebalanceByLoad( // the cluster stores. When there are multiple stores per node, stores on the // same node will report identical CPU. func makeStoreCPUFn( - octx context.Context, c cluster.Cluster, t test.Test, numNodes, numStores int, + ctx context.Context, t test.Test, l *logger.Logger, c cluster.Cluster, numNodes, numStores int, ) (func(ctx context.Context) ([]float64, error), error) { - adminURLs, err := c.ExternalAdminUIAddr(octx, t.L(), c.Node(1)) + adminURLs, err := c.ExternalAdminUIAddr(ctx, l, c.Node(1)) if err != nil { return nil, err } @@ -342,7 +345,7 @@ func makeStoreCPUFn( return func(ctx context.Context) ([]float64, error) { now := timeutil.Now() resp, err := getMetricsWithSamplePeriod( - ctx, c, t, url, startTime, now, statSamplePeriod, tsQueries) + ctx, c, t, url, install.SystemInterfaceName, startTime, now, statSamplePeriod, tsQueries) if err != nil { return nil, err } diff --git a/pkg/cmd/roachtest/tests/registry.go b/pkg/cmd/roachtest/tests/registry.go index 8554e5b3aff6..35ce41c3c8ec 100644 --- a/pkg/cmd/roachtest/tests/registry.go +++ b/pkg/cmd/roachtest/tests/registry.go @@ -39,6 +39,7 @@ func RegisterTests(r registry.Registry) { registerClockJumpTests(r) registerClockMonotonicTests(r) registerClusterToCluster(r) + registerC2CMixedVersions(r) registerClusterReplicationResilience(r) registerClusterReplicationDisconnect(r) registerConnectionLatencyTest(r) diff --git a/pkg/cmd/roachtest/tests/replicagc.go b/pkg/cmd/roachtest/tests/replicagc.go index ace7e7c2a569..5908118912af 100644 --- a/pkg/cmd/roachtest/tests/replicagc.go +++ b/pkg/cmd/roachtest/tests/replicagc.go @@ -31,7 +31,7 @@ func registerReplicaGC(r registry.Registry) { for _, restart := range []bool{true, false} { r.Add(registry.TestSpec{ Name: fmt.Sprintf("replicagc-changed-peers/restart=%t", restart), - Owner: registry.OwnerReplication, + Owner: registry.OwnerKV, Cluster: r.MakeClusterSpec(6), CompatibleClouds: registry.AllExceptAWS, Suites: registry.Suites(registry.Nightly), diff --git a/pkg/cmd/roachtest/tests/restore.go b/pkg/cmd/roachtest/tests/restore.go index acf6d9f556f4..ee4c96139145 100644 --- a/pkg/cmd/roachtest/tests/restore.go +++ b/pkg/cmd/roachtest/tests/restore.go @@ -856,7 +856,7 @@ func (tpcc tpccRestore) init( MaybeFlag(tpcc.opts.warehouses > 0, "warehouses", tpcc.opts.warehouses). MaybeFlag(tpcc.opts.seed != 0, "seed", tpcc.opts.seed). MaybeFlag(tpcc.opts.fakeTime != 0, "fake-time", tpcc.opts.fakeTime). 
- Arg(fmt.Sprintf("{pgurl:%d-%d}", crdbNodes[0], crdbNodes[len(crdbNodes)-1])) + Arg("{pgurl:%d-%d}", crdbNodes[0], crdbNodes[len(crdbNodes)-1]) c.Run(ctx, option.WithNodes([]int{sp.getWorkloadNode()}), cmd.String()) } @@ -872,7 +872,7 @@ func (tpcc tpccRestore) run( MaybeFlag(tpcc.opts.seed != 0, "seed", tpcc.opts.seed). MaybeFlag(tpcc.opts.fakeTime != 0, "fake-time", tpcc.opts.fakeTime). MaybeFlag(tpcc.opts.queryTraceFile != "", "query-trace-file", tpcc.opts.queryTraceFile). - Arg(fmt.Sprintf("{pgurl:%d-%d}", crdbNodes[0], crdbNodes[len(crdbNodes)-1])) + Arg("{pgurl:%d-%d}", crdbNodes[0], crdbNodes[len(crdbNodes)-1]) return c.RunE(ctx, option.WithNodes([]int{sp.getWorkloadNode()}), cmd.String()) } diff --git a/pkg/cmd/roachtest/tests/ruby_pg_blocklist.go b/pkg/cmd/roachtest/tests/ruby_pg_blocklist.go index bdd7625952ee..c578038acdb3 100644 --- a/pkg/cmd/roachtest/tests/ruby_pg_blocklist.go +++ b/pkg/cmd/roachtest/tests/ruby_pg_blocklist.go @@ -29,17 +29,8 @@ var rubyPGBlocklist = blocklist{ `Basic type mapping PG::BasicTypeMapForQueries should do hash-as-json encoding`: "unknown", `Basic type mapping PG::BasicTypeMapForQueries should take BinaryData for bytea columns`: "unknown", `Basic type mapping PG::BasicTypeMapForResults can be initialized with a custom type registry`: "unknown", - `Basic type mapping PG::BasicTypeMapForResults connection wide type mapping should convert format 0 timestamps per TimestampLocal`: "unknown", - `Basic type mapping PG::BasicTypeMapForResults connection wide type mapping should convert format 0 timestamps per TimestampUtc`: "unknown", - `Basic type mapping PG::BasicTypeMapForResults connection wide type mapping should convert format 0 timestamps per TimestampUtcToLocal`: "unknown", - `Basic type mapping PG::BasicTypeMapForResults connection wide type mapping should convert format 0 timestamps with time zone`: "unknown", - `Basic type mapping PG::BasicTypeMapForResults connection wide type mapping should convert format 1 timestamps per TimestampLocal`: "unknown", - `Basic type mapping PG::BasicTypeMapForResults connection wide type mapping should convert format 1 timestamps per TimestampUtc`: "unknown", - `Basic type mapping PG::BasicTypeMapForResults connection wide type mapping should convert format 1 timestamps per TimestampUtcToLocal`: "unknown", - `Basic type mapping PG::BasicTypeMapForResults connection wide type mapping should convert format 1 timestamps with time zone`: "unknown", `Basic type mapping PG::BasicTypeMapForResults connection wide type mapping should do array type conversions`: "unknown", `Basic type mapping PG::BasicTypeMapForResults connection wide type mapping should do cidr type conversions`: "unknown", - `Basic type mapping PG::BasicTypeMapForResults connection wide type mapping should do text datetime without time zone type conversions`: "unknown", `GC.compact should compact PG::TypeMapByClass #328`: "unknown", `PG::Connection #get_result should send remaining data before waiting`: "unknown", `PG::Connection accepts nil as the timeout in #wait_for_notify `: "unknown", diff --git a/pkg/cmd/roachtest/tests/split.go b/pkg/cmd/roachtest/tests/split.go index 7fed141e4d86..331e6cfa3f23 100644 --- a/pkg/cmd/roachtest/tests/split.go +++ b/pkg/cmd/roachtest/tests/split.go @@ -427,17 +427,16 @@ func registerLoadSplits(r registry.Registry) { // conditions defined by the params. It checks whether certain number of // splits occur in different workload scenarios. 
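The Arg(fmt.Sprintf(...)) to Arg("%s", ...) rewrites in these roachtestutil call sites (and the t.Fatalf to t.Fatal change in sqlsmith.go below) share one motivation: a value that may contain '%' must not be passed where a printf-style format string is expected. A minimal standalone Go illustration of the failure mode, using plain fmt rather than the roachtestutil builder, with a made-up flag value:

package main

import "fmt"

func main() {
	arg := "--active-warehouses=100%"
	// Data interpreted as a format string is mangled by the stray '%':
	fmt.Println(fmt.Sprintf(arg)) // --active-warehouses=100%!(NOVERB)
	// Routed through an explicit %s verb, it survives intact:
	fmt.Println(fmt.Sprintf("%s", arg)) // --active-warehouses=100%
}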
func runLoadSplits(ctx context.Context, t test.Test, c cluster.Cluster, params splitParams) { - // We run this without metamorphic constants as the tests make - // incorrect assumptions about the absolute values of QPS. - // See: https://github.com/cockroachdb/cockroach/issues/112664 - // TODO(DarrylWong): enable metamorphic contants once issue is resolved - settings := install.MakeClusterSettings() - settings.Env = append(settings.Env, "COCKROACH_INTERNAL_DISABLE_METAMORPHIC_TESTING=true") startOpts := option.NewStartOpts(option.NoBackupSchedule) startOpts.RoachprodOpts.ExtraArgs = append(startOpts.RoachprodOpts.ExtraArgs, "--vmodule=split_queue=2,store_rebalancer=2,allocator=2,replicate_queue=2,"+ "decider=3,replica_split_load=1", ) + // This test sets a larger range size than allowed by the default settings. + settings := install.MakeClusterSettings() + if params.maxSize > 8<<30 { + settings.Env = append(settings.Env, fmt.Sprintf("COCKROACH_MAX_RANGE_MAX_BYTES=%d", params.maxSize)) + } c.Start(ctx, t.L(), startOpts, settings, c.CRDBNodes()) m := c.NewMonitor(ctx, c.CRDBNodes()) @@ -474,6 +473,13 @@ func runLoadSplits(ctx context.Context, t test.Test, c cluster.Cluster, params s t.Fatal("no CPU or QPS split threshold set") } + // The default for backpressureRangeHardCap is 8 GiB. + if params.maxSize > 8<<30 { + t.Status("allowing ranges up to ", params.maxSize*2, " bytes") + _, err := db.ExecContext(ctx, fmt.Sprintf("SET CLUSTER SETTING kv.range.range_size_hard_cap = '%d'", params.maxSize*2)) + require.NoError(t, err) + } + t.Status("increasing range_max_bytes") minBytes := 16 << 20 // 16 MB setRangeMaxBytes := func(maxBytes int) { @@ -588,8 +594,16 @@ func runLargeRangeSplits(ctx context.Context, t test.Test, c cluster.Cluster, si rows := size / rowEstimate const minBytes = 16 << 20 // 16 MB + // Set the range max size to a multiple of what we expect the size of the + // bank table to be. This should result in the table fitting inside a single + // range. + rangeMaxSize := 10 * size + numNodes := c.Spec().NodeCount - c.Start(ctx, t.L(), option.DefaultStartOpts(), install.MakeClusterSettings(), c.Node(1)) + // This test sets a larger range size than allowed by the default settings. + settings := install.MakeClusterSettings() + settings.Env = append(settings.Env, fmt.Sprintf("COCKROACH_MAX_RANGE_MAX_BYTES=%d", rangeMaxSize)) + c.Start(ctx, t.L(), option.DefaultStartOpts(), settings, c.Node(1)) db := c.Conn(ctx, t.L(), 1) defer db.Close() @@ -627,6 +641,11 @@ func runLargeRangeSplits(ctx context.Context, t test.Test, c cluster.Cluster, si if err := disableLoadBasedSplitting(ctx, db); err != nil { return err } + + // This effectively disables the hard cap. + if _, err := db.ExecContext(ctx, fmt.Sprintf("SET CLUSTER SETTING kv.range.range_size_hard_cap = '%d'", rangeMaxSize*2)); err != nil { + return err + } if _, err := db.ExecContext(ctx, `SET CLUSTER SETTING kv.snapshot_rebalance.max_rate='512MiB'`); err != nil { return err } @@ -635,10 +654,7 @@ func runLargeRangeSplits(ctx context.Context, t test.Test, c cluster.Cluster, si if _, err := db.ExecContext(ctx, `SET CLUSTER SETTING kv.split.mvcc_stats_recomputation.enabled = 'false'`); err != nil { return err } - // Set the range size to a multiple of what we expect the size of the - // bank table to be. This should result in the table fitting - // inside a single range. 
- setRangeMaxBytes(t, db, minBytes, 10*size) + setRangeMaxBytes(t, db, minBytes, rangeMaxSize) // NB: would probably be faster to use --data-loader=IMPORT here, but IMPORT // will disregard our preference to keep things in a single range. @@ -656,7 +672,7 @@ func runLargeRangeSplits(ctx context.Context, t test.Test, c cluster.Cluster, si // Phase 2: add other nodes, wait for full replication of bank table. t.Status("waiting for full replication") { - c.Start(ctx, t.L(), option.DefaultStartOpts(), install.MakeClusterSettings(), c.Range(2, numNodes)) + c.Start(ctx, t.L(), option.DefaultStartOpts(), settings, c.Range(2, numNodes)) m := c.NewMonitor(ctx, c.All()) // NB: we do a round-about thing of making sure that there's at least one // range that has 3 replicas (rather than waiting that there are no ranges diff --git a/pkg/cmd/roachtest/tests/sqlsmith.go b/pkg/cmd/roachtest/tests/sqlsmith.go index f2e0eac34dbf..16403c1880a1 100644 --- a/pkg/cmd/roachtest/tests/sqlsmith.go +++ b/pkg/cmd/roachtest/tests/sqlsmith.go @@ -321,7 +321,7 @@ WITH into_db = 'defaultdb', unsafe_restore_incompatible_version; sb.WriteString(errStr) sb.WriteString(hintStr) - t.Fatalf(sb.String()) + t.Fatal(sb.String()) } } } diff --git a/pkg/cmd/roachtest/tests/tpcc.go b/pkg/cmd/roachtest/tests/tpcc.go index a171145cf2c2..15272e1e378c 100644 --- a/pkg/cmd/roachtest/tests/tpcc.go +++ b/pkg/cmd/roachtest/tests/tpcc.go @@ -159,7 +159,7 @@ func tpccImportCmdWithCockroachBinary( return roachtestutil.NewCommand("%s workload fixtures import %s", crdbBinary, workloadCmd). MaybeFlag(db != "", "db", db). Flag("warehouses", warehouses). - Arg(strings.Join(extraArgs, " ")). + Arg("%s", strings.Join(extraArgs, " ")). String() } @@ -221,8 +221,8 @@ func setupTPCC( cmd := roachtestutil.NewCommand("%s workload init %s", test.DefaultCockroachPath, opts.getWorkloadCmd()). MaybeFlag(opts.DB != "", "db", opts.DB). Flag("warehouses", opts.Warehouses). - Arg(extraArgs). - Arg("{pgurl:1}") + Arg("%s", extraArgs). + Arg("%s", "{pgurl:1}") c.Run(ctx, option.WithNodes(c.WorkloadNode()), cmd.String()) default: @@ -308,9 +308,9 @@ func runTPCC( Flag("duration", opts.Duration). Flag("prometheus-port", workloadInstances[i].prometheusPort). Flag("pprofport", workloadPProfStartPort+i). - Arg(opts.ExtraRunArgs). - Arg(workloadInstances[i].extraRunArgs). - Arg(pgURLs[i]) + Arg("%s", opts.ExtraRunArgs). + Arg("%s", workloadInstances[i].extraRunArgs). + Arg("%s", pgURLs[i]) err := c.RunE(ctx, option.WithNodes(c.WorkloadNode()), cmd.String()) // Don't fail the test if we are running the workload throughout @@ -433,12 +433,23 @@ func runTPCCMixedHeadroom(ctx context.Context, t test.Test, c cluster.Cluster) { mvt := mixedversion.NewTest( ctx, t, t.L(), c, c.CRDBNodes(), + // We test only upgrades from 23.2 in this test because it uses + // the `workload fixtures import` command, which is only supported + // reliably in multi-tenant mode starting from that version. + mixedversion.MinimumSupportedVersion("v23.2.0"), mixedversion.MaxUpgrades(3), - // Multi-tenant deployments are currently unsupported. See #127378.
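For context on the split.go changes above: an oversized range is now rejected at two independent layers, zone-config validation (capped by the COCKROACH_MAX_RANGE_MAX_BYTES environment variable, added in zonepb/zone.go later in this patch) and the kv.range.range_size_hard_cap cluster setting, which is why the tests raise both. The byte values quoted in the zonepb/zone_test.go hunk below follow directly from the shifts; a trivial standalone check:

package main

import "fmt"

func main() {
	fmt.Println(int64(8) << 30) // 8589934592: 8 GiB, the default ceiling
	fmt.Println(int64(9) << 30) // 9663676416: the RangeMaxBytes rejected in zone_test.go
}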
- mixedversion.EnabledDeploymentModes(mixedversion.SystemOnlyDeployment), ) + tenantFeaturesEnabled := make(chan struct{}) + enableTenantFeatures := func(ctx context.Context, l *logger.Logger, rng *rand.Rand, h *mixedversion.Helper) error { + defer close(tenantFeaturesEnabled) + return enableTenantSplitScatter(l, rng, h) + } + importTPCC := func(ctx context.Context, l *logger.Logger, rng *rand.Rand, h *mixedversion.Helper) error { + l.Printf("waiting for tenant features to be enabled") + <-tenantFeaturesEnabled + randomNode := c.Node(c.CRDBNodes().SeededRandNode(rng)[0]) cmd := tpccImportCmdWithCockroachBinary(test.DefaultCockroachPath, "", "tpcc", headroomWarehouses, fmt.Sprintf("{pgurl%s}", randomNode)) return c.RunE(ctx, option.WithNodes(randomNode), cmd) @@ -448,8 +459,11 @@ func runTPCCMixedHeadroom(ctx context.Context, t test.Test, c cluster.Cluster) { // upgrade machinery, in which a) all ranges are touched and b) work proportional // to the amount data may be carried out. importLargeBank := func(ctx context.Context, l *logger.Logger, rng *rand.Rand, h *mixedversion.Helper) error { + l.Printf("waiting for tenant features to be enabled") + <-tenantFeaturesEnabled + randomNode := c.Node(c.CRDBNodes().SeededRandNode(rng)[0]) - cmd := roachtestutil.NewCommand(fmt.Sprintf("%s workload fixtures import bank", test.DefaultCockroachPath)). + cmd := roachtestutil.NewCommand("%s workload fixtures import bank", test.DefaultCockroachPath). Arg("{pgurl%s}", randomNode). Flag("payload-bytes", 10240). Flag("rows", bankRows). @@ -491,7 +505,7 @@ func runTPCCMixedHeadroom(ctx context.Context, t test.Test, c cluster.Cluster) { } checkTPCCWorkload := func(ctx context.Context, l *logger.Logger, rng *rand.Rand, h *mixedversion.Helper) error { - cmd := roachtestutil.NewCommand(fmt.Sprintf("%s workload check tpcc", test.DefaultCockroachPath)). + cmd := roachtestutil.NewCommand("%s workload check tpcc", test.DefaultCockroachPath). Arg("{pgurl:1}"). Flag("warehouses", headroomWarehouses). 
String() @@ -499,6 +513,7 @@ func runTPCCMixedHeadroom(ctx context.Context, t test.Test, c cluster.Cluster) { } uploadCockroach(ctx, t, c, c.WorkloadNode(), clusterupgrade.CurrentVersion()) + mvt.OnStartup("maybe enable tenant features", enableTenantFeatures) mvt.OnStartup("load TPCC dataset", importTPCC) mvt.OnStartup("load bank dataset", importLargeBank) mvt.InMixedVersion("TPCC workload", runTPCCWorkload) diff --git a/pkg/cmd/roachtest/tests/ts_util.go b/pkg/cmd/roachtest/tests/ts_util.go index e922c7247621..ce314f9f71c7 100644 --- a/pkg/cmd/roachtest/tests/ts_util.go +++ b/pkg/cmd/roachtest/tests/ts_util.go @@ -18,6 +18,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/option" "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/roachtestutil" "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/test" + "github.com/cockroachdb/cockroach/pkg/roachprod/install" "github.com/cockroachdb/cockroach/pkg/ts/tspb" ) @@ -51,10 +52,11 @@ func mustGetMetrics( c cluster.Cluster, t test.Test, adminURL string, + virtualCluster string, start, end time.Time, tsQueries []tsQuery, ) tspb.TimeSeriesQueryResponse { - response, err := getMetrics(ctx, c, t, adminURL, start, end, tsQueries) + response, err := getMetrics(ctx, c, t, adminURL, virtualCluster, start, end, tsQueries) if err != nil { t.Fatal(err) } @@ -66,10 +68,11 @@ func getMetrics( c cluster.Cluster, t test.Test, adminURL string, + virtualCluster string, start, end time.Time, tsQueries []tsQuery, ) (tspb.TimeSeriesQueryResponse, error) { - return getMetricsWithSamplePeriod(ctx, c, t, adminURL, start, end, defaultSamplePeriod, tsQueries) + return getMetricsWithSamplePeriod(ctx, c, t, adminURL, virtualCluster, start, end, defaultSamplePeriod, tsQueries) } func getMetricsWithSamplePeriod( @@ -77,6 +80,7 @@ func getMetricsWithSamplePeriod( c cluster.Cluster, t test.Test, adminURL string, + virtualCluster string, start, end time.Time, samplePeriod time.Duration, tsQueries []tsQuery, @@ -114,7 +118,10 @@ func getMetricsWithSamplePeriod( Queries: queries, } var response tspb.TimeSeriesQueryResponse - client := roachtestutil.DefaultHTTPClient(c, t.L(), roachtestutil.HTTPTimeout(500*time.Millisecond)) + client := roachtestutil.DefaultHTTPClient( + c, t.L(), roachtestutil.HTTPTimeout(500*time.Millisecond), + roachtestutil.VirtualCluster(virtualCluster), + ) err := client.PostProtobuf(ctx, url, &request, &response) return response, err @@ -134,7 +141,7 @@ func verifyTxnPerSecond( t.Fatal(err) } adminURL := adminUIAddrs[0] - response := mustGetMetrics(ctx, c, t, adminURL, start, end, []tsQuery{ + response := mustGetMetrics(ctx, c, t, adminURL, install.SystemInterfaceName, start, end, []tsQuery{ {name: "cr.node.txn.commits", queryType: rate}, {name: "cr.node.txn.commits", queryType: total}, }) @@ -185,7 +192,7 @@ func verifyLookupsPerSec( t.Fatal(err) } adminURL := adminUIAddrs[0] - response := mustGetMetrics(ctx, c, t, adminURL, start, end, []tsQuery{ + response := mustGetMetrics(ctx, c, t, adminURL, install.SystemInterfaceName, start, end, []tsQuery{ {name: "cr.node.distsender.rangelookups", queryType: rate}, }) diff --git a/pkg/cmd/roachtest/tests/util.go b/pkg/cmd/roachtest/tests/util.go index 18e09ef50b1d..3d1e39c07748 100644 --- a/pkg/cmd/roachtest/tests/util.go +++ b/pkg/cmd/roachtest/tests/util.go @@ -215,9 +215,7 @@ func setAdmissionControl(ctx context.Context, t test.Test, c cluster.Cluster, en // UsingRuntimeAssertions returns true if calls to `t.Cockroach()` for // this test will return the 
cockroach build with runtime -// assertions. Note that calling this function only makes sense if the -// test uploads cockroach using `t.Cockroach` (instead of calling -// t.StandardCockroach or t.RuntimeAssertionsCockroach directly). +// assertions. func UsingRuntimeAssertions(t test.Test) bool { return t.Cockroach() == t.RuntimeAssertionsCockroach() } diff --git a/pkg/cmd/roachtest/tests/validate_system_schema_after_version_upgrade.go b/pkg/cmd/roachtest/tests/validate_system_schema_after_version_upgrade.go index 19c9408b1a5d..efa5119527fa 100644 --- a/pkg/cmd/roachtest/tests/validate_system_schema_after_version_upgrade.go +++ b/pkg/cmd/roachtest/tests/validate_system_schema_after_version_upgrade.go @@ -12,6 +12,7 @@ package tests import ( "context" + "fmt" "math/rand" "sort" "strings" @@ -28,6 +29,57 @@ import ( "github.com/pmezard/go-difflib/difflib" ) +// validateSystemSchemaTenantVersion is the minimum version after +// which we start ensuring that the schema for a tenant is the same +// whether we upgraded to a version or bootstrapped in it. Prior to +// this version, the check is expected to fail due to #129643. +var validateSystemSchemaTenantVersion = clusterupgrade.MustParseVersion("v24.3.0-alpha.00000000") + +func diff(a, b string) error { + if a == b { + return nil + } + + diffStr, diffErr := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ + A: difflib.SplitLines(a), + B: difflib.SplitLines(b), + Context: 5, + }) + + if diffErr != nil { + return errors.Wrap(diffErr, "failed to produce diff") + } + + return fmt.Errorf("diff:\n%s", diffStr) +} + +type tenantSystemSchemaComparison struct { + name string + bootstrapped string + upgraded string +} + +func newTenantSystemSchemaComparison(name string) *tenantSystemSchemaComparison { + return &tenantSystemSchemaComparison{name: name} +} + +func (c tenantSystemSchemaComparison) Diff() error { + if err := diff(c.upgraded, c.bootstrapped); err != nil { + tenantDesc := "system" + if c.name != install.SystemInterfaceName { + tenantDesc = "non-system" + } + + return errors.Newf( + "After upgrading, `USE system; SHOW CREATE ALL TABLES;` "+ + "does not match expected output after version upgrade for %s tenant: %w", + tenantDesc, err, + ) + } + + return nil +} + // This test tests that, after bootstrapping a cluster from a previous // release's binary and upgrading it to the latest version, the `system` // database "contains the expected tables". @@ -37,9 +89,13 @@ func runValidateSystemSchemaAfterVersionUpgrade( ctx context.Context, t test.Test, c cluster.Cluster, ) { // Obtain system table definitions with `SHOW CREATE ALL TABLES` in the SYSTEM db. - obtainSystemSchema := func(ctx context.Context, l *logger.Logger, c cluster.Cluster, node int) string { + obtainSystemSchema := func( + ctx context.Context, l *logger.Logger, c cluster.Cluster, node int, virtualCluster string, + ) string { // Create a connection to the database cluster. - db := c.Conn(ctx, l, node) + db := c.Conn(ctx, l, node, option.VirtualClusterName(virtualCluster)) + defer db.Close() + sqlRunner := sqlutils.MakeSQLRunner(db) // Prepare the SQL query. @@ -60,27 +116,15 @@ func runValidateSystemSchemaAfterVersionUpgrade( return sb.String() } - // expected and actual output of `SHOW CREATE ALL TABLES;`. - var expected, actual string - - // Start a cluster with the latest binary and get the system schema from the - // cluster. 
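The tenantFeaturesEnabled channel added to runTPCCMixedHeadroom in the tpcc.go hunk earlier is the standard close-to-broadcast idiom: closing a channel releases every current and future receiver at once, and the defer close(...) guarantees the waiting import steps cannot deadlock even if enableTenantSplitScatter returns an error. A self-contained sketch of the pattern (the step names are illustrative, not from the patch):

package main

import (
	"fmt"
	"sync"
)

func main() {
	ready := make(chan struct{})
	var wg sync.WaitGroup

	// Two waiters, standing in for importTPCC and importLargeBank.
	for i := 0; i < 2; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			<-ready // a receive on a closed channel returns immediately
			fmt.Printf("import step %d proceeding\n", i)
		}(i)
	}

	// The startup step closes the channel exactly once, even on early return.
	setup := func() { defer close(ready); fmt.Println("tenant features enabled") }
	setup()
	wg.Wait()
}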
- if err := clusterupgrade.StartWithSettings( - ctx, t.L(), c, c.All(), option.DefaultStartOpts(), install.BinaryOption(test.DefaultCockroachPath), - ); err != nil { - t.Fatal(err) - } - expected = obtainSystemSchema(ctx, t.L(), c, 1) - c.Wipe(ctx, c.All()) + systemComparison := newTenantSystemSchemaComparison(install.SystemInterfaceName) + var tenantComparison *tenantSystemSchemaComparison mvt := mixedversion.NewTest(ctx, t, t.L(), c, c.All(), - // Fixtures are generated on a version that's too old for this test. - mixedversion.NeverUseFixtures, // We limit the number of upgrades since the test is not expected to work // on versions older than 22.2. - mixedversion.MaxUpgrades(2), - // Multi-tenant deployments are currently unsupported. See #127378. - mixedversion.EnabledDeploymentModes(mixedversion.SystemOnlyDeployment), + mixedversion.MaxUpgrades(3), + // Fixtures are generated on a version that's too old for this test. + mixedversion.NeverUseFixtures, ) mvt.AfterUpgradeFinalized( "obtain system schema from the upgraded cluster", @@ -91,24 +135,48 @@ func runValidateSystemSchemaAfterVersionUpgrade( return nil } - // Compare whether the two schemas are equal - actual = obtainSystemSchema(ctx, l, c, 1) - if expected != actual { - diff, diffErr := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ - A: difflib.SplitLines(expected), - B: difflib.SplitLines(actual), - Context: 5, - }) - if diffErr != nil { - return errors.Wrap(diffErr, "failed to produce diff") - } - return errors.Newf("After upgrading, `USE system; SHOW CREATE ALL TABLES;` "+ - "does not match expected output after version upgrade."+ - "\nDiff:\n%s", diff) + systemComparison.upgraded = obtainSystemSchema(ctx, l, c, 1, systemComparison.name) + if h.IsMultitenant() { + tenantComparison = newTenantSystemSchemaComparison(h.Tenant.Descriptor.Name) + tenantComparison.upgraded = obtainSystemSchema(ctx, l, c, 1, tenantComparison.name) } - l.Printf("validating succeeded:\n%v", expected) + return nil }, ) mvt.Run() + + // Start a cluster with the latest binary and get the system schema + // from the cluster. 
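The diff helper introduced at the top of this file delegates to pmezard/go-difflib, already imported here as difflib. For reference, a self-contained call with the same options (Context: 5 keeps five unchanged lines around each hunk); the schema strings are invented for the example:

package main

import (
	"fmt"

	"github.com/pmezard/go-difflib/difflib"
)

func main() {
	a := "CREATE TABLE system.jobs (...);\nCREATE TABLE system.users (...);\n"
	b := "CREATE TABLE system.jobs (...);\nCREATE TABLE system.web_sessions (...);\n"

	text, err := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{
		A:       difflib.SplitLines(a),
		B:       difflib.SplitLines(b),
		Context: 5,
	})
	if err != nil {
		panic(err)
	}
	fmt.Print(text) // a unified diff of the two schema dumps, empty when equal
}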
+ c.Wipe(ctx, c.All()) + settings := install.MakeClusterSettings() + + c.Start(ctx, t.L(), option.DefaultStartOpts(), settings) + systemComparison.bootstrapped = obtainSystemSchema(ctx, t.L(), c, 1, systemComparison.name) + + validateTenant := tenantComparison != nil && clusterupgrade.CurrentVersion().AtLeast(validateSystemSchemaTenantVersion) + + if validateTenant { + t.L().Printf("creating shared-process tenant") + startOpts := option.StartSharedVirtualClusterOpts(tenantComparison.name) + c.StartServiceForVirtualCluster(ctx, t.L(), startOpts, settings) + tenantComparison.bootstrapped = obtainSystemSchema(ctx, t.L(), c, 1, tenantComparison.name) + } + + if err := systemComparison.Diff(); err != nil { + t.Fatal(err) + } + t.L().Printf("validation succeeded for system tenant") + + if validateTenant { + if err := tenantComparison.Diff(); err != nil { + t.Fatal(err) + } + + if err := diff(systemComparison.upgraded, tenantComparison.upgraded); err != nil { + t.Fatal(fmt.Errorf("comparing system schema of system and tenant: %w", err)) + } + + t.L().Printf("validation succeeded for non-system tenant") + } } diff --git a/pkg/cmd/roachtest/tests/ycsb.go b/pkg/cmd/roachtest/tests/ycsb.go index 1e1b60a50ee5..36c579c40d88 100644 --- a/pkg/cmd/roachtest/tests/ycsb.go +++ b/pkg/cmd/roachtest/tests/ycsb.go @@ -193,6 +193,8 @@ func enableIsolationLevels(ctx context.Context, t test.Test, db *gosql.DB) error // master, we should keep these to ensure that the settings are configured // properly in mixed-version roachtests. `SET CLUSTER SETTING sql.txn.read_committed_isolation.enabled = 'true';`, + // NOTE: for a similar reason, we use the deprecated name for this setting + // to ensure that it is properly configured in mixed-version roachtests. `SET CLUSTER SETTING sql.txn.snapshot_isolation.enabled = 'true';`, } { if _, err := db.ExecContext(ctx, cmd); err != nil { diff --git a/pkg/cmd/roachtest/testselector/selector.go b/pkg/cmd/roachtest/testselector/selector.go index 8d30e86ab1eb..0beae35ab5f4 100644 --- a/pkg/cmd/roachtest/testselector/selector.go +++ b/pkg/cmd/roachtest/testselector/selector.go @@ -55,37 +55,33 @@ type TestDetails struct { // SelectTestsReq is the request for CategoriseTests type SelectTestsReq struct { - ForPastDays int // number of days data to consider for test selection - FirstRunOn int // number of days to consider for the first time the test is run - LastRunOn int // number of days to consider for the last time the test is run - SelectFromSuccessPct int // percentage of tests to be Selected for running from the successful test list sorted by number of runs + ForPastDays int // number of days data to consider for test selection + FirstRunOn int // number of days to consider for the first time the test is run + LastRunOn int // number of days to consider for the last time the test is run Cloud spec.Cloud // the cloud where the tests were run Suite string // the test suite for which the selection is done } // NewDefaultSelectTestsReq returns a new SelectTestsReq with default values populated -func NewDefaultSelectTestsReq( - selectFromSuccessPct int, cloud spec.Cloud, suite string, -) *SelectTestsReq { +func NewDefaultSelectTestsReq(cloud spec.Cloud, suite string) *SelectTestsReq { return &SelectTestsReq{ - ForPastDays: defaultForPastDays, - FirstRunOn: defaultFirstRunOn, - LastRunOn: defaultLastRunOn, - SelectFromSuccessPct: selectFromSuccessPct, - Cloud: cloud, - Suite: suite, + ForPastDays: defaultForPastDays, + FirstRunOn: defaultFirstRunOn, + LastRunOn: defaultLastRunOn, + 
Cloud: cloud, + Suite: suite, } } -// CategoriseTests returns the tests categorized based on the snowflake query +// CategoriseTests returns the tests categorised based on the snowflake query // The tests are Selected by selector.go based on certain criteria: // 1. the number of time a test has been successfully running // 2. the test is new // 3. the test has not been run for a while // 4. a subset of the successful tests based on SelectTestReq.SelectFromSuccessPct // It returns all the tests. The selected tests have the value TestDetails.Selected as true -func CategoriseTests(ctx context.Context, req *SelectTestsReq) ([]*TestDetails, error) { +func CategoriseTests(ctx context.Context, req *SelectTestsReq) (map[string]*TestDetails, error) { db, err := getConnect(ctx) if err != nil { return nil, err @@ -118,11 +114,8 @@ func CategoriseTests(ctx context.Context, req *SelectTestsReq) ([]*TestDetails, for i := range colPointers { colPointers[i] = &colContainer[i] } - // selectedTestDetails are all the tests that are selected from snowflake query - selectedTestDetails := make([]*TestDetails, 0) - // skipped tests are maintained separately - // this helps in considering them for running based on further select criteria like selectFromSuccessPct - skippedTests := make([]*TestDetails, 0) + // allTestDetails are all the tests that are returned by the snowflake query + allTestDetails := make(map[string]*TestDetails) for rows.Next() { err = rows.Scan(colPointers...) if err != nil { @@ -141,25 +134,9 @@ func CategoriseTests(ctx context.Context, req *SelectTestsReq) ([]*TestDetails, AvgDurationInMillis: getDuration(testInfos[2]), LastFailureIsPreempt: testInfos[3] == "yes", } - if testDetails.Selected { - // selected for running - selectedTestDetails = append(selectedTestDetails, testDetails) - } else { - // skipped based on query - skippedTests = append(skippedTests, testDetails) - } - } - if req.SelectFromSuccessPct > 0 && len(skippedTests) > 0 { - // need to select some tests from the skipped tests - numberOfTestsToSelect := len(skippedTests) * req.SelectFromSuccessPct / 100 - // the tests are sorted by the number of runs. So, simply iterate over the list - // and select the first count of "numberOfTestsToSelect" - for i := 0; i < numberOfTestsToSelect; i++ { - skippedTests[i].Selected = true - } + allTestDetails[testDetails.Name] = testDetails } - // add all the test. 
The information can be used for further processing - return append(selectedTestDetails, skippedTests...), nil + return allTestDetails, nil } // getDuration extracts the duration from the snowflake query duration field diff --git a/pkg/config/system_test.go b/pkg/config/system_test.go index 1a5f2ea2c707..1e3d33a04f4a 100644 --- a/pkg/config/system_test.go +++ b/pkg/config/system_test.go @@ -566,6 +566,7 @@ func TestGetZoneConfigForKey(t *testing.T) { {roachpb.RKey(keys.TimeseriesPrefix.PrefixEnd()), keys.SystemRangesID}, {roachpb.RKey(keys.TableDataMin), keys.SystemDatabaseID}, {roachpb.RKey(keys.SystemConfigSplitKey), keys.SystemDatabaseID}, + {roachpb.RKey(keys.GracePeriodInitTimestamp), keys.SystemRangesID}, {tkey(keys.ZonesTableID), keys.ZonesTableID}, {roachpb.RKey(keys.SystemZonesTableSpan.Key), keys.ZonesTableID}, diff --git a/pkg/config/zonepb/zone.go b/pkg/config/zonepb/zone.go index fa3529187b46..2c4aefef8f13 100644 --- a/pkg/config/zonepb/zone.go +++ b/pkg/config/zonepb/zone.go @@ -345,11 +345,17 @@ func (z *ZoneConfig) ValidateTandemFields() error { return nil } -// MinRangeMaxBytes is the minimum value for range max bytes. +// minRangeMaxBytes is the minimum value for range max bytes. // The default, 64 MiB, is half of the default range_min_bytes var minRangeMaxBytes = envutil.EnvOrDefaultInt64("COCKROACH_MIN_RANGE_MAX_BYTES", 64<<20 /* 64 MiB */) +// maxRangeMaxBytes is the maximum value for range max bytes. The default, 8 +// GiB, is sixteen times the default range_max_bytes. Also see +// kv.range.range_size_hard_cap. +var maxRangeMaxBytes = envutil.EnvOrDefaultInt64("COCKROACH_MAX_RANGE_MAX_BYTES", + 8<<30 /* 8 GiB */) + func TestingSetMinRangeMaxBytes(v int64) func() { old := minRangeMaxBytes minRangeMaxBytes = v @@ -402,6 +408,12 @@ func (z *ZoneConfig) Validate() error { *z.RangeMaxBytes, minRangeMaxBytes) } + if z.RangeMaxBytes != nil && *z.RangeMaxBytes > maxRangeMaxBytes { + return fmt.Errorf("RangeMaxBytes %d greater than maximum allowed %d", + *z.RangeMaxBytes, maxRangeMaxBytes, + ) + } + if z.RangeMinBytes != nil && *z.RangeMinBytes < 0 { return fmt.Errorf("RangeMinBytes %d less than minimum allowed 0", *z.RangeMinBytes) } diff --git a/pkg/config/zonepb/zone_test.go b/pkg/config/zonepb/zone_test.go index 710a445e681c..0e86fb6b1ea4 100644 --- a/pkg/config/zonepb/zone_test.go +++ b/pkg/config/zonepb/zone_test.go @@ -62,6 +62,13 @@ func TestZoneConfigValidate(t *testing.T) { }, "RangeMaxBytes 0 less than minimum allowed", }, + { + ZoneConfig{ + NumReplicas: proto.Int32(1), + RangeMaxBytes: proto.Int64(9 << 30 /* 9 GiB */), + }, + "RangeMaxBytes 9663676416 greater than maximum allowed 8589934592", + }, { ZoneConfig{ NumReplicas: proto.Int32(1), diff --git a/pkg/geo/geomfn/make_geometry_test.go b/pkg/geo/geomfn/make_geometry_test.go index d68f9f1d992d..f2ed82500535 100644 --- a/pkg/geo/geomfn/make_geometry_test.go +++ b/pkg/geo/geomfn/make_geometry_test.go @@ -251,7 +251,7 @@ func TestMakePolygon(t *testing.T) { } polygon, err := MakePolygon(outer, interior...) 
if tc.err != nil { - require.Errorf(t, err, tc.err.Error()) + require.Error(t, err, tc.err.Error()) require.EqualError(t, err, tc.err.Error()) } else { require.NoError(t, err) } @@ -359,7 +359,7 @@ func TestMakePolygonFromMultiLineString(t *testing.T) { g := geo.MustParseGeometry(tc.g) polygon, err := MakePolygonFromMultiLineString(g, tc.srid) if tc.err != nil { - require.Errorf(t, err, tc.err.Error()) + require.Error(t, err, tc.err.Error()) } else { require.NoError(t, err) expected := geo.MustParseGeometry(tc.expected) diff --git a/pkg/internal/team/TEAMS.yaml b/pkg/internal/team/TEAMS.yaml index 0ca157cfbd54..c24281ad4826 100644 --- a/pkg/internal/team/TEAMS.yaml +++ b/pkg/internal/team/TEAMS.yaml @@ -44,10 +44,6 @@ cockroachdb/kv: cockroachdb/kv-prs: other triage_column_id: 14242655 label: T-kv -cockroachdb/replication: - aliases: - cockroachdb/repl-prs: other - label: T-kv-replication cockroachdb/spatial: triage_column_id: 9487269 label: T-spatial @@ -69,9 +65,9 @@ cockroachdb/test-eng-prs: triage_column_id: 14041337 label: T-testeng cockroachdb/security: - label: T-cross-product-security + label: T-security-engineering cockroachdb/prodsec: - label: T-cross-product-security + label: T-security-engineering cockroachdb/product-security: label: T-product-security cockroachdb/disaster-recovery: diff --git a/pkg/jobs/errors.go b/pkg/jobs/errors.go index 80e992a5af2a..7c99284181bc 100644 --- a/pkg/jobs/errors.go +++ b/pkg/jobs/errors.go @@ -30,6 +30,8 @@ var errRetryJobSentinel = errors.New("retriable job error") // MarkAsRetryJobError marks an error as a retriable job error which // indicates that the registry should retry the job. +// Note that if a job is _not_ in the NonCancelable state, it will _only_ be +// retried if the error has been marked as a retry job error. func MarkAsRetryJobError(err error) error { return errors.Mark(err, errRetryJobSentinel) } @@ -42,8 +44,10 @@ func IsRetryJobError(err error) bool { // Registry does not retry a job that fails due to a permanent error. var errJobPermanentSentinel = errors.New("permanent job error") -// MarkAsPermanentJobError marks an error as a permanent job error, which indicates -// Registry to not retry the job when it fails due to this error. +// MarkAsPermanentJobError marks an error as a permanent job error, which +// indicates that the Registry should not retry the job when it fails due to this error. +// Note that if a job is in the NonCancelable state, it will always be retried +// _unless_ the error has been marked as a permanent job error. func MarkAsPermanentJobError(err error) error { return errors.Mark(err, errJobPermanentSentinel) } diff --git a/pkg/jobs/job_scheduler.go b/pkg/jobs/job_scheduler.go index 6bf1ef6de7c5..002cee2971b6 100644 --- a/pkg/jobs/job_scheduler.go +++ b/pkg/jobs/job_scheduler.go @@ -136,14 +136,14 @@ func (s *jobScheduler) processSchedule( // In particular, it'd be nice to add more time when repeatedly rescheduling // a job. It would also be nice not to log each event.
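The SetScheduleStatus to SetScheduleStatusf renames below, paired with the function split in scheduled_job.go further down, follow the usual Go convention: printf-like functions carry an f suffix and a (format string, args ...interface{}) signature so go vet's printf analyzer can check call sites, while the plain variant takes a literal message that is never interpreted. A minimal sketch of the split on a toy type, not the real ScheduledJob:

package main

import "fmt"

type status struct{ s string }

// Setf is printf-like; the trailing "f" plus the format/args signature lets
// vet validate calls such as Setf("delayed due to %d already running", n).
func (st *status) Setf(format string, args ...interface{}) {
	st.s = fmt.Sprintf(format, args...)
}

// Set takes a literal message, so a value containing '%' is stored verbatim.
func (st *status) Set(msg string) { st.s = msg }

func main() {
	var st status
	st.Setf("delayed due to %d already running", 3)
	fmt.Println(st.s)
	st.Set("progress: 100%")
	fmt.Println(st.s)
}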
schedule.SetNextRun(s.env.Now().Add(recheckRunningAfter)) - schedule.SetScheduleStatus("delayed due to %d already running", numRunning) + schedule.SetScheduleStatusf("delayed due to %d already running", numRunning) s.metrics.RescheduleWait.Inc(1) return scheduleStorage.Update(ctx, schedule) case jobspb.ScheduleDetails_SKIP: if err := schedule.ScheduleNextRun(); err != nil { return err } - schedule.SetScheduleStatus("rescheduled due to %d already running", numRunning) + schedule.SetScheduleStatusf("rescheduled due to %d already running", numRunning) s.metrics.RescheduleSkip.Inc(1) return scheduleStorage.Update(ctx, schedule) } @@ -432,7 +432,7 @@ func (s *jobScheduler) runDaemon(ctx context.Context, stopper *stop.Stopper) { if err := whenDisabled.withCancelOnDisabled(ctx, &s.Settings.SV, func(ctx context.Context) error { return s.executeSchedules(ctx, maxSchedules) }); err != nil { - log.Errorf(ctx, "error executing schedules: %+v", err) + log.Errorf(ctx, "error executing schedules: %v", err) } } } diff --git a/pkg/jobs/jobs_test.go b/pkg/jobs/jobs_test.go index 1de827c42cc1..40383eafdf0e 100644 --- a/pkg/jobs/jobs_test.go +++ b/pkg/jobs/jobs_test.go @@ -216,10 +216,11 @@ func (rts *registryTestSuite) setUp(t *testing.T) func() { ManagerDisableJobCreation: true, } args.Knobs.UpgradeManager = &upgradebase.TestingKnobs{ - DontUseJobs: true, - SkipJobMetricsPollingJobBootstrap: true, - SkipUpdateSQLActivityJobBootstrap: true, - SkipMVCCStatisticsJobBootstrap: true, + DontUseJobs: true, + SkipJobMetricsPollingJobBootstrap: true, + SkipUpdateSQLActivityJobBootstrap: true, + SkipMVCCStatisticsJobBootstrap: true, + SkipUpdateTableMetadataCacheBootstrap: true, } args.Knobs.KeyVisualizer = &keyvisualizer.TestingKnobs{SkipJobBootstrap: true} diff --git a/pkg/jobs/jobspb/jobs.proto b/pkg/jobs/jobspb/jobs.proto index f7c27027f4a5..3629ba6970f9 100644 --- a/pkg/jobs/jobspb/jobs.proto +++ b/pkg/jobs/jobspb/jobs.proto @@ -251,6 +251,17 @@ message LogicalReplicationDetails { int32 function_id = 2; } DefaultConflictResolution default_conflict_resolution = 7 [(gogoproto.nullable) = false]; + + bool ignore_cdc_ignored_ttl_deletes = 8 [(gogoproto.customname) = "IgnoreCDCIgnoredTTLDeletes"]; + + enum ApplyMode { + Immediate = 0; + Validated = 1; + } + + ApplyMode mode = 9; + + // Next ID: 10. } message LogicalReplicationProgress { @@ -1332,6 +1343,15 @@ message MVCCStatisticsJobProgress { } +message UpdateTableMetadataCacheDetails {} +message UpdateTableMetadataCacheProgress { + // The time at which the job last started a run. + google.protobuf.Timestamp last_run_time = 1 [ + (gogoproto.nullable) = false, + (gogoproto.stdtime) = true + ]; +} + message ImportRollbackDetails { // TableID is the descriptor ID of table that should be rolled back. 
// @@ -1411,6 +1431,7 @@ message Payload { ImportRollbackDetails import_rollback_details = 46; HistoryRetentionDetails history_retention_details = 47; LogicalReplicationDetails logical_replication_details = 48; + UpdateTableMetadataCacheDetails update_table_metadata_cache_details = 49; } reserved 26; // PauseReason is used to describe the reason that the job is currently paused @@ -1488,6 +1509,7 @@ message Progress { ImportRollbackProgress import_rollback_progress = 34; HistoryRetentionProgress HistoryRetentionProgress = 35; LogicalReplicationProgress LogicalReplication = 36; + UpdateTableMetadataCacheProgress table_metadata_cache = 37; } uint64 trace_id = 21 [(gogoproto.nullable) = false, (gogoproto.customname) = "TraceID", (gogoproto.customtype) = "github.com/cockroachdb/cockroach/pkg/util/tracing/tracingpb.TraceID"]; @@ -1527,6 +1549,8 @@ enum Type { IMPORT_ROLLBACK = 25 [(gogoproto.enumvalue_customname) = "TypeImportRollback"]; HISTORY_RETENTION = 26 [(gogoproto.enumvalue_customname) = "TypeHistoryRetention"]; LOGICAL_REPLICATION = 27 [(gogoproto.enumvalue_customname) = "TypeLogicalReplication"]; + AUTO_CREATE_PARTIAL_STATS = 28 [(gogoproto.enumvalue_customname) = "TypeAutoCreatePartialStats"]; + UPDATE_TABLE_METADATA_CACHE = 29 [(gogoproto.enumvalue_customname) = "TypeUpdateTableMetadataCache"]; } message Job { diff --git a/pkg/jobs/jobspb/wrap.go b/pkg/jobs/jobspb/wrap.go index 6a08233cbd6b..af0f8a8daa84 100644 --- a/pkg/jobs/jobspb/wrap.go +++ b/pkg/jobs/jobspb/wrap.go @@ -49,6 +49,7 @@ var ( _ Details = ImportRollbackDetails{} _ Details = HistoryRetentionDetails{} _ Details = LogicalReplicationDetails{} + _ Details = UpdateTableMetadataCacheDetails{} ) // ProgressDetails is a marker interface for job progress details proto structs. @@ -77,6 +78,7 @@ var ( _ ProgressDetails = ImportRollbackProgress{} _ ProgressDetails = HistoryRetentionProgress{} _ ProgressDetails = LogicalReplicationProgress{} + _ ProgressDetails = UpdateTableMetadataCacheProgress{} ) // Type returns the payload's job type and panics if the type is invalid. @@ -139,6 +141,10 @@ func (rs ReplicationStatus) String() string { // running CREATE STATISTICS manually. const AutoStatsName = "__auto__" +// AutoPartialStatsName is the name to use for partial statistics created +// automatically. +const AutoPartialStatsName = "__auto_partial__" + // ImportStatsName is the name to use for statistics created automatically // during import. const ImportStatsName = "__import__" @@ -154,6 +160,7 @@ const MergedStatsName = "__merged__" // AutomaticJobTypes is a list of automatic job types that currently exist. var AutomaticJobTypes = [...]Type{ TypeAutoCreateStats, + TypeAutoCreatePartialStats, TypeAutoSpanConfigReconciliation, TypeAutoSQLStatsCompaction, TypeAutoSchemaTelemetry, @@ -164,6 +171,7 @@ var AutomaticJobTypes = [...]Type{ TypeKeyVisualizer, TypeAutoUpdateSQLActivity, TypeMVCCStatisticsUpdate, + TypeUpdateTableMetadataCache, } // DetailsType returns the type for a payload detail. 
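Registering the new job type above means touching several parallel structures in wrap.go: the compile-time assertion lists (var _ Details = ..., var _ ProgressDetails = ...), the DetailsType switch, JobDetailsForEveryJobType, and the symmetric Wrap*/Unwrap* switches in the hunks below. A toy version of that wrap/unwrap pattern, with invented names, showing why the two switches must stay in lockstep:

package main

import "fmt"

// A union ("oneof") with one wrapper struct per variant.
type isDetails interface{ isDetails() }

type backupDetails struct{}
type cacheDetails struct{}

type payloadBackup struct{ d backupDetails }
type payloadCache struct{ d cacheDetails }

func (payloadBackup) isDetails() {}
func (payloadCache) isDetails()  {}

// wrap and unwrap must enumerate the same variants; a missed case surfaces
// as a panic in wrap or a nil from unwrap.
func wrap(d interface{}) isDetails {
	switch d := d.(type) {
	case backupDetails:
		return payloadBackup{d: d}
	case cacheDetails:
		return payloadCache{d: d}
	default:
		panic(fmt.Sprintf("unknown details type %T", d))
	}
}

func unwrap(w isDetails) interface{} {
	switch w := w.(type) {
	case payloadBackup:
		return w.d
	case payloadCache:
		return w.d
	default:
		return nil
	}
}

func main() {
	fmt.Printf("%T\n", unwrap(wrap(cacheDetails{}))) // main.cacheDetails
}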
@@ -183,6 +191,8 @@ func DetailsType(d isPayload_Details) (Type, error) { createStatsName := d.CreateStats.Name if createStatsName == AutoStatsName { return TypeAutoCreateStats, nil + } else if createStatsName == AutoPartialStatsName { + return TypeAutoCreatePartialStats, nil } return TypeCreateStats, nil case *Payload_SchemaChangeGC: @@ -225,6 +235,8 @@ func DetailsType(d isPayload_Details) (Type, error) { return TypeHistoryRetention, nil case *Payload_LogicalReplicationDetails: return TypeLogicalReplication, nil + case *Payload_UpdateTableMetadataCacheDetails: + return TypeUpdateTableMetadataCache, nil default: return TypeUnspecified, errors.Newf("Payload.Type called on a payload with an unknown details type: %T", d) } @@ -253,6 +265,9 @@ var JobDetailsForEveryJobType = map[Type]Details{ TypeAutoCreateStats: CreateStatsDetails{ Name: AutoStatsName, }, + TypeAutoCreatePartialStats: CreateStatsDetails{ + Name: AutoPartialStatsName, + }, TypeSchemaChangeGC: SchemaChangeGCDetails{}, TypeTypeSchemaChange: TypeSchemaChangeDetails{}, TypeReplicationStreamIngestion: StreamIngestionDetails{}, @@ -273,6 +288,7 @@ var JobDetailsForEveryJobType = map[Type]Details{ TypeImportRollback: ImportRollbackDetails{}, TypeHistoryRetention: HistoryRetentionDetails{}, TypeLogicalReplication: LogicalReplicationDetails{}, + TypeUpdateTableMetadataCache: UpdateTableMetadataCacheDetails{}, } // WrapProgressDetails wraps a ProgressDetails object in the protobuf wrapper @@ -336,6 +352,8 @@ func WrapProgressDetails(details ProgressDetails) interface { return &Progress_HistoryRetentionProgress{HistoryRetentionProgress: &d} case LogicalReplicationProgress: return &Progress_LogicalReplication{LogicalReplication: &d} + case UpdateTableMetadataCacheProgress: + return &Progress_TableMetadataCache{TableMetadataCache: &d} default: panic(errors.AssertionFailedf("WrapProgressDetails: unknown progress type %T", d)) } @@ -397,6 +415,8 @@ func (p *Payload) UnwrapDetails() Details { return *d.HistoryRetentionDetails case *Payload_LogicalReplicationDetails: return *d.LogicalReplicationDetails + case *Payload_UpdateTableMetadataCacheDetails: + return *d.UpdateTableMetadataCacheDetails default: return nil } @@ -458,6 +478,8 @@ func (p *Progress) UnwrapDetails() ProgressDetails { return *d.HistoryRetentionProgress case *Progress_LogicalReplication: return *d.LogicalReplication + case *Progress_TableMetadataCache: + return *d.TableMetadataCache default: return nil } @@ -543,6 +565,8 @@ func WrapPayloadDetails(details Details) interface { return &Payload_HistoryRetentionDetails{HistoryRetentionDetails: &d} case LogicalReplicationDetails: return &Payload_LogicalReplicationDetails{LogicalReplicationDetails: &d} + case UpdateTableMetadataCacheDetails: + return &Payload_UpdateTableMetadataCacheDetails{UpdateTableMetadataCacheDetails: &d} default: panic(errors.AssertionFailedf("jobs.WrapPayloadDetails: unknown details type %T", d)) } @@ -578,7 +602,7 @@ const ( func (Type) SafeValue() {} // NumJobTypes is the number of jobs types. -const NumJobTypes = 28 +const NumJobTypes = 30 // ChangefeedDetailsMarshaler allows for dependency injection of // cloud.SanitizeExternalStorageURI to avoid the dependency from this diff --git a/pkg/jobs/registry.go b/pkg/jobs/registry.go index 4d8b7a57169f..c7aed6a7ff84 100644 --- a/pkg/jobs/registry.go +++ b/pkg/jobs/registry.go @@ -318,6 +318,8 @@ const ( // MVCCStatisticsJobID A static job ID used for the MVCC statistics update // job. 
MVCCStatisticsJobID = jobspb.JobID(104) + + // UpdateTableMetadataCacheJobID is a static job ID used for the update + // table metadata cache job. + UpdateTableMetadataCacheJobID = jobspb.JobID(105) ) // MakeJobID generates a new job ID. @@ -1584,7 +1586,7 @@ func (r *Registry) stepThroughStateMachine( log.Errorf(ctx, "%s job %d: stepping through state %s with unexpected error: %+v", jobType, job.ID(), status, jobErr) } } else { - if jobType == jobspb.TypeAutoCreateStats { + if jobType == jobspb.TypeAutoCreateStats || jobType == jobspb.TypeAutoCreatePartialStats { log.VInfof(ctx, 1, "%s job %d: stepping through state %s", jobType, job.ID(), status) } else { log.Infof(ctx, "%s job %d: stepping through state %s", jobType, job.ID(), status) diff --git a/pkg/jobs/registry_test.go b/pkg/jobs/registry_test.go index dc9308c3e951..ba96f85e07a6 100644 --- a/pkg/jobs/registry_test.go +++ b/pkg/jobs/registry_test.go @@ -126,9 +126,10 @@ func TestRegistryGC(t *testing.T) { }, UpgradeManager: &upgradebase.TestingKnobs{ // This test wants to look at job records. - DontUseJobs: true, - SkipJobMetricsPollingJobBootstrap: true, - SkipMVCCStatisticsJobBootstrap: true, + DontUseJobs: true, + SkipJobMetricsPollingJobBootstrap: true, + SkipMVCCStatisticsJobBootstrap: true, + SkipUpdateTableMetadataCacheBootstrap: true, }, KeyVisualizer: &keyvisualizer.TestingKnobs{ SkipJobBootstrap: true, @@ -284,10 +285,11 @@ func TestRegistryGCPagination(t *testing.T) { }, UpgradeManager: &upgradebase.TestingKnobs{ // This test wants to count job records. - DontUseJobs: true, - SkipJobMetricsPollingJobBootstrap: true, - SkipUpdateSQLActivityJobBootstrap: true, - SkipMVCCStatisticsJobBootstrap: true, + DontUseJobs: true, + SkipJobMetricsPollingJobBootstrap: true, + SkipUpdateSQLActivityJobBootstrap: true, + SkipMVCCStatisticsJobBootstrap: true, + SkipUpdateTableMetadataCacheBootstrap: true, }, KeyVisualizer: &keyvisualizer.TestingKnobs{ SkipJobBootstrap: true, @@ -758,10 +760,11 @@ func TestRetriesWithExponentialBackoff(t *testing.T) { ManagerDisableJobCreation: true, }, UpgradeManager: &upgradebase.TestingKnobs{ - DontUseJobs: true, - SkipJobMetricsPollingJobBootstrap: true, - SkipUpdateSQLActivityJobBootstrap: true, - SkipMVCCStatisticsJobBootstrap: true, + DontUseJobs: true, + SkipJobMetricsPollingJobBootstrap: true, + SkipUpdateSQLActivityJobBootstrap: true, + SkipMVCCStatisticsJobBootstrap: true, + SkipUpdateTableMetadataCacheBootstrap: true, }, KeyVisualizer: &keyvisualizer.TestingKnobs{ SkipJobBootstrap: true, diff --git a/pkg/jobs/scheduled_job.go b/pkg/jobs/scheduled_job.go index c8c79598f26b..4f355270640b 100644 --- a/pkg/jobs/scheduled_job.go +++ b/pkg/jobs/scheduled_job.go @@ -239,12 +239,14 @@ func (j *ScheduledJob) SetScheduleDetails(details jobspb.ScheduleDetails) { } // SetScheduleStatus sets schedule status. -func (j *ScheduledJob) SetScheduleStatus(fmtOrMsg string, args ...interface{}) { - if len(args) == 0 { - j.rec.ScheduleState.Status = fmtOrMsg - } else { - j.rec.ScheduleState.Status = fmt.Sprintf(fmtOrMsg, args...) - } +func (j *ScheduledJob) SetScheduleStatus(msg string) { + j.rec.ScheduleState.Status = msg + j.markDirty("schedule_state") +} + +// SetScheduleStatusf formats the given arguments per the format string and +// sets the result as the schedule status. +func (j *ScheduledJob) SetScheduleStatusf(format string, args ...interface{}) { + j.rec.ScheduleState.Status = fmt.Sprintf(format, args...)
j.markDirty("schedule_state") } diff --git a/pkg/jobs/scheduled_job_executor.go b/pkg/jobs/scheduled_job_executor.go index 8df8a02bc201..95c0fda24155 100644 --- a/pkg/jobs/scheduled_job_executor.go +++ b/pkg/jobs/scheduled_job_executor.go @@ -156,13 +156,13 @@ func RegisterExecutorsMetrics(registry *metric.Registry) error { func DefaultHandleFailedRun(schedule *ScheduledJob, fmtOrMsg string, args ...interface{}) { switch schedule.ScheduleDetails().OnError { case jobspb.ScheduleDetails_RETRY_SOON: - schedule.SetScheduleStatus("retrying: "+fmtOrMsg, args...) + schedule.SetScheduleStatusf("retrying: "+fmtOrMsg, args...) schedule.SetNextRun(schedule.env.Now().Add(retryFailedJobAfter)) // TODO(yevgeniy): backoff case jobspb.ScheduleDetails_PAUSE_SCHED: schedule.Pause() - schedule.SetScheduleStatus("schedule paused: "+fmtOrMsg, args...) + schedule.SetScheduleStatusf("schedule paused: "+fmtOrMsg, args...) case jobspb.ScheduleDetails_RETRY_SCHED: - schedule.SetScheduleStatus("reschedule: "+fmtOrMsg, args...) + schedule.SetScheduleStatusf("reschedule: "+fmtOrMsg, args...) } } diff --git a/pkg/jobs/utils.go b/pkg/jobs/utils.go index d91d97493ddf..098a58b3c3f5 100644 --- a/pkg/jobs/utils.go +++ b/pkg/jobs/utils.go @@ -29,22 +29,9 @@ import ( func RunningJobExists( ctx context.Context, ignoreJobID jobspb.JobID, txn isql.Txn, jobTypes ...jobspb.Type, ) (exists bool, retErr error) { - var typeStrs string - switch len(jobTypes) { - case 0: - return false, errors.AssertionFailedf("must specify job types") - case 1: - typeStrs = fmt.Sprintf("('%s')", jobTypes[0].String()) - case 2: - typeStrs = fmt.Sprintf("('%s', '%s')", jobTypes[0].String(), jobTypes[1].String()) - default: - var s strings.Builder - fmt.Fprintf(&s, "('%s'", jobTypes[0].String()) - for _, typ := range jobTypes[1:] { - fmt.Fprintf(&s, ", '%s'", typ.String()) - } - s.WriteByte(')') - typeStrs = s.String() + typeStrs, err := getJobTypeStrs(jobTypes) + if err != nil { + return false, err } orderBy := " ORDER BY created" @@ -87,6 +74,65 @@ LIMIT 1` return ok && jobspb.JobID(*it.Cur()[0].(*tree.DInt)) != ignoreJobID, nil } +// RunningJobs returns the IDs of all jobs of the given types in the pending, +// running, or paused status, optionally ignoring the job with the ID specified +// by ignoreJobID as well as any jobs created after it, if the passed ID is not +// InvalidJobID. +func RunningJobs( + ctx context.Context, ignoreJobID jobspb.JobID, txn isql.Txn, jobTypes ...jobspb.Type, +) (jobIDs []jobspb.JobID, retErr error) { + typeStrs, err := getJobTypeStrs(jobTypes) + if err != nil { + return jobIDs, err + } + + orderBy := " ORDER BY created" + if ignoreJobID == jobspb.InvalidJobID { + // There is no need to order by the created column if there is no job to + // ignore. + orderBy = "" + } + + stmt := ` +SELECT + id +FROM + system.jobs@jobs_status_created_idx +WHERE + job_type IN ` + typeStrs + ` AND + status IN ` + NonTerminalStatusTupleString + orderBy + it, err := txn.QueryIterator( + ctx, + "find-all-running-jobs-of-type", + txn.KV(), + stmt, + ) + if err != nil { + return nil, err + } + // We have to make sure to close the iterator since we might return from the + // for loop early (before Next() returns false). 
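The defer on the next line implements what the comment just above describes. Capturing the iterator's Close error this way requires a named error return, because an ordinary return value is already fixed by the time deferred functions run; errors.CombineErrors then folds the Close error into whatever the body returned. A self-contained sketch with a stand-in iterator (not the real isql iterator):

package main

import (
	"fmt"

	"github.com/cockroachdb/errors"
)

type rowIter struct{ rows, pos int }

func (it *rowIter) Next() (bool, error) { it.pos++; return it.pos <= it.rows, nil }
func (it *rowIter) Cur() int            { return it.pos }
func (it *rowIter) Close() error        { return nil }

// collectIDs names its error return (retErr) so the deferred Close can
// overwrite it, preserving both the loop's error and any Close error.
func collectIDs(it *rowIter) (ids []int, retErr error) {
	defer func() { retErr = errors.CombineErrors(retErr, it.Close()) }()
	for {
		ok, err := it.Next()
		if err != nil {
			return ids, err
		}
		if !ok {
			return ids, nil
		}
		ids = append(ids, it.Cur())
	}
}

func main() {
	ids, err := collectIDs(&rowIter{rows: 3})
	fmt.Println(ids, err) // [1 2 3] <nil>
}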
+ defer func() { retErr = errors.CombineErrors(retErr, it.Close()) }() + + for { + ok, err := it.Next(ctx) + if err != nil { + return jobIDs, err + } + if !ok { + break + } + jobID := jobspb.JobID(*it.Cur()[0].(*tree.DInt)) + // If we encounter the jobID to ignore, we can break early since all + // additional rows must be created after the ignored ID. + if jobID == ignoreJobID { + break + } + jobIDs = append(jobIDs, jobID) + } + return jobIDs, nil +} + // JobExists returns true if there is a row corresponding to jobID in the // system.jobs table. func JobExists( @@ -116,3 +162,27 @@ func JobCoordinatorID( } return int32(coordinatorID), nil } + +// getJobTypeStrs is a helper function that returns a string representation of +// the job types for use in SQL queries, such as `('type1', 'type2')`. It +// returns an error if no job types are provided. +func getJobTypeStrs(jobTypes []jobspb.Type) (string, error) { + var typeStrs string + switch len(jobTypes) { + case 0: + return "", errors.AssertionFailedf("must specify job types") + case 1: + typeStrs = fmt.Sprintf("('%s')", jobTypes[0].String()) + case 2: + typeStrs = fmt.Sprintf("('%s', '%s')", jobTypes[0].String(), jobTypes[1].String()) + default: + var s strings.Builder + fmt.Fprintf(&s, "('%s'", jobTypes[0].String()) + for _, typ := range jobTypes[1:] { + fmt.Fprintf(&s, ", '%s'", typ.String()) + } + s.WriteByte(')') + typeStrs = s.String() + } + return typeStrs, nil +} diff --git a/pkg/keys/constants.go b/pkg/keys/constants.go index c5e7dd43f37c..4aea293fd3f7 100644 --- a/pkg/keys/constants.go +++ b/pkg/keys/constants.go @@ -301,6 +301,10 @@ var ( // BootstrapVersionKey is the key at which clusters bootstrapped with a version // > 1.0 persist the version at which they were bootstrapped. BootstrapVersionKey = roachpb.Key(makeKey(SystemPrefix, roachpb.RKey("bootstrap-version"))) + // GracePeriodInitTimestamp is used for license enforcement. It marks the timestamp + // set during cluster initialization, by which a license must be installed to avoid + // throttling. The value is stored as the number of seconds since the Unix epoch. + GracePeriodInitTimestamp = roachpb.Key(makeKey(SystemPrefix, roachpb.RKey("lic-gpi-ts"))) // // NodeIDGenerator is the global node ID generator sequence. NodeIDGenerator = roachpb.Key(makeKey(SystemPrefix, roachpb.RKey("node-idgen"))) diff --git a/pkg/keys/doc.go b/pkg/keys/doc.go index 1563eb9af3d1..e2407920d97f 100644 --- a/pkg/keys/doc.go +++ b/pkg/keys/doc.go @@ -246,14 +246,15 @@ var _ = [...]interface{}{ // 2. System keys: This is where we store global, system data which is // replicated across the cluster. 
SystemPrefix, - NodeLivenessPrefix, // "\x00liveness-" - BootstrapVersionKey, // "bootstrap-version" - NodeIDGenerator, // "node-idgen" - RangeIDGenerator, // "range-idgen" - StatusPrefix, // "status-" - StatusNodePrefix, // "status-node-" - StoreIDGenerator, // "store-idgen" - StartupMigrationPrefix, // "system-version/" + NodeLivenessPrefix, // "\x00liveness-" + BootstrapVersionKey, // "bootstrap-version" + GracePeriodInitTimestamp, // "lic-gpi-ts" + NodeIDGenerator, // "node-idgen" + RangeIDGenerator, // "range-idgen" + StatusPrefix, // "status-" + StatusNodePrefix, // "status-node-" + StoreIDGenerator, // "store-idgen" + StartupMigrationPrefix, // "system-version/" // StartupMigrationLease, // "system-version/lease" - removed in 23.1 TimeseriesPrefix, // "tsd" SystemSpanConfigPrefix, // "xffsys-scfg" diff --git a/pkg/kv/bulk/sst_batcher_test.go b/pkg/kv/bulk/sst_batcher_test.go index 54243cf9cf38..05c49c375c98 100644 --- a/pkg/kv/bulk/sst_batcher_test.go +++ b/pkg/kv/bulk/sst_batcher_test.go @@ -57,14 +57,14 @@ func TestDuplicateHandling(t *testing.T) { s, _, kvDB := serverutils.StartServer(t, base.TestServerArgs{}) defer s.Stopper().Stop(ctx) - expectRevisionCount := func(startKey roachpb.Key, endKey roachpb.Key, count int) { + expectRevisionCount := func(startKey roachpb.Key, endKey roachpb.Key, count int, exportStartTime hlc.Timestamp) { req := &kvpb.ExportRequest{ RequestHeader: kvpb.RequestHeader{ Key: startKey, EndKey: endKey, }, MVCCFilter: kvpb.MVCCFilter_All, - StartTime: hlc.Timestamp{}, + StartTime: exportStartTime, } header := kvpb.Header{Timestamp: s.Clock().Now()} resp, err := kv.SendWrappedWith(ctx, @@ -99,11 +99,12 @@ func TestDuplicateHandling(t *testing.T) { type keyBuilder func(i int, ts int) storage.MVCCKey type testCase struct { - name string - skipDuplicates bool - ingestAll bool - addKeys func(*testing.T, *bulk.SSTBatcher, keyBuilder) storage.MVCCKey - expectedCount int + name string + skipDuplicates bool + ingestAll bool + addKeys func(*testing.T, *bulk.SSTBatcher, keyBuilder) storage.MVCCKey + expectedCount int + exportStartTime hlc.Timestamp } testCases := []testCase{ { @@ -122,6 +123,9 @@ func TestDuplicateHandling(t *testing.T) { { name: "ingestAll does not error on key-value matches at different timestamps", ingestAll: true, + // Set the export startTime to ensure all revisions are read, or fail if + // the GC threshold has advanced past the start time. + exportStartTime: hlc.Timestamp{WallTime: int64(tsStart) - 1}, addKeys: func(t *testing.T, b *bulk.SSTBatcher, k keyBuilder) storage.MVCCKey { for i := 0; i < keyCount; i++ { require.NoError(t, b.AddMVCCKey(ctx, k(i+1, tsStart+1), value)) @@ -134,6 +138,9 @@ func TestDuplicateHandling(t *testing.T) { { name: "ingestAll does not error on key matches at different timestamps", ingestAll: true, + // Set the export startTime to ensure all revisions are read, or fail if + // the GC threshold has advanced past the start time. + exportStartTime: hlc.Timestamp{WallTime: int64(tsStart) - 1}, addKeys: func(t *testing.T, b *bulk.SSTBatcher, k keyBuilder) storage.MVCCKey { for i := 0; i < keyCount; i++ { require.NoError(t, b.AddMVCCKey(ctx, k(i+1, tsStart+1), value)) @@ -208,7 +215,7 @@ func TestDuplicateHandling(t *testing.T) { endKey := tc.addKeys(t, b, k) if tc.expectedCount > 0 { require.NoError(t, b.Flush(ctx)) - expectRevisionCount(k(0, tsStart).Key, endKey.Key, tc.expectedCount) + expectRevisionCount(k(0, tsStart).Key, endKey.Key, tc.expectedCount, tc.exportStartTime) } }) } diff --git 
a/pkg/kv/kvclient/kvcoord/BUILD.bazel b/pkg/kv/kvclient/kvcoord/BUILD.bazel index a4268d75f497..aea076f63278 100644 --- a/pkg/kv/kvclient/kvcoord/BUILD.bazel +++ b/pkg/kv/kvclient/kvcoord/BUILD.bazel @@ -70,6 +70,7 @@ go_library( "//pkg/util/future", "//pkg/util/grpcutil", "//pkg/util/hlc", + "//pkg/util/humanizeutil", "//pkg/util/iterutil", "//pkg/util/limit", "//pkg/util/log", diff --git a/pkg/kv/kvclient/kvcoord/dist_sender.go b/pkg/kv/kvclient/kvcoord/dist_sender.go index 41b2f39fa521..d4a489d950ca 100644 --- a/pkg/kv/kvclient/kvcoord/dist_sender.go +++ b/pkg/kv/kvclient/kvcoord/dist_sender.go @@ -2551,7 +2551,7 @@ func (ds *DistSender) sendToReplicas( // First order by latency, then move the leaseholder to the front of the // list, if it is known. if !ds.dontReorderReplicas { - replicas.OptimizeReplicaOrder(ds.st, ds.nodeIDGetter(), ds.healthFunc, ds.latencyFunc, ds.locality) + replicas.OptimizeReplicaOrder(ctx, ds.st, ds.nodeIDGetter(), ds.healthFunc, ds.latencyFunc, ds.locality) } idx := -1 @@ -2571,8 +2571,8 @@ func (ds *DistSender) sendToReplicas( case kvpb.RoutingPolicy_NEAREST: // Order by latency. - log.VEvent(ctx, 2, "routing to nearest replica; leaseholder not required") - replicas.OptimizeReplicaOrder(ds.st, ds.nodeIDGetter(), ds.healthFunc, ds.latencyFunc, ds.locality) + replicas.OptimizeReplicaOrder(ctx, ds.st, ds.nodeIDGetter(), ds.healthFunc, ds.latencyFunc, ds.locality) + log.VEventf(ctx, 2, "routing to nearest replica; leaseholder not required order=%v", replicas) default: log.Fatalf(ctx, "unknown routing policy: %s", ba.RoutingPolicy) diff --git a/pkg/kv/kvclient/kvcoord/dist_sender_rangefeed.go b/pkg/kv/kvclient/kvcoord/dist_sender_rangefeed.go index 100eec353dfb..81d6293a8ada 100644 --- a/pkg/kv/kvclient/kvcoord/dist_sender_rangefeed.go +++ b/pkg/kv/kvclient/kvcoord/dist_sender_rangefeed.go @@ -646,7 +646,7 @@ func newTransportForRange( if err != nil { return nil, err } - replicas.OptimizeReplicaOrder(ds.st, ds.nodeIDGetter(), ds.healthFunc, ds.latencyFunc, ds.locality) + replicas.OptimizeReplicaOrder(ctx, ds.st, ds.nodeIDGetter(), ds.healthFunc, ds.latencyFunc, ds.locality) opts := SendOptions{class: defRangefeedConnClass} return ds.transportFactory(opts, replicas), nil } diff --git a/pkg/kv/kvclient/kvcoord/replica_slice.go b/pkg/kv/kvclient/kvcoord/replica_slice.go index dcbfda6ec6d1..bc2a164b49b6 100644 --- a/pkg/kv/kvclient/kvcoord/replica_slice.go +++ b/pkg/kv/kvclient/kvcoord/replica_slice.go @@ -19,9 +19,11 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv/kvclient" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings/cluster" + "github.com/cockroachdb/cockroach/pkg/util/humanizeutil" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/shuffle" "github.com/cockroachdb/errors" + "github.com/cockroachdb/redact" ) // ReplicaInfo extends the Replica structure with the associated node @@ -41,6 +43,25 @@ type ReplicaInfo struct { // A ReplicaSlice is a slice of ReplicaInfo. type ReplicaSlice []ReplicaInfo +func (rs ReplicaSlice) String() string { + return redact.StringWithoutMarkers(rs) +} + +// SafeFormat implements the redact.SafeFormatter interface. 
+func (rs ReplicaSlice) SafeFormat(w redact.SafePrinter, _ rune) { + var buf redact.StringBuilder + buf.Print("[") + for i, r := range rs { + if i > 0 { + buf.Print(",") + } + buf.Printf("%v(health=%v match=%d latency=%v)", + r, r.healthy, r.tierMatchLength, humanizeutil.Duration(r.latency)) + } + buf.Print("]") + w.Print(buf) +} + // ReplicaSliceFilter controls which kinds of replicas are to be included in // the slice for routing BatchRequests to. type ReplicaSliceFilter int @@ -199,6 +220,7 @@ type HealthFunc func(roachpb.NodeID) bool // leaseholder is known by the caller, the caller will move it to the // front if appropriate. func (rs ReplicaSlice) OptimizeReplicaOrder( + ctx context.Context, st *cluster.Settings, nodeID roachpb.NodeID, healthFn HealthFunc, @@ -208,6 +230,7 @@ func (rs ReplicaSlice) OptimizeReplicaOrder( // If we don't know which node we're on or its locality, and we don't have // latency information to other nodes, send the RPCs randomly. if nodeID == 0 && latencyFn == nil && len(locality.Tiers) == 0 { + log.VEvent(ctx, 2, "randomly shuffling replicas to route to") shuffle.Shuffle(rs) return } diff --git a/pkg/kv/kvclient/kvcoord/replica_slice_test.go b/pkg/kv/kvclient/kvcoord/replica_slice_test.go index 955567882cbb..02e787d23c86 100644 --- a/pkg/kv/kvclient/kvcoord/replica_slice_test.go +++ b/pkg/kv/kvclient/kvcoord/replica_slice_test.go @@ -327,7 +327,7 @@ func TestReplicaSliceOptimizeReplicaOrder(t *testing.T) { } // Randomize the input order, as it's not supposed to matter. shuffle.Shuffle(test.slice) - test.slice.OptimizeReplicaOrder(st, test.nodeID, healthFn, latencyFn, test.locality) + test.slice.OptimizeReplicaOrder(context.Background(), st, test.nodeID, healthFn, latencyFn, test.locality) var sortedNodes []roachpb.NodeID sortedNodes = append(sortedNodes, test.slice[0].NodeID) for i := 1; i < len(test.slice); i++ { diff --git a/pkg/kv/kvclient/kvtenant/BUILD.bazel b/pkg/kv/kvclient/kvtenant/BUILD.bazel index e53c0af02985..96bed8c6b5fc 100644 --- a/pkg/kv/kvclient/kvtenant/BUILD.bazel +++ b/pkg/kv/kvclient/kvtenant/BUILD.bazel @@ -20,6 +20,7 @@ go_library( "//pkg/kv/kvclient/kvcoord", "//pkg/kv/kvclient/rangecache", "//pkg/kv/kvpb", + "//pkg/multitenant/mtinfo", "//pkg/multitenant/mtinfopb", "//pkg/multitenant/tenantcapabilities", "//pkg/multitenant/tenantcapabilities/tenantcapabilitiespb", diff --git a/pkg/kv/kvclient/kvtenant/connector.go b/pkg/kv/kvclient/kvtenant/connector.go index 0c276bace4aa..4ac19c90f691 100644 --- a/pkg/kv/kvclient/kvtenant/connector.go +++ b/pkg/kv/kvclient/kvtenant/connector.go @@ -27,6 +27,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord" "github.com/cockroachdb/cockroach/pkg/kv/kvclient/rangecache" "github.com/cockroachdb/cockroach/pkg/kv/kvpb" + "github.com/cockroachdb/cockroach/pkg/multitenant/mtinfo" "github.com/cockroachdb/cockroach/pkg/multitenant/mtinfopb" "github.com/cockroachdb/cockroach/pkg/multitenant/tenantcapabilities" "github.com/cockroachdb/cockroach/pkg/multitenant/tenantcapabilities/tenantcapabilitiespb" @@ -76,6 +77,11 @@ type Connector interface { // an update channel to track changes TenantInfo() (tenantcapabilities.Entry, <-chan struct{}) + // ReadFromTenantInfoAccessor allows retrieving the other tenant, if any, from + // which the calling tenant should configure itself to read, along with the + // latest timestamp at which it should perform such reads at this time. 
+ mtinfo.ReadFromTenantInfoAccessor + // NodeDescStore provides information on each of the KV nodes in the cluster // in the form of NodeDescriptors and StoreDescriptors. This obviates the // need for SQL-only tenant servers to join the cluster-wide gossip diff --git a/pkg/kv/kvclient/kvtenant/setting_overrides.go b/pkg/kv/kvclient/kvtenant/setting_overrides.go index 4d0cb1d38c96..d73164b1d250 100644 --- a/pkg/kv/kvclient/kvtenant/setting_overrides.go +++ b/pkg/kv/kvclient/kvtenant/setting_overrides.go @@ -18,7 +18,10 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv/kvpb" "github.com/cockroachdb/cockroach/pkg/multitenant/mtinfopb" "github.com/cockroachdb/cockroach/pkg/multitenant/tenantcapabilities" + "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/server/serverpb" "github.com/cockroachdb/cockroach/pkg/settings" + "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/errors" "github.com/cockroachdb/errors/errorspb" @@ -197,6 +200,27 @@ func (c *connector) TenantInfo() (tenantcapabilities.Entry, <-chan struct{}) { }, c.metadataMu.notifyCh } +// ReadFromTenantInfo allows retrieving the other tenant, if any, from which the +// calling tenant should configure itself to read, along with the latest +// timestamp at which it should perform such reads at this time. +func (c *connector) ReadFromTenantInfo( + ctx context.Context, +) (roachpb.TenantID, hlc.Timestamp, error) { + if c.tenantID.IsSystem() { + return roachpb.TenantID{}, hlc.Timestamp{}, nil + } + + client, err := c.getClient(ctx) + if err != nil { + return roachpb.TenantID{}, hlc.Timestamp{}, err + } + resp, err := client.ReadFromTenantInfo(ctx, &serverpb.ReadFromTenantInfoRequest{TenantID: c.tenantID}) + if err != nil { + return roachpb.TenantID{}, hlc.Timestamp{}, err + } + return resp.ReadFrom, resp.ReadAt, nil +} + // processSettingsEvent updates the setting overrides based on the event. func (c *connector) processSettingsEvent( ctx context.Context, e *kvpb.TenantSettingsEvent, diff --git a/pkg/kv/kvclient/rangefeed/rangefeed_external_test.go b/pkg/kv/kvclient/rangefeed/rangefeed_external_test.go index 993093f63375..a2643df1a4df 100644 --- a/pkg/kv/kvclient/rangefeed/rangefeed_external_test.go +++ b/pkg/kv/kvclient/rangefeed/rangefeed_external_test.go @@ -1637,9 +1637,9 @@ func (c *channelSink) Context() context.Context { return c.ctx } -func (c *channelSink) SendIsThreadSafe() {} +func (c *channelSink) SendUnbufferedIsThreadSafe() {} -func (c *channelSink) Send(e *kvpb.RangeFeedEvent) error { +func (c *channelSink) SendUnbuffered(e *kvpb.RangeFeedEvent) error { select { case c.ch <- e: return nil diff --git a/pkg/kv/kvpb/api.go b/pkg/kv/kvpb/api.go index b5c83840f60f..c86313e79911 100644 --- a/pkg/kv/kvpb/api.go +++ b/pkg/kv/kvpb/api.go @@ -2527,13 +2527,13 @@ func (s *ScanStats) String() string { type RangeFeedEventSink interface { // Context returns the context for this stream. Context() context.Context - // Send blocks until it sends the RangeFeedEvent, the stream is done, or the - // stream breaks. Send must be safe to call on the same stream in different - // goroutines. - Send(*RangeFeedEvent) error - // SendIsThreadSafe is a no-op declaration method. It is a contract that the - // interface has a thread-safe Send method. 
- SendIsThreadSafe() + // SendUnbuffered blocks until it sends the RangeFeedEvent, the stream is + // done, or the stream breaks. SendUnbuffered must be safe to call on the same + // stream in different goroutines. + SendUnbuffered(*RangeFeedEvent) error + // SendUnbufferedIsThreadSafe is a no-op declaration method. It is a contract + // that the interface has a thread-safe SendUnbuffered method. + SendUnbufferedIsThreadSafe() } // RangeFeedEventProducer is an adapter for receiving rangefeed events with either diff --git a/pkg/kv/kvpb/api.proto b/pkg/kv/kvpb/api.proto index 2e668ff73602..51e7637d3eb5 100644 --- a/pkg/kv/kvpb/api.proto +++ b/pkg/kv/kvpb/api.proto @@ -1377,8 +1377,8 @@ message QueryTxnRequest { // A QueryTxnResponse is the return value from the QueryTxn() method. message QueryTxnResponse { ResponseHeader header = 1 [(gogoproto.nullable) = false, (gogoproto.embed) = true]; - // Contains the current state of the queried transaction. If the queried - // transaction record does not exist, this will be empty. + // Contains the current state of the queried transaction. If a transaction + // record is not found, one will be synthesized and returned. Transaction queried_txn = 2 [(gogoproto.nullable) = false]; // txn_record_exists is set if the queried_txn comes from a transaction record // read from the database. If not set, then the txn record was "synthesized". diff --git a/pkg/kv/kvserver/BUILD.bazel b/pkg/kv/kvserver/BUILD.bazel index c926da290a37..8ba2b2009d7d 100644 --- a/pkg/kv/kvserver/BUILD.bazel +++ b/pkg/kv/kvserver/BUILD.bazel @@ -12,6 +12,7 @@ go_library( "debug_print.go", "doc.go", "flow_control_integration.go", + "flow_control_raft.go", "flow_control_raft_transport.go", "flow_control_replica.go", "flow_control_replica_integration.go", @@ -146,6 +147,8 @@ go_library( "//pkg/kv/kvserver/kvflowcontrol/kvflowdispatch", "//pkg/kv/kvserver/kvflowcontrol/kvflowhandle", "//pkg/kv/kvserver/kvflowcontrol/node_rac2", + "//pkg/kv/kvserver/kvflowcontrol/rac2", + "//pkg/kv/kvserver/kvflowcontrol/replica_rac2", "//pkg/kv/kvserver/kvserverbase", "//pkg/kv/kvserver/kvserverpb", "//pkg/kv/kvserver/kvstorage", @@ -416,6 +419,7 @@ go_test( "//pkg/kv/kvserver/kvflowcontrol/kvflowdispatch", "//pkg/kv/kvserver/kvflowcontrol/kvflowinspectpb", "//pkg/kv/kvserver/kvflowcontrol/node_rac2", + "//pkg/kv/kvserver/kvflowcontrol/replica_rac2", "//pkg/kv/kvserver/kvserverbase", "//pkg/kv/kvserver/kvserverpb", "//pkg/kv/kvserver/kvstorage", @@ -439,10 +443,12 @@ go_test( "//pkg/kv/kvserver/spanset", "//pkg/kv/kvserver/split", "//pkg/kv/kvserver/stateloader", + "//pkg/kv/kvserver/storeliveness", "//pkg/kv/kvserver/tenantrate", "//pkg/kv/kvserver/tscache", "//pkg/kv/kvserver/txnwait", "//pkg/kv/kvserver/uncertainty", + "//pkg/multitenant", "//pkg/multitenant/tenantcapabilities/tenantcapabilitiesauthorizer", "//pkg/raft", "//pkg/raft/confchange", diff --git a/pkg/kv/kvserver/allocator/allocator2/constraint_test.go b/pkg/kv/kvserver/allocator/allocator2/constraint_test.go index ee002dbf4698..899599db3ecf 100644 --- a/pkg/kv/kvserver/allocator/allocator2/constraint_test.go +++ b/pkg/kv/kvserver/allocator/allocator2/constraint_test.go @@ -37,7 +37,7 @@ func parseConstraint(t *testing.T, field string) roachpb.Constraint { case '-': typ = roachpb.Constraint_PROHIBITED default: - t.Fatalf(fmt.Sprintf("unexpected start of field %s", field)) + t.Fatalf("unexpected start of field %s", field) } kv := strings.Split(field[1:], "=") if len(kv) != 2 { diff --git a/pkg/kv/kvserver/client_manual_proposal_test.go 
b/pkg/kv/kvserver/client_manual_proposal_test.go index 6c7dfe6ccda3..4e48551592f6 100644 --- a/pkg/kv/kvserver/client_manual_proposal_test.go +++ b/pkg/kv/kvserver/client_manual_proposal_test.go @@ -261,7 +261,7 @@ LIMIT type wgSyncCallback sync.WaitGroup func (w *wgSyncCallback) OnLogSync( - ctx context.Context, messages []raftpb.Message, stats storage.BatchCommitStats, + context.Context, logstore.MsgStorageAppendDone, storage.BatchCommitStats, ) { (*sync.WaitGroup)(w).Done() } diff --git a/pkg/kv/kvserver/client_merge_test.go b/pkg/kv/kvserver/client_merge_test.go index 30dc9be59419..e4be7c0a8e9f 100644 --- a/pkg/kv/kvserver/client_merge_test.go +++ b/pkg/kv/kvserver/client_merge_test.go @@ -2486,6 +2486,7 @@ func TestStoreReplicaGCAfterMerge(t *testing.T) { kvserver.NoopStoresFlowControlIntegration{}, kvserver.NoopRaftTransportDisconnectListener{}, (*node_rac2.AdmittedPiggybacker)(nil), + nil, /* PiggybackedAdmittedResponseScheduler */ nil, /* knobs */ ) errChan := errorChannelTestHandler(make(chan *kvpb.Error, 1)) diff --git a/pkg/kv/kvserver/client_protectedts_test.go b/pkg/kv/kvserver/client_protectedts_test.go index 8f09bf171aee..49c89fcffbd1 100644 --- a/pkg/kv/kvserver/client_protectedts_test.go +++ b/pkg/kv/kvserver/client_protectedts_test.go @@ -208,7 +208,7 @@ ORDER BY raw_start_key ASC LIMIT 1`) // timestamp of our record at the latest. trace, _, err = s.Enqueue(ctx, "mvccGC", repl, true /* skipShouldQueue */, false /* async */) require.NoError(t, err) - require.Regexp(t, "(?s)done with GC evaluation for 0 keys", trace.String()) + require.Regexp(t, "(?s)handled \\d+ incoming point keys; deleted \\d+", trace.String()) thresh := thresholdFromTrace(trace) require.Truef(t, thresh.Less(ptsRec.Timestamp), "threshold: %v, protected %v %q", thresh, ptsRec.Timestamp, trace) diff --git a/pkg/kv/kvserver/client_raft_log_queue_test.go b/pkg/kv/kvserver/client_raft_log_queue_test.go index 727696b76e9f..ee495c2792e0 100644 --- a/pkg/kv/kvserver/client_raft_log_queue_test.go +++ b/pkg/kv/kvserver/client_raft_log_queue_test.go @@ -39,6 +39,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/errors" + "github.com/cockroachdb/pebble/vfs" "github.com/gogo/protobuf/proto" "github.com/stretchr/testify/require" ) @@ -341,16 +342,15 @@ func TestCrashWhileTruncatingSideloadedEntries(t *testing.T) { } } } - memFS.SetIgnoreSyncs(true) + crashFS := memFS.CrashClone(vfs.CrashCloneCfg{}) info(follower, "follower") t.Log("CRASH!") // TODO(pavelkalinnikov): add "crash" helpers to the TestCluster. tc.StopServer(1) t.Log("restarting follower") - memFS.ResetToSyncedState() - memFS.SetIgnoreSyncs(false) - t.Logf("FS after restart:\n%s", memFS.String()) + vfsReg.Set("auto-node2-store1", crashFS) + t.Logf("FS after restart:\n%s", crashFS.String()) require.NoError(t, tc.RestartServer(1)) // Update the follower variable to point at a newly restarted replica. 
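The hunk above swaps the old SetIgnoreSyncs/ResetToSyncedState dance for pebble's CrashClone API: the clone is a snapshot that keeps only state that was synced, so restarting the node on the clone simulates a hard crash without mutating the still-referenced original filesystem. A minimal sketch of the pattern, using only calls visible in the hunk (memFS, vfsReg, and the registry key stand in for whatever the test harness provides):

	// Simulate a crash: the clone drops unsynced writes, as a real crash would.
	crashFS := memFS.CrashClone(vfs.CrashCloneCfg{})
	tc.StopServer(1)
	// Point the restarting node's store at the post-crash filesystem.
	vfsReg.Set("auto-node2-store1", crashFS)
	require.NoError(t, tc.RestartServer(1))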
diff --git a/pkg/kv/kvserver/client_raft_test.go b/pkg/kv/kvserver/client_raft_test.go index 43f8d8185fd9..6c9263798316 100644 --- a/pkg/kv/kvserver/client_raft_test.go +++ b/pkg/kv/kvserver/client_raft_test.go @@ -3443,6 +3443,7 @@ func TestReplicaGCRace(t *testing.T) { kvserver.NoopStoresFlowControlIntegration{}, kvserver.NoopRaftTransportDisconnectListener{}, (*node_rac2.AdmittedPiggybacker)(nil), + nil, /* PiggybackedAdmittedResponseScheduler */ nil, /* knobs */ ) errChan := errorChannelTestHandler(make(chan *kvpb.Error, 1)) @@ -3837,6 +3838,7 @@ func TestReplicateRemovedNodeDisruptiveElection(t *testing.T) { kvserver.NoopStoresFlowControlIntegration{}, kvserver.NoopRaftTransportDisconnectListener{}, (*node_rac2.AdmittedPiggybacker)(nil), + nil, /* PiggybackedAdmittedResponseScheduler */ nil, /* knobs */ ) errChan := errorChannelTestHandler(make(chan *kvpb.Error, 1)) diff --git a/pkg/kv/kvserver/client_replica_circuit_breaker_test.go b/pkg/kv/kvserver/client_replica_circuit_breaker_test.go index 2aa8376826f1..df479beebb7c 100644 --- a/pkg/kv/kvserver/client_replica_circuit_breaker_test.go +++ b/pkg/kv/kvserver/client_replica_circuit_breaker_test.go @@ -466,9 +466,9 @@ func (s *dummyStream) Context() context.Context { return s.ctx } -func (s *dummyStream) SendIsThreadSafe() {} +func (s *dummyStream) SendUnbufferedIsThreadSafe() {} -func (s *dummyStream) Send(ev *kvpb.RangeFeedEvent) error { +func (s *dummyStream) SendUnbuffered(ev *kvpb.RangeFeedEvent) error { if ev.Val == nil && ev.Error == nil { return nil } @@ -495,7 +495,7 @@ func waitReplicaRangeFeed( event.SetValue(&kvpb.RangeFeedError{ Error: *err, }) - return stream.Send(&event) + return stream.SendUnbuffered(&event) } err := r.RangeFeed(req, stream, nil /* pacer */) diff --git a/pkg/kv/kvserver/client_replica_test.go b/pkg/kv/kvserver/client_replica_test.go index baae163a8f8c..5a69a65a1233 100644 --- a/pkg/kv/kvserver/client_replica_test.go +++ b/pkg/kv/kvserver/client_replica_test.go @@ -4380,7 +4380,7 @@ func TestProposalOverhead(t *testing.T) { // affect the memory held by the caller because neither `args` nor // `args.Cmd` are pointers. args.Cmd.WriteBatch = nil - t.Logf(pretty.Sprint(args.Cmd)) + t.Log(pretty.Sprint(args.Cmd)) return nil } tc := testcluster.StartTestCluster(t, 1, base.TestClusterArgs{ diff --git a/pkg/kv/kvserver/client_split_test.go b/pkg/kv/kvserver/client_split_test.go index 75377d2f6faa..ac5636f53cbb 100644 --- a/pkg/kv/kvserver/client_split_test.go +++ b/pkg/kv/kvserver/client_split_test.go @@ -808,12 +808,19 @@ func TestStoreRangeSplitMergeStats(t *testing.T) { start := s.Clock().Now() // Get the range stats now that we have data. + // NB: need raft lock so that in-mem and disk stats can be expected to match. + // See: https://github.com/cockroachdb/cockroach/issues/129601#issuecomment-2309865742 + repl.RaftLock() + replMS := repl.GetMVCCStats() snap := store.TODOEngine().NewSnapshot() defer snap.Close() + repl.RaftUnlock() + ms, err := stateloader.Make(repl.RangeID).LoadMVCCStats(ctx, snap) require.NoError(t, err) assertRecomputedStats(t, "before split", snap, repl.Desc(), ms, start.WallTime) - require.Equal(t, repl.GetMVCCStats(), ms, "in-memory and on-disk stats diverge") + + require.Equal(t, replMS, ms, "in-memory and on-disk stats diverge") // Split the range at approximate halfway point. 
// Call AdminSplit on the replica directly so that we can pass a diff --git a/pkg/kv/kvserver/client_tenant_test.go b/pkg/kv/kvserver/client_tenant_test.go index c971e7ac0bf3..35ff7906fb2a 100644 --- a/pkg/kv/kvserver/client_tenant_test.go +++ b/pkg/kv/kvserver/client_tenant_test.go @@ -20,6 +20,7 @@ import ( "regexp" "strconv" "strings" + "sync/atomic" "testing" "time" @@ -31,6 +32,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv/kvpb" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/tenantrate" + "github.com/cockroachdb/cockroach/pkg/multitenant" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/spanconfig" "github.com/cockroachdb/cockroach/pkg/storage/enginepb" @@ -42,6 +44,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/metric" + "github.com/cockroachdb/cockroach/pkg/util/quotapool" "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/cockroach/pkg/util/uuid" "github.com/cockroachdb/errors" @@ -168,12 +171,29 @@ func TestTenantRateLimiter(t *testing.T) { t0 := time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC) timeSource := timeutil.NewManualTime(t0) + // This test shouldn't take forever. If we're going to fail, better to + // do it in minutes than in an hour. + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) + defer cancel() + // We're testing the server-side tenant rate limiter, but there is also a tenant-side one. + // That one actually throttles too in this test (making it take 10+s) unless we work around that. + ctx = multitenant.WithTenantCostControlExemption(ctx) + + var numAcquired atomic.Int32 + acqFunc := func( + ctx context.Context, poolName string, r quotapool.Request, start time.Time, + ) { + numAcquired.Add(1) + } s, sqlDB, db := serverutils.StartServer(t, base.TestServerArgs{ DefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant, Knobs: base.TestingKnobs{ Store: &kvserver.StoreTestingKnobs{ TenantRateKnobs: tenantrate.TestingKnobs{ - TimeSource: timeSource, + QuotaPoolOptions: []quotapool.Option{ + quotapool.WithTimeSource(timeSource), + quotapool.OnAcquisition(acqFunc), + }, }, }, KeyVisualizer: &keyvisualizer.TestingKnobs{SkipJobBootstrap: true}, @@ -186,7 +206,6 @@ func TestTenantRateLimiter(t *testing.T) { }, }, }) - ctx := context.Background() tenantID := serverutils.TestTenantID() ts, err := s.TenantController().StartTenant(ctx, base.TestTenantArgs{ TenantID: tenantID, @@ -230,54 +249,120 @@ func TestTenantRateLimiter(t *testing.T) { cfg := tenantrate.ConfigFromSettings(&s.ClusterSettings().SV) // We don't know the exact size of the write, but we can set lower and upper - // bounds. - writeCostLower := cfg.WriteBatchUnits + cfg.WriteRequestUnits - writeCostUpper := cfg.WriteBatchUnits + cfg.WriteRequestUnits + float64(32)*cfg.WriteUnitsPerByte - tolerance := 50.0 // Leave space for a couple of other background requests. - // burstWrites is a number of writes that don't exceed the burst limit. - burstWrites := int((cfg.Burst - tolerance) / writeCostUpper) - // tooManyWrites is a number of writes which definitely exceed the burst - // limit. - tooManyWrites := int(cfg.Burst/writeCostLower) + 2 - - // This test shouldn't take forever. 
If we're going to fail, better to - // do it in minutes than in an hour. - ctx, cancel := context.WithTimeout(ctx, 2*time.Minute) - defer cancel() + // bounds for a "small" (up to 32 bytes) request. + + // If we put at least that many bytes into a single put, it should consume at + // least one percent of the configured burst, meaning if we send 100 such requests + // we should expect blocking. (We may block earlier due to background writes + // sneaking in). + // + // The below is based on the equation + // + // cost = WriteBatchUnits + WriteRequestUnits + bytes * WriteUnitsPerByte. + // + // and solving for the case in which cost is a percent of Burst. + numBytesForAtLeastOnePercentOfBurst := int((cfg.Burst/100 - cfg.WriteBatchUnits - cfg.WriteRequestUnits) / cfg.WriteUnitsPerByte) + require.NotZero(t, numBytesForAtLeastOnePercentOfBurst) + t.Logf("bytes for one percent of burst: %d", numBytesForAtLeastOnePercentOfBurst) + atLeastOnePercentValue := strings.Repeat("x", numBytesForAtLeastOnePercentOfBurst) + + // Spawn a helper that will detect if the rate limiter is blocking us. + // This prevents cases where the test would stall and fail opaquely instead + // of making it clear that writes on the main goroutine blocked unexpectedly. + watcher := func(ctx context.Context, t *testing.T, msg string) (_ context.Context, cancel func()) { + t.Helper() + t.Logf("testing: %v", msg) + ctx, cancel = context.WithCancel(ctx) + go func() { + for { + select { + case <-ctx.Done(): + return + case <-time.After(time.Second): + t.Logf("total acquisitions: %d", numAcquired.Load()) + } + if !assert.Len(t, timeSource.Timers(), 0, msg) { + cancel() + } + } + }() + return ctx, cancel + } // Make sure that writes to the system tenant don't block, even if we // definitely exceed the burst rate. - for i := 0; i < tooManyWrites; i++ { - require.NoError(t, db.Put(ctx, mkKey(), 0)) + { + ctx, cancel := watcher(ctx, t, "system tenant should not be rate limited") + defer cancel() + + for i := 0; i < 100; i++ { + require.NoError(t, db.Put(ctx, mkKey(), atLeastOnePercentValue)) + } + cancel() } timeSource.Advance(time.Second) // Now ensure that in the same instant the write QPS limit does affect the - // tenant. First issue requests that can happen without blocking. - for i := 0; i < burstWrites; i++ { - require.NoError(t, ts.DB().Put(ctx, mkKey(), 0)) + // tenant. First issue a handful of small requests, which should not block + // as we can send ~100 larger requests before running out of burst budget. + // + // In the past, this test was trying to get very close to cfg.Burst without + // blocking, but that is quite brittle because there are dozens of background + // writes that sneak in during an average test run. So we are now intentionally + // staying very far from running out of burst. + { + ctx, cancel := watcher(ctx, t, "first writes should not experience blocking") + defer cancel() + + for i := 0; i < 5; i++ { + require.NoError(t, ts.DB().Put(ctx, mkKey(), 0)) + } + cancel() } - // Attempt to issue another request, make sure that it gets blocked by - // observing a timer. + // Now intentionally break through the burst and make sure this blocks. + // We observe blocking by noticing that a timer was created via the custom + // timeSource which we only handed to the tenant rate limiter. + t.Logf("testing: requests should eventually block") errCh := make(chan error, 1) + // doneCh is closed once we've verified blocking and have unblocked. 
+ // This prevents the additional writes from potentially running into + // blocking again. + doneCh := make(chan struct{}) go func() { // Issue enough requests so that one has to block. - for i := burstWrites; i < tooManyWrites; i++ { - if err := ts.DB().Put(ctx, mkKey(), 0); err != nil { + ev := log.Every(100 * time.Millisecond) + for i := 0; i < 100; i++ { + if ev.ShouldLog() { + t.Logf("put %d", i+1) + } + if err := ts.DB().Put(ctx, mkKey(), atLeastOnePercentValue); err != nil { errCh <- err return } + select { + default: + case <-doneCh: + errCh <- nil + return + } } - errCh <- nil + t.Error("never blocked") + errCh <- errors.New("never blocked") }() testutils.SucceedsSoon(t, func() error { - timers := timeSource.Timers() - if len(timers) != 1 { - return errors.Errorf("seeing %d timers: %v", len(timers), timers) + if len(timeSource.Timers()) == 0 { + return errors.Errorf("not seeing any timers") } return nil }) + t.Log("blocking confirmed") + + // Allow the blocked request to proceed. + close(doneCh) // close first so that goroutine terminates once unblocked + timeSource.Advance(time.Second) + require.NoError(t, <-errCh) + t.Log("checking metrics") // Create some tooling to read and verify metrics off of the prometheus // endpoint. runner.Exec(t, `SET CLUSTER SETTING server.child_metrics.enabled = true`) @@ -292,30 +377,28 @@ func TestTenantRateLimiter(t *testing.T) { return string(read) } - // Allow the blocked request to proceed. - timeSource.Advance(time.Second) - require.NoError(t, <-errCh) - // Ensure that the metric for the admitted requests reflects the number of - // admitted requests. - // TODO(radu): this is fragile because a background write could sneak in and - // the count wouldn't match exactly. + // admitted requests. We run only shallow checks here due to background writes. m := getMetrics() lines := strings.Split(m, "\n") - tenantMetricStr := fmt.Sprintf(`kv_tenant_rate_limit_write_requests_admitted{store="1",tenant_id="%d"}`, tenantID.ToUint64()) + tenantMetricStr := fmt.Sprintf(`kv_tenant_rate_limit_write_requests_admitted{store="1",node_id="1",tenant_id="%d"}`, tenantID.ToUint64()) re := regexp.MustCompile(tenantMetricStr + ` (\d*)`) + var matched bool for _, line := range lines { match := re.FindStringSubmatch(line) if match != nil { + matched = true admittedMetricVal, err := strconv.Atoi(match[1]) require.NoError(t, err) - require.GreaterOrEqual(t, admittedMetricVal, tooManyWrites) - // Allow a tolerance for other requests performed while starting the - // tenant server. - require.Less(t, admittedMetricVal, tooManyWrites+400) + // The background chatter alone tends to put north of 100 writes in. + // But let's be conservative and not rely on that. Our blocking writes should + // get at least 50 in (we know 100 result in blocking). We can't and shouldn't + // add tighter checks here because they're a nightmare to debug should they fail. + require.GreaterOrEqual(t, admittedMetricVal, 50) break } } + require.True(t, matched, "did not match %s:\n\n%s", tenantMetricStr, m) } // Test that KV requests made by a tenant get a context annotated with the tenant ID. 
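The byte sizing in the test above falls out of the cost model cost = WriteBatchUnits + WriteRequestUnits + bytes * WriteUnitsPerByte: setting cost equal to Burst/100 and solving for bytes yields the numBytesForAtLeastOnePercentOfBurst expression in the hunk. A self-contained sketch of the arithmetic with hypothetical config values (the test reads the real ones via tenantrate.ConfigFromSettings):

	package main

	import "fmt"

	func main() {
		// Hypothetical numbers, purely for illustration.
		const (
			burst             = 10000.0
			writeBatchUnits   = 1.0
			writeRequestUnits = 1.0
			writeUnitsPerByte = 0.01
		)
		// Solve writeBatchUnits + writeRequestUnits + bytes*writeUnitsPerByte == burst/100.
		bytes := int((burst/100 - writeBatchUnits - writeRequestUnits) / writeUnitsPerByte)
		fmt.Println(bytes) // 9800: ~100 such puts exhaust the burst, so one must block
	}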
diff --git a/pkg/kv/kvserver/concurrency/concurrency_manager_test.go b/pkg/kv/kvserver/concurrency/concurrency_manager_test.go index 9deb165223e6..ef4bb58f10a3 100644 --- a/pkg/kv/kvserver/concurrency/concurrency_manager_test.go +++ b/pkg/kv/kvserver/concurrency/concurrency_manager_test.go @@ -542,7 +542,7 @@ func TestConcurrencyManagerBasic(t *testing.T) { mon.runSync("update txn", func(ctx context.Context) { log.Eventf(ctx, "%s %s", verb, redact.Safe(txnName)) if err := c.updateTxnRecord(txn.ID, status, ts); err != nil { - d.Fatalf(t, err.Error()) + d.Fatalf(t, "%s", err) } }) return c.waitAndCollect(t, mon) diff --git a/pkg/kv/kvserver/flow_control_raft.go b/pkg/kv/kvserver/flow_control_raft.go new file mode 100644 index 000000000000..9efb306537e2 --- /dev/null +++ b/pkg/kv/kvserver/flow_control_raft.go @@ -0,0 +1,76 @@ +// Copyright 2024 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package kvserver + +import ( + "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvflowcontrol/rac2" + "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvflowcontrol/replica_rac2" + "github.com/cockroachdb/cockroach/pkg/raft" + "github.com/cockroachdb/cockroach/pkg/raft/raftpb" + "github.com/cockroachdb/cockroach/pkg/raft/tracker" + "github.com/cockroachdb/cockroach/pkg/roachpb" +) + +type raftNodeForRACv2 struct { + *raft.RawNode +} + +var _ replica_rac2.RaftNode = raftNodeForRACv2{} + +func (rn raftNodeForRACv2) EnablePingForAdmittedLaggingLocked() { + panic("TODO(pav-kv): implement") +} + +func (rn raftNodeForRACv2) TermLocked() uint64 { + return rn.Term() +} + +func (rn raftNodeForRACv2) LeaderLocked() roachpb.ReplicaID { + return roachpb.ReplicaID(rn.Lead()) +} + +func (rn raftNodeForRACv2) StableIndexLocked() uint64 { + // TODO(pav-kv): implement. + return 0 +} + +func (rn raftNodeForRACv2) NextUnstableIndexLocked() uint64 { + return rn.NextUnstableIndex() +} + +func (rn raftNodeForRACv2) GetAdmittedLocked() [raftpb.NumPriorities]uint64 { + // TODO(pav-kv): implement. + return [raftpb.NumPriorities]uint64{} +} + +func (rn raftNodeForRACv2) SetAdmittedLocked([raftpb.NumPriorities]uint64) raftpb.Message { + panic("TODO(pav-kv): implement") +} + +func (rn raftNodeForRACv2) StepMsgAppRespForAdmittedLocked(m raftpb.Message) error { + return rn.RawNode.Step(m) +} + +func (rn raftNodeForRACv2) FollowerStateRaftMuLocked( + replicaID roachpb.ReplicaID, +) rac2.FollowerStateInfo { + // TODO(pav-kv): this is a temporary implementation. 
+ status := rn.Status() + if progress, ok := status.Progress[raftpb.PeerID(replicaID)]; ok { + return rac2.FollowerStateInfo{ + State: progress.State, + Match: progress.Match, + Next: progress.Next, + } + } + + return rac2.FollowerStateInfo{State: tracker.StateProbe} +} diff --git a/pkg/kv/kvserver/flow_control_raft_transport_test.go b/pkg/kv/kvserver/flow_control_raft_transport_test.go index bad07f92a09d..b41113a7ee7c 100644 --- a/pkg/kv/kvserver/flow_control_raft_transport_test.go +++ b/pkg/kv/kvserver/flow_control_raft_transport_test.go @@ -165,7 +165,7 @@ func TestFlowControlRaftTransport(t *testing.T) { controlM[nodeID].dispatch, kvserver.NoopStoresFlowControlIntegration{}, controlM[nodeID].disconnectListener, - controlM[nodeID].piggybacker, + controlM[nodeID].piggybacker, nil, controlM[nodeID].knobs, ) rttc.GossipNode(nodeID, addr) @@ -631,6 +631,7 @@ func TestFlowControlRaftTransportV2(t *testing.T) { kvserver.NoopStoresFlowControlIntegration{}, controlM[nodeID].disconnectListener, controlM[nodeID].piggybacker, + noopPiggybackedAdmittedResponseScheduler{}, controlM[nodeID].knobs, ) rttc.GossipNode(nodeID, addr) @@ -680,8 +681,11 @@ func TestFlowControlRaftTransportV2(t *testing.T) { toNodeID := parseNodeID(t, d, "node") toStoreID := parseStoreID(t, d, "store") rangeID := parseRangeID(t, d, "range") - control.piggybacker.AddMsgAppRespForLeader( - toNodeID, toStoreID, rangeID, raftpb.Message{}) + // TODO(pav-kv): test that these messages are actually sent in + // RaftMessageRequestBatch. + control.piggybacker.Add(toNodeID, kvflowcontrolpb.PiggybackedAdmittedState{ + RangeID: rangeID, ToStoreID: toStoreID, + }) return "" case "fallback-piggyback": @@ -788,3 +792,10 @@ func TestFlowControlRaftTransportV2(t *testing.T) { }, ) } + +type noopPiggybackedAdmittedResponseScheduler struct{} + +func (s noopPiggybackedAdmittedResponseScheduler) ScheduleAdmittedResponseForRangeRACv2( + ctx context.Context, msgs []kvflowcontrolpb.AdmittedResponseForRange, +) { +} diff --git a/pkg/kv/kvserver/flow_control_replica_integration.go b/pkg/kv/kvserver/flow_control_replica_integration.go index 965f09f80e29..95a7dc496ace 100644 --- a/pkg/kv/kvserver/flow_control_replica_integration.go +++ b/pkg/kv/kvserver/flow_control_replica_integration.go @@ -16,6 +16,7 @@ import ( "slices" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvflowcontrol" + "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvflowcontrol/replica_rac2" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/util/buildutil" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -440,3 +441,53 @@ func (f *replicaFlowControlIntegrationImpl) clearState(ctx context.Context) { f.lastKnownReplicas = roachpb.MakeReplicaSet(nil) f.disconnectedStreams = nil } + +type noopReplicaFlowControlIntegration struct{} + +func (n noopReplicaFlowControlIntegration) onBecameLeader(context.Context) {} +func (n noopReplicaFlowControlIntegration) onBecameFollower(context.Context) {} +func (n noopReplicaFlowControlIntegration) onDescChanged(context.Context) {} +func (n noopReplicaFlowControlIntegration) onFollowersPaused(context.Context) {} +func (n noopReplicaFlowControlIntegration) onRaftTransportDisconnected( + context.Context, ...roachpb.StoreID, +) { +} +func (n noopReplicaFlowControlIntegration) onRaftTicked(context.Context) {} +func (n noopReplicaFlowControlIntegration) onDestroyed(context.Context) {} +func (n noopReplicaFlowControlIntegration) handle() (kvflowcontrol.Handle, bool) { + return 
nil, false +} + +type replicaForRACv2 Replica + +var _ replica_rac2.Replica = &replicaForRACv2{} + +// RaftMuAssertHeld implements replica_rac2.Replica. +func (r *replicaForRACv2) RaftMuAssertHeld() { + r.raftMu.AssertHeld() +} + +// MuAssertHeld implements replica_rac2.Replica. +func (r *replicaForRACv2) MuAssertHeld() { + r.mu.AssertHeld() +} + +// MuLock implements replica_rac2.Replica. +func (r *replicaForRACv2) MuLock() { + r.mu.Lock() +} + +// MuUnlock implements replica_rac2.Replica. +func (r *replicaForRACv2) MuUnlock() { + r.mu.Unlock() +} + +// RaftNodeMuLocked implements replica_rac2.Replica. +func (r *replicaForRACv2) RaftNodeMuLocked() replica_rac2.RaftNode { + return raftNodeForRACv2{RawNode: r.mu.internalRaftGroup} +} + +// LeaseholderMuLocked implements replica_rac2.Replica. +func (r *replicaForRACv2) LeaseholderMuLocked() roachpb.ReplicaID { + return r.mu.state.Lease.Replica.ReplicaID +} diff --git a/pkg/kv/kvserver/flow_control_stores.go b/pkg/kv/kvserver/flow_control_stores.go index 6a879444c347..87c5081acb1c 100644 --- a/pkg/kv/kvserver/flow_control_stores.go +++ b/pkg/kv/kvserver/flow_control_stores.go @@ -12,10 +12,16 @@ package kvserver import ( "context" + "time" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvflowcontrol" + "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvflowcontrol/kvflowcontrolpb" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvflowcontrol/kvflowhandle" + "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvflowcontrol/rac2" + "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvflowcontrol/replica_rac2" "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/util/admission" + "github.com/cockroachdb/cockroach/pkg/util/admission/admissionpb" "github.com/cockroachdb/cockroach/pkg/util/log" ) @@ -50,6 +56,14 @@ func (sh *storesForFlowControl) Lookup( return handle, found } +// LookupReplicationAdmissionHandle is part of the StoresForFlowControl +// interface. +func (sh *storesForFlowControl) LookupReplicationAdmissionHandle( + rangeID roachpb.RangeID, +) (kvflowcontrol.ReplicationAdmissionHandle, bool) { + return sh.Lookup(rangeID) +} + // Inspect is part of the StoresForFlowControl interface. func (sh *storesForFlowControl) Inspect() []roachpb.RangeID { ls := (*Stores)(sh) @@ -107,21 +121,57 @@ func makeStoreForFlowControl(store *Store) *storeForFlowControl { func (sh *storeForFlowControl) Lookup( rangeID roachpb.RangeID, ) (_ kvflowcontrol.Handle, found bool) { - s := (*Store)(sh) - repl := s.GetReplicaIfExists(rangeID) + repl := sh.lookupReplica(rangeID) + if repl == nil { + return nil, false + } + repl.mu.Lock() + defer repl.mu.Unlock() + return repl.mu.replicaFlowControlIntegration.handle() +} + +// LookupReplicationAdmissionHandle is part of the StoresForFlowControl +// interface. +func (sh *storeForFlowControl) LookupReplicationAdmissionHandle( + rangeID roachpb.RangeID, +) (kvflowcontrol.ReplicationAdmissionHandle, bool) { + repl := sh.lookupReplica(rangeID) if repl == nil { return nil, false } + // NB: Admit is called soon after this lookup. + level := repl.flowControlV2.GetEnabledWhenLeader() + useV1 := level == replica_rac2.NotEnabledWhenLeader + var v1Handle kvflowcontrol.ReplicationAdmissionHandle + if useV1 { + repl.mu.Lock() + var found bool + v1Handle, found = repl.mu.replicaFlowControlIntegration.handle() + repl.mu.Unlock() + if !found { + return nil, found + } + } + // INVARIANT: useV1 => v1Handle was found. 
+ return admissionDemuxHandle{ + v1Handle: v1Handle, + r: repl, + useV1: useV1, + }, true +} +func (sh *storeForFlowControl) lookupReplica(rangeID roachpb.RangeID) *Replica { + s := (*Store)(sh) + repl := s.GetReplicaIfExists(rangeID) + if repl == nil { + return nil + } if knobs := s.TestingKnobs().FlowControlTestingKnobs; knobs != nil && knobs.UseOnlyForScratchRanges && !repl.IsScratchRange() { - return nil, false + return nil } - - repl.mu.Lock() - defer repl.mu.Unlock() - return repl.mu.replicaFlowControlIntegration.handle() + return repl } // ResetStreams is part of the StoresForFlowControl interface. @@ -205,6 +255,14 @@ func (l NoopStoresFlowControlIntegration) Lookup(roachpb.RangeID) (kvflowcontrol return nil, false } +// LookupReplicationAdmissionHandle is part of the StoresForFlowControl +// interface. +func (l NoopStoresFlowControlIntegration) LookupReplicationAdmissionHandle( + rangeID roachpb.RangeID, +) (kvflowcontrol.ReplicationAdmissionHandle, bool) { + return l.Lookup(rangeID) +} + // ResetStreams is part of the StoresForFlowControl interface. func (l NoopStoresFlowControlIntegration) ResetStreams(context.Context) { } @@ -220,3 +278,106 @@ func (NoopStoresFlowControlIntegration) OnRaftTransportDisconnected( context.Context, ...roachpb.StoreID, ) { } + +// StoresForRACv2 implements various interfaces to route to the relevant +// range's Processor. +type StoresForRACv2 interface { + admission.OnLogEntryAdmitted + PiggybackedAdmittedResponseScheduler +} + +// PiggybackedAdmittedResponseScheduler routes followers piggybacked admitted +// response messages to the relevant ranges, and schedules those ranges for +// processing. +type PiggybackedAdmittedResponseScheduler interface { + ScheduleAdmittedResponseForRangeRACv2( + ctx context.Context, msgs []kvflowcontrolpb.AdmittedResponseForRange) +} + +func MakeStoresForRACv2(stores *Stores) StoresForRACv2 { + return (*storesForRACv2)(stores) +} + +type storesForRACv2 Stores + +// AdmittedLogEntry implements admission.OnLogEntryAdmitted. +func (ss *storesForRACv2) AdmittedLogEntry( + ctx context.Context, cbState admission.LogEntryAdmittedCallbackState, +) { + p := ss.lookup(cbState.StoreID, cbState.RangeID, cbState.ReplicaID) + if p == nil { + return + } + p.AdmittedLogEntry(ctx, replica_rac2.EntryForAdmissionCallbackState{ + Mark: rac2.LogMark{Term: cbState.LeaderTerm, Index: cbState.Pos.Index}, + Priority: cbState.RaftPri, + }) +} + +func (ss *storesForRACv2) lookup( + storeID roachpb.StoreID, rangeID roachpb.RangeID, replicaID roachpb.ReplicaID, +) replica_rac2.Processor { + ls := (*Stores)(ss) + s, err := ls.GetStore(storeID) + if err != nil { + // Store has disappeared! + panic(err) + } + r := s.GetReplicaIfExists(rangeID) + if r == nil || r.replicaID != replicaID { + return nil + } + return r.flowControlV2 +} + +// ScheduleAdmittedResponseForRangeRACv2 implements PiggybackedAdmittedResponseScheduler. 
+func (ss *storesForRACv2) ScheduleAdmittedResponseForRangeRACv2( + ctx context.Context, msgs []kvflowcontrolpb.AdmittedResponseForRange, +) { + ls := (*Stores)(ss) + for _, m := range msgs { + s, err := ls.GetStore(m.LeaderStoreID) + if err != nil { + log.Errorf(ctx, "store %s not found", m.LeaderStoreID) + continue + } + repl := s.GetReplicaIfExists(m.RangeID) + if repl == nil { + continue + } + repl.flowControlV2.EnqueuePiggybackedAdmittedAtLeader(m.Msg) + s.scheduler.EnqueueRACv2PiggybackAdmitted(m.RangeID) + } +} + +type admissionDemuxHandle struct { + v1Handle kvflowcontrol.ReplicationAdmissionHandle + r *Replica + useV1 bool +} + +// Admit implements kvflowcontrol.ReplicationAdmissionHandle. +func (h admissionDemuxHandle) Admit( + ctx context.Context, pri admissionpb.WorkPriority, ct time.Time, +) (admitted bool, err error) { + if h.useV1 { + admitted, err = h.v1Handle.Admit(ctx, pri, ct) + if err != nil { + return admitted, err + } + // It is possible a transition from v1 => v2 happened while waiting, which + // can cause either value of admitted. See the comment in + // ReplicationAdmissionHandle. + level := h.r.flowControlV2.GetEnabledWhenLeader() + if level == replica_rac2.NotEnabledWhenLeader { + return admitted, err + } + // Transition from v1 => v2 happened while waiting. Fall through to wait + // on v2, since it is possible that nothing was waited on, or the + // overloaded stream was not waited on. This double wait is acceptable + // since during the transition from v1 => v2 only elastic work should be + // subject to replication AC, and we would like to err towards not + // overloading. + } + return h.r.flowControlV2.AdmitForEval(ctx, pri, ct) +} diff --git a/pkg/kv/kvserver/kvadmission/BUILD.bazel b/pkg/kv/kvserver/kvadmission/BUILD.bazel index a972d55a6bbc..5df579399fbd 100644 --- a/pkg/kv/kvserver/kvadmission/BUILD.bazel +++ b/pkg/kv/kvserver/kvadmission/BUILD.bazel @@ -10,6 +10,7 @@ go_library( "//pkg/kv/kvpb", "//pkg/kv/kvserver/kvflowcontrol", "//pkg/kv/kvserver/kvflowcontrol/kvflowcontrolpb", + "//pkg/kv/kvserver/kvflowcontrol/replica_rac2", "//pkg/kv/kvserver/raftlog", "//pkg/raft/raftpb", "//pkg/roachpb", diff --git a/pkg/kv/kvserver/kvadmission/kvadmission.go b/pkg/kv/kvserver/kvadmission/kvadmission.go index 6be467d4f54d..16837b4276cd 100644 --- a/pkg/kv/kvserver/kvadmission/kvadmission.go +++ b/pkg/kv/kvserver/kvadmission/kvadmission.go @@ -21,6 +21,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv/kvpb" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvflowcontrol" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvflowcontrol/kvflowcontrolpb" + "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvflowcontrol/replica_rac2" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/raftlog" "github.com/cockroachdb/cockroach/pkg/raft/raftpb" "github.com/cockroachdb/cockroach/pkg/roachpb" @@ -57,12 +58,12 @@ var elasticCPUDurationPerInternalLowPriRead = settings.RegisterDurationSetting( settings.DurationInRange(admission.MinElasticCPUDuration, admission.MaxElasticCPUDuration), ) -// internalLowPriReadElasticControlEnabled determines whether internally -// submitted low pri reads integrate with elastic CPU control. -var internalLowPriReadElasticControlEnabled = settings.RegisterBoolSetting( +// elasticAdmissionAllLowPri determines whether internally +// submitted bulk low priority requests integrate with elastic CPU control. 
+var elasticAdmissionAllLowPri = settings.RegisterBoolSetting( settings.SystemOnly, - "kvadmission.low_pri_read_elastic_control.enabled", - "determines whether the internally submitted low priority reads integrate with elastic CPU control", + "kvadmission.elastic_control_bulk_low_priority.enabled", + "determines whether all bulk low priority requests integrate with elastic CPU control", true, ) @@ -181,7 +182,10 @@ type Controller interface { FollowerStoreWriteBytes(roachpb.StoreID, FollowerStoreWriteBytes) // AdmitRaftEntry informs admission control of a raft log entry being // written to storage. - AdmitRaftEntry(context.Context, roachpb.TenantID, roachpb.StoreID, roachpb.RangeID, raftpb.Entry) + AdmitRaftEntry( + _ context.Context, _ roachpb.TenantID, _ roachpb.StoreID, _ roachpb.RangeID, _ roachpb.ReplicaID, + leaderTerm uint64, _ raftpb.Entry) + replica_rac2.ACWorkQueue } // TenantWeightProvider can be periodically asked to provide the tenant @@ -334,7 +338,7 @@ func (n *controllerImpl) AdmitKVWork( var admitted bool attemptFlowControl := kvflowcontrol.Enabled.Get(&n.settings.SV) if attemptFlowControl && !bypassAdmission { - kvflowHandle, found := n.kvflowHandles.Lookup(ba.RangeID) + kvflowHandle, found := n.kvflowHandles.LookupReplicationAdmissionHandle(ba.RangeID) if !found { return Handle{}, nil } @@ -395,14 +399,13 @@ func (n *controllerImpl) AdmitKVWork( // handed out through this mechanism, as a way to provide latency // isolation to non-elastic ("latency sensitive") work running on the // same machine. - // - We do the same for internally submitted low priority reads in + // - We do the same for internally submitted bulk low priority requests in // general (notably, for KV work done on the behalf of row-level TTL - // reads). Everything admissionpb.UserLowPri and above uses the slots - // mechanism. - isInternalLowPriRead := ba.IsReadOnly() && admissionInfo.Priority < admissionpb.UserLowPri + // reads or other jobs). Everything admissionpb.UserLowPri and above uses + // the slots mechanism. shouldUseElasticCPU := (exportRequestElasticControlEnabled.Get(&n.settings.SV) && ba.IsSingleExportRequest()) || - (internalLowPriReadElasticControlEnabled.Get(&n.settings.SV) && isInternalLowPriRead) + (admissionInfo.Priority <= admissionpb.BulkLowPri && elasticAdmissionAllLowPri.Get(&n.settings.SV)) if shouldUseElasticCPU { var admitDuration time.Duration @@ -570,12 +573,15 @@ func (n *controllerImpl) FollowerStoreWriteBytes( followerWriteBytes.NumEntries, followerWriteBytes.StoreWorkDoneInfo) } -// AdmitRaftEntry implements the Controller interface. +// AdmitRaftEntry implements the Controller interface. It is only used for the +// RACv1 protocol. 
func (n *controllerImpl) AdmitRaftEntry( ctx context.Context, tenantID roachpb.TenantID, storeID roachpb.StoreID, rangeID roachpb.RangeID, + replicaID roachpb.ReplicaID, + leaderTerm uint64, entry raftpb.Entry, ) { typ, _, err := raftlog.EncodingOf(entry) @@ -624,14 +630,17 @@ func (n *controllerImpl) AdmitRaftEntry( RequestedCount: int64(len(entry.Data)), } wi.ReplicatedWorkInfo = admission.ReplicatedWorkInfo{ - Enabled: true, - RangeID: rangeID, - Origin: meta.AdmissionOriginNode, + Enabled: true, + RangeID: rangeID, + ReplicaID: replicaID, + LeaderTerm: leaderTerm, LogPosition: admission.LogPosition{ Term: entry.Term, Index: entry.Index, }, - Ingested: typ.IsSideloaded(), + Origin: meta.AdmissionOriginNode, + IsV2Protocol: false, + Ingested: typ.IsSideloaded(), } handle, err := storeAdmissionQ.Admit(ctx, admission.StoreWriteWorkInfo{ @@ -646,6 +655,54 @@ func (n *controllerImpl) AdmitRaftEntry( } } +var _ replica_rac2.ACWorkQueue = &controllerImpl{} + +// Admit implements replica_rac2.ACWorkQueue. It is only used for the RACv2 protocol. +func (n *controllerImpl) Admit(ctx context.Context, entry replica_rac2.EntryForAdmission) bool { + storeAdmissionQ := n.storeGrantCoords.TryGetQueueForStore(entry.StoreID) + if storeAdmissionQ == nil { + log.Errorf(ctx, "unable to find queue for store: %s", entry.StoreID) + return false // nothing to do + } + + if entry.RequestedCount == 0 { + log.Fatal(ctx, "found (unexpected) empty raft command for below-raft admission") + } + wi := admission.WorkInfo{ + TenantID: entry.TenantID, + Priority: entry.Priority, + CreateTime: entry.CreateTime, + BypassAdmission: false, + RequestedCount: entry.RequestedCount, + } + wi.ReplicatedWorkInfo = admission.ReplicatedWorkInfo{ + Enabled: true, + RangeID: entry.RangeID, + ReplicaID: entry.ReplicaID, + LeaderTerm: entry.CallbackState.Mark.Term, + LogPosition: admission.LogPosition{ + Term: 0, // Ignored by callback in RACv2. + Index: entry.CallbackState.Mark.Index, + }, + Origin: 0, + RaftPri: entry.CallbackState.Priority, + IsV2Protocol: true, + Ingested: entry.Ingested, + } + + handle, err := storeAdmissionQ.Admit(ctx, admission.StoreWriteWorkInfo{ + WorkInfo: wi, + }) + if err != nil { + log.Errorf(ctx, "error while admitting to store admission queue: %v", err) + return false + } + if handle.UseAdmittedWorkDone() { + log.Fatalf(ctx, "unexpected handle.UseAdmittedWorkDone") + } + return true +} + // FollowerStoreWriteBytes captures stats about writes done to a store by a // replica that is not the leaseholder. These are used for admission control. type FollowerStoreWriteBytes struct { diff --git a/pkg/kv/kvserver/kvflowcontrol/kvflowcontrol.go b/pkg/kv/kvserver/kvflowcontrol/kvflowcontrol.go index 34fd1963485b..8e917b802c77 100644 --- a/pkg/kv/kvserver/kvflowcontrol/kvflowcontrol.go +++ b/pkg/kv/kvserver/kvflowcontrol/kvflowcontrol.go @@ -181,6 +181,28 @@ type Controller interface { // See I2, I3a and [^7] in kvflowcontrol/doc.go. } +// ReplicationAdmissionHandle abstracts waiting for admission across RACv1 and RACv2. +type ReplicationAdmissionHandle interface { + // Admit seeks admission to replicate data, regardless of size, for work + // with the given priority and create-time. This blocks until there are flow + // tokens available for connected streams. This returns true if the request + // was admitted through flow control. Ignore the first return type if err != + // nil. admitted == false && err == nil is a valid return, when something + // caused the callee to not care whether flow tokens were available. 
This + // can happen for at least the following reasons: + // - Configuration specifies the given WorkPriority is not subject to + // replication AC. + // - The callee doesn't think it is the leader or has been closed/destroyed. + // + // The latter can happen in the midst of a transition from RACv1 => RACv2. + // In this case if the callee waited on at least one connectedStream and was + // admitted, it will return (true, nil). This includes the case where the + // connectedStream was closed while waiting. If there were no + // connectedStreams (because they were already closed) it will return + // (false, nil). + Admit(context.Context, admissionpb.WorkPriority, time.Time) (admitted bool, _ error) +} + // Handle is used to interface with replication flow control; it's typically // backed by a node-level kvflowcontrol.Controller. Handles are held on replicas // initiating replication traffic, i.e. are both the leaseholder and raft @@ -195,14 +217,7 @@ type Controller interface { // given priority, takes log position into account -- see // kvflowcontrolpb.AdmittedRaftLogEntries for more details). type Handle interface { - // Admit seeks admission to replicate data, regardless of size, for work with - // the given priority and create-time. This blocks until there are flow tokens - // available for all connected streams. This returns true if the request was - // admitted through flow control. Ignore the first return type if err != nil. - // admitted == false && err == nil is a valid return, when something (e.g. - // configuration) caused the callee to not care whether flow tokens were - // available. - Admit(context.Context, admissionpb.WorkPriority, time.Time) (admitted bool, _ error) + ReplicationAdmissionHandle // DeductTokensFor deducts (without blocking) flow tokens for replicating // work with given priority along connected streams. The deduction is // tracked with respect to the specific raft log position it's expecting it @@ -285,6 +300,12 @@ type Handles interface { // part of #95563. // // Iterate(roachpb.StoreID, func(context.Context, Handle, Stream)) + + // LookupReplicationAdmissionHandle looks up the ReplicationAdmissionHandle + // for the specific range (or rather, the replica of the specific range + // that's locally held). The bool is false if no handle was found, in which + // case the caller must use the pre-replication-admission-control path. + LookupReplicationAdmissionHandle(roachpb.RangeID) (ReplicationAdmissionHandle, bool) } // HandleFactory is used to construct new Handles. 
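To make the Admit contract above concrete, the sketch below shows how a conforming caller distinguishes the three outcomes (admitted, error, and the "did not wait" fallback), assuming only the interfaces in this diff. admitViaFlowControl and its package placement are hypothetical, not part of the change.

package kvadmission // hypothetical placement, for illustration only

import (
	"context"
	"time"

	"github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvflowcontrol"
	"github.com/cockroachdb/cockroach/pkg/roachpb"
	"github.com/cockroachdb/cockroach/pkg/util/admission/admissionpb"
)

// admitViaFlowControl is a hypothetical helper. It returns fallback=true when
// the caller must use the pre-replication-admission-control path.
func admitViaFlowControl(
	ctx context.Context,
	handles kvflowcontrol.Handles,
	rangeID roachpb.RangeID,
	pri admissionpb.WorkPriority,
	createTime time.Time,
) (fallback bool, err error) {
	handle, found := handles.LookupReplicationAdmissionHandle(rangeID)
	if !found {
		// No handle for a locally held replica of this range.
		return true, nil
	}
	admitted, err := handle.Admit(ctx, pri, createTime)
	if err != nil {
		// E.g. context cancellation; the admitted value is ignored.
		return false, err
	}
	if !admitted {
		// (false, nil): the callee chose not to wait -- the priority is not
		// subject to replication AC, the replica is not the leader, or the
		// handle was closed, e.g. mid RACv1 => RACv2 transition.
		return true, nil
	}
	return false, nil
}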
diff --git a/pkg/kv/kvserver/kvflowcontrol/kvflowcontrolpb/kvflowcontrol.go b/pkg/kv/kvserver/kvflowcontrol/kvflowcontrolpb/kvflowcontrol.go index af6323704bbb..77a185202dd2 100644 --- a/pkg/kv/kvserver/kvflowcontrol/kvflowcontrolpb/kvflowcontrol.go +++ b/pkg/kv/kvserver/kvflowcontrol/kvflowcontrolpb/kvflowcontrol.go @@ -61,3 +61,20 @@ func (a AdmittedResponseForRange) String() string { func (a AdmittedResponseForRange) SafeFormat(w redact.SafePrinter, _ rune) { w.Printf("admitted-response (s%s r%s %s)", a.LeaderStoreID, a.RangeID, a.Msg.String()) } + +func (a AdmittedState) String() string { + return redact.StringWithoutMarkers(a) +} + +func (a AdmittedState) SafeFormat(w redact.SafePrinter, _ rune) { + w.Printf("admitted=t%d/%s", a.Term, a.Admitted) +} + +func (a PiggybackedAdmittedState) String() string { + return redact.StringWithoutMarkers(a) +} + +func (a PiggybackedAdmittedState) SafeFormat(w redact.SafePrinter, _ rune) { + w.Printf("[r%s,s%s,%d->%d] %s", + a.RangeID, a.ToStoreID, a.FromReplicaID, a.ToReplicaID, a.Admitted.String()) +} diff --git a/pkg/kv/kvserver/kvflowcontrol/kvflowcontrolpb/kvflowcontrol.proto b/pkg/kv/kvserver/kvflowcontrol/kvflowcontrolpb/kvflowcontrol.proto index 3291e73584f8..e6f41eb239cc 100644 --- a/pkg/kv/kvserver/kvflowcontrol/kvflowcontrolpb/kvflowcontrol.proto +++ b/pkg/kv/kvserver/kvflowcontrol/kvflowcontrolpb/kvflowcontrol.proto @@ -133,6 +133,8 @@ message RaftLogPosition { // AdmittedResponseForRange is only used in RACv2. It contains a MsgAppResp // from a follower to a leader, that was generated to advance the admitted // vector for that follower, maintained by the leader. +// +// TODO(pav-kv): remove this type and use PiggybackedAdmittedState. message AdmittedResponseForRange { option (gogoproto.goproto_stringer) = false; @@ -149,3 +151,44 @@ message AdmittedResponseForRange { // Msg is the MsgAppResp containing the admitted vector. raftpb.Message msg = 3 [(gogoproto.nullable) = false]; } + +// AdmittedState communicates a replica's vector of admitted log indices at +// different priorities to the leader of a range. +// +// Used only in RACv2. +message AdmittedState { + option (gogoproto.goproto_stringer) = false; + // Term is the leader term of the log for which the Admitted indices were + // computed. The indices are consistent with this leader's log. + uint64 term = 1; + // Admitted contains admitted log indices for each priority < NumPriorities. + repeated uint64 admitted = 2; +} + +// PiggybackedAdmittedState wraps the AdmittedState with the routing information +// needed to deliver the admitted vector to a particular leader replica, and for +// it to know who sent it. +// +// Used only in RACv2. +message PiggybackedAdmittedState { + option (gogoproto.goproto_stringer) = false; + + // RangeID is the ID of the range to which this message is related. Used for + // routing at the leader node. + uint64 range_id = 1 [(gogoproto.customname) = "RangeID", + (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.RangeID"]; + // ToStoreID is the store at the leader containing the leader replica. Used + // for routing at the leader node. + uint64 to_store_id = 2 [(gogoproto.customname) = "ToStoreID", + (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.StoreID"]; + + // FromReplicaID is the replica sending this message. 
+ uint64 from_replica_id = 3 [(gogoproto.customname) = "FromReplicaID", + (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.ReplicaID"]; + // ToReplicaID is the leader replica receiving this message. + uint64 to_replica_id = 4 [(gogoproto.customname) = "ToReplicaID", + (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.ReplicaID"]; + + // Admitted is the admitted vector at the sending replica. + AdmittedState admitted = 5 [(gogoproto.nullable) = false]; +} diff --git a/pkg/kv/kvserver/kvflowcontrol/kvflowdispatch/kvflowdispatch_test.go b/pkg/kv/kvserver/kvflowcontrol/kvflowdispatch/kvflowdispatch_test.go index 3102cca82e06..29ee17a30450 100644 --- a/pkg/kv/kvserver/kvflowcontrol/kvflowdispatch/kvflowdispatch_test.go +++ b/pkg/kv/kvserver/kvflowcontrol/kvflowdispatch/kvflowdispatch_test.go @@ -226,6 +226,12 @@ func (d dummyHandles) Lookup(id roachpb.RangeID) (kvflowcontrol.Handle, bool) { return nil, false } +func (d dummyHandles) LookupReplicationAdmissionHandle( + rangeID roachpb.RangeID, +) (kvflowcontrol.ReplicationAdmissionHandle, bool) { + return d.Lookup(rangeID) +} + func (d dummyHandles) ResetStreams(ctx context.Context) {} func (d dummyHandles) Inspect() []roachpb.RangeID { diff --git a/pkg/kv/kvserver/kvflowcontrol/node_rac2/BUILD.bazel b/pkg/kv/kvserver/kvflowcontrol/node_rac2/BUILD.bazel index dd51ab7702d8..2700c4070a7a 100644 --- a/pkg/kv/kvserver/kvflowcontrol/node_rac2/BUILD.bazel +++ b/pkg/kv/kvserver/kvflowcontrol/node_rac2/BUILD.bazel @@ -11,7 +11,6 @@ go_library( deps = [ "//pkg/kv/kvserver/kvflowcontrol/kvflowcontrolpb", "//pkg/kv/kvserver/kvflowcontrol/replica_rac2", - "//pkg/raft/raftpb", "//pkg/roachpb", "//pkg/util/syncutil", ], @@ -24,7 +23,6 @@ go_test( embed = [":node_rac2"], deps = [ "//pkg/kv/kvserver/kvflowcontrol/kvflowcontrolpb", - "//pkg/raft/raftpb", "//pkg/roachpb", "//pkg/testutils/datapathutils", "//pkg/util/leaktest", diff --git a/pkg/kv/kvserver/kvflowcontrol/node_rac2/admitted_piggybacker.go b/pkg/kv/kvserver/kvflowcontrol/node_rac2/admitted_piggybacker.go index 4c443bab6c4d..580b6dd8b43a 100644 --- a/pkg/kv/kvserver/kvflowcontrol/node_rac2/admitted_piggybacker.go +++ b/pkg/kv/kvserver/kvflowcontrol/node_rac2/admitted_piggybacker.go @@ -16,7 +16,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvflowcontrol/kvflowcontrolpb" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvflowcontrol/replica_rac2" - "github.com/cockroachdb/cockroach/pkg/raft/raftpb" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/util/syncutil" ) @@ -29,7 +28,7 @@ type PiggybackMsgReader interface { // least one message will be popped. PopMsgsForNode( now time.Time, nodeID roachpb.NodeID, maxBytes int64, - ) (msgs []kvflowcontrolpb.AdmittedResponseForRange, remainingMsgs int) + ) (_ []kvflowcontrolpb.PiggybackedAdmittedState, remainingMsgs int) // NodesWithMsgs is used to periodically drop msgs from disconnected nodes. // See RaftTransport.dropFlowTokensForDisconnectedNodes. 
NodesWithMsgs(now time.Time) []roachpb.NodeID @@ -46,7 +45,7 @@ type AdmittedPiggybacker struct { } type rangeMap struct { - rangeMap map[roachpb.RangeID]kvflowcontrolpb.AdmittedResponseForRange + rangeMap map[roachpb.RangeID]kvflowcontrolpb.PiggybackedAdmittedState transitionToEmptyTime time.Time } @@ -59,32 +58,28 @@ func NewAdmittedPiggybacker() *AdmittedPiggybacker { var _ PiggybackMsgReader = &AdmittedPiggybacker{} var _ replica_rac2.AdmittedPiggybacker = &AdmittedPiggybacker{} -// AddMsgAppRespForLeader implements replica_rac2.AdmittedPiggybacker. -func (ap *AdmittedPiggybacker) AddMsgAppRespForLeader( - nodeID roachpb.NodeID, storeID roachpb.StoreID, rangeID roachpb.RangeID, msg raftpb.Message, +// Add implements replica_rac2.AdmittedPiggybacker. +func (ap *AdmittedPiggybacker) Add( + nodeID roachpb.NodeID, msg kvflowcontrolpb.PiggybackedAdmittedState, ) { ap.mu.Lock() defer ap.mu.Unlock() rm, ok := ap.mu.msgsForNode[nodeID] if !ok { - rm = &rangeMap{rangeMap: map[roachpb.RangeID]kvflowcontrolpb.AdmittedResponseForRange{}} + rm = &rangeMap{rangeMap: map[roachpb.RangeID]kvflowcontrolpb.PiggybackedAdmittedState{}} ap.mu.msgsForNode[nodeID] = rm } - rm.rangeMap[rangeID] = kvflowcontrolpb.AdmittedResponseForRange{ - LeaderStoreID: storeID, - RangeID: rangeID, - Msg: msg, - } + rm.rangeMap[msg.RangeID] = msg } -// Made-up number. There are 10+ integers, all varint encoded, many of which +// Made-up number. There are < 10 integers, all varint encoded, many of which // like nodeID, storeID, replicaIDs etc. will be small. -const admittedForRangeRACv2SizeBytes = 50 +const admittedForRangeRACv2SizeBytes = 40 // PopMsgsForNode implements PiggybackMsgReader. func (ap *AdmittedPiggybacker) PopMsgsForNode( now time.Time, nodeID roachpb.NodeID, maxBytes int64, -) (msgs []kvflowcontrolpb.AdmittedResponseForRange, remainingMsgs int) { +) (_ []kvflowcontrolpb.PiggybackedAdmittedState, remainingMsgs int) { if ap == nil { return nil, 0 } @@ -94,13 +89,16 @@ func (ap *AdmittedPiggybacker) PopMsgsForNode( if !ok || len(rm.rangeMap) == 0 { return nil, 0 } - maxEntries := maxBytes / admittedForRangeRACv2SizeBytes + // NB: +1 to include at least one entry. 
+ maxEntries := maxBytes/admittedForRangeRACv2SizeBytes + 1 + msgs := make([]kvflowcontrolpb.PiggybackedAdmittedState, 0, + min(int64(len(rm.rangeMap)), maxEntries)) for rangeID, msg := range rm.rangeMap { - msgs = append(msgs, msg) - delete(rm.rangeMap, rangeID) - if int64(len(msgs)) > maxEntries { + if len(msgs) == cap(msgs) { break } + msgs = append(msgs, msg) + delete(rm.rangeMap, rangeID) } n := len(rm.rangeMap) if n == 0 { diff --git a/pkg/kv/kvserver/kvflowcontrol/node_rac2/admitted_piggybacker_test.go b/pkg/kv/kvserver/kvflowcontrol/node_rac2/admitted_piggybacker_test.go index 19beb0d9b48f..7aa9ada90306 100644 --- a/pkg/kv/kvserver/kvflowcontrol/node_rac2/admitted_piggybacker_test.go +++ b/pkg/kv/kvserver/kvflowcontrol/node_rac2/admitted_piggybacker_test.go @@ -20,7 +20,6 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvflowcontrol/kvflowcontrolpb" - "github.com/cockroachdb/cockroach/pkg/raft/raftpb" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/testutils/datapathutils" "github.com/cockroachdb/cockroach/pkg/util/leaktest" @@ -38,14 +37,20 @@ func TestPiggybacker(t *testing.T) { func(t *testing.T, d *datadriven.TestData) string { switch d.Cmd { case "add": - var nodeID, storeID, rangeID, match int + var nodeID, storeID, rangeID, from, to, term int d.ScanArgs(t, "node-id", &nodeID) d.ScanArgs(t, "store-id", &storeID) d.ScanArgs(t, "range-id", &rangeID) - // Match is just a placeholder to differentiate messages in the test. - d.ScanArgs(t, "match", &match) - p.AddMsgAppRespForLeader(roachpb.NodeID(nodeID), roachpb.StoreID(storeID), - roachpb.RangeID(rangeID), raftpb.Message{Match: uint64(match)}) + d.ScanArgs(t, "from", &from) + d.ScanArgs(t, "to", &to) + d.ScanArgs(t, "term", &term) + p.Add(roachpb.NodeID(nodeID), kvflowcontrolpb.PiggybackedAdmittedState{ + RangeID: roachpb.RangeID(rangeID), + ToStoreID: roachpb.StoreID(storeID), + FromReplicaID: roachpb.ReplicaID(from), + ToReplicaID: roachpb.ReplicaID(to), + Admitted: kvflowcontrolpb.AdmittedState{Term: uint64(term)}, + }) return "" case "nodes-with-msgs": @@ -71,13 +76,13 @@ func TestPiggybacker(t *testing.T) { var nodeID int d.ScanArgs(t, "node-id", &nodeID) msgs, remaining := p.PopMsgsForNode(ts, roachpb.NodeID(nodeID), math.MaxInt64) - slices.SortFunc(msgs, func(a, b kvflowcontrolpb.AdmittedResponseForRange) int { + slices.SortFunc(msgs, func(a, b kvflowcontrolpb.PiggybackedAdmittedState) int { return cmp.Compare(a.RangeID, b.RangeID) }) var b strings.Builder fmt.Fprintf(&b, "msgs:\n") for _, msg := range msgs { - fmt.Fprintf(&b, "s%s, r%s, match=%d\n", msg.LeaderStoreID, msg.RangeID, msg.Msg.Match) + fmt.Fprintf(&b, "%s\n", msg) } fmt.Fprintf(&b, "remaining-msgs: %d\n", remaining) return b.String() @@ -100,15 +105,15 @@ func TestPiggybackerMaxBytes(t *testing.T) { defer log.Scope(t).Close(t) p := NewAdmittedPiggybacker() - p.AddMsgAppRespForLeader(1, 1, 1, raftpb.Message{}) - p.AddMsgAppRespForLeader(1, 1, 2, raftpb.Message{}) + p.Add(1, kvflowcontrolpb.PiggybackedAdmittedState{RangeID: 1, ToStoreID: 1}) + p.Add(1, kvflowcontrolpb.PiggybackedAdmittedState{RangeID: 2, ToStoreID: 1}) // Both are popped. 
msgs, remaining := p.PopMsgsForNode(time.UnixMilli(1), 1, 60) require.Equal(t, 2, len(msgs)) require.Equal(t, 0, remaining) - p.AddMsgAppRespForLeader(1, 1, 1, raftpb.Message{}) - p.AddMsgAppRespForLeader(1, 1, 2, raftpb.Message{}) + p.Add(1, kvflowcontrolpb.PiggybackedAdmittedState{RangeID: 1, ToStoreID: 1}) + p.Add(1, kvflowcontrolpb.PiggybackedAdmittedState{RangeID: 2, ToStoreID: 1}) // Only one is popped. msgs, remaining = p.PopMsgsForNode(time.UnixMilli(1), 1, 20) require.Equal(t, 1, len(msgs)) diff --git a/pkg/kv/kvserver/kvflowcontrol/node_rac2/testdata/piggybacker b/pkg/kv/kvserver/kvflowcontrol/node_rac2/testdata/piggybacker index cf7004e8e4cf..f3bc91adf162 100644 --- a/pkg/kv/kvserver/kvflowcontrol/node_rac2/testdata/piggybacker +++ b/pkg/kv/kvserver/kvflowcontrol/node_rac2/testdata/piggybacker @@ -10,11 +10,11 @@ msgs: remaining-msgs: 0 # Add for node 1. -add node-id=1 store-id=2 range-id=3 match=6 +add node-id=1 store-id=2 range-id=3 from=2 to=1 term=6 ---- # Add for node 11. -add node-id=11 store-id=12 range-id=13 match=14 +add node-id=11 store-id=12 range-id=13 from=3 to=1 term=14 ---- nodes-with-msgs time-sec=2 @@ -23,15 +23,15 @@ n1 n11 map len: 2 # Add another for node 11, for a different range. -add node-id=11 store-id=22 range-id=23 match=24 +add node-id=11 store-id=22 range-id=23 from=2 to=1 term=24 ---- # Pop both for node 11. pop node-id=11 time-sec=2 ---- msgs: -s12, r13, match=14 -s22, r23, match=24 +[r13,s12,3->1] admitted=t14/[] +[r23,s22,2->1] admitted=t24/[] remaining-msgs: 0 # There is still an empty map entry for node 11. @@ -47,14 +47,14 @@ n1 map len: 1 # Overwrite the msg for the range at node 1. -add node-id=1 store-id=2 range-id=3 match=7 +add node-id=1 store-id=2 range-id=3 from=2 to=1 term=25 ---- # Pop for node 1. There was only one msg. pop node-id=1 time-sec=64 ---- msgs: -s2, r3, match=7 +[r3,s2,2->1] admitted=t25/[] remaining-msgs: 0 # The map entry for node 1 is garbage collected. 
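The testdata above pins down two piggybacker behaviors: Add overwrites any pending message for the same (node, range) key, and PopMsgsForNode budgets each batch at maxBytes/admittedForRangeRACv2SizeBytes + 1 entries. The self-contained sketch below reproduces that budget arithmetic for the two cases in TestPiggybackerMaxBytes; popBudget is a hypothetical extraction of the logic inside PopMsgsForNode, not part of the change.

package main

import "fmt"

// Per-message size estimate, matching the constant in admitted_piggybacker.go.
const admittedForRangeRACv2SizeBytes = 40

// popBudget computes how many queued messages a pop may return: the byte
// budget divided by the per-message estimate, plus one so that at least one
// message is always popped.
func popBudget(maxBytes int64, queued int) int {
	maxEntries := maxBytes/admittedForRangeRACv2SizeBytes + 1
	return int(min(int64(queued), maxEntries))
}

func main() {
	fmt.Println(popBudget(60, 2)) // 2: both queued messages fit the budget
	fmt.Println(popBudget(20, 2)) // 1: only one message is popped
}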
diff --git a/pkg/kv/kvserver/kvflowcontrol/rac2/BUILD.bazel b/pkg/kv/kvserver/kvflowcontrol/rac2/BUILD.bazel index ab4d9177c893..8fc38396ae17 100644 --- a/pkg/kv/kvserver/kvflowcontrol/rac2/BUILD.bazel +++ b/pkg/kv/kvserver/kvflowcontrol/rac2/BUILD.bazel @@ -3,41 +3,69 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "rac2", srcs = [ + "log_tracker.go", + "metrics.go", "priority.go", "range_controller.go", "store_stream.go", "token_counter.go", + "token_tracker.go", ], importpath = "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvflowcontrol/rac2", visibility = ["//visibility:public"], deps = [ + "//pkg/base", "//pkg/kv/kvserver/kvflowcontrol", + "//pkg/kv/kvserver/raftlog", + "//pkg/raft", "//pkg/raft/raftpb", + "//pkg/raft/tracker", "//pkg/roachpb", "//pkg/settings/cluster", "//pkg/util/admission/admissionpb", "//pkg/util/buildutil", + "//pkg/util/hlc", "//pkg/util/log", + "//pkg/util/metric", "//pkg/util/syncutil", + "@com_github_cockroachdb_errors//:errors", "@com_github_cockroachdb_redact//:redact", + "@com_github_dustin_go_humanize//:go-humanize", ], ) go_test( name = "rac2_test", srcs = [ + "log_tracker_test.go", "priority_test.go", + "range_controller_test.go", + "store_stream_test.go", "token_counter_test.go", + "token_tracker_test.go", ], data = glob(["testdata/**"]), embed = [":rac2"], deps = [ "//pkg/kv/kvserver/kvflowcontrol", + "//pkg/kv/kvserver/kvflowcontrol/kvflowcontrolpb", + "//pkg/kv/kvserver/kvserverbase", + "//pkg/kv/kvserver/kvserverpb", + "//pkg/kv/kvserver/raftlog", "//pkg/raft/raftpb", + "//pkg/raft/tracker", + "//pkg/roachpb", "//pkg/settings/cluster", + "//pkg/testutils/datapathutils", "//pkg/util/admission/admissionpb", + "//pkg/util/hlc", + "//pkg/util/humanizeutil", "//pkg/util/leaktest", "//pkg/util/log", + "//pkg/util/protoutil", + "//pkg/util/stop", + "//pkg/util/syncutil", + "//pkg/util/timeutil", "@com_github_cockroachdb_datadriven//:datadriven", "@com_github_dustin_go_humanize//:go-humanize", "@com_github_stretchr_testify//require", diff --git a/pkg/kv/kvserver/kvflowcontrol/rac2/log_tracker.go b/pkg/kv/kvserver/kvflowcontrol/rac2/log_tracker.go new file mode 100644 index 000000000000..06fb23496847 --- /dev/null +++ b/pkg/kv/kvserver/kvflowcontrol/rac2/log_tracker.go @@ -0,0 +1,307 @@ +// Copyright 2024 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package rac2 + +import ( + "context" + "fmt" + "strings" + + "github.com/cockroachdb/cockroach/pkg/raft" + "github.com/cockroachdb/cockroach/pkg/raft/raftpb" + "github.com/cockroachdb/cockroach/pkg/util/buildutil" + "github.com/cockroachdb/cockroach/pkg/util/log" + "github.com/cockroachdb/errors" + "github.com/cockroachdb/redact" +) + +// LogMark is a position in a log consistent with the leader at a specific term. +type LogMark = raft.LogMark + +// AdmittedVector contains admitted log indices for each priority. +type AdmittedVector struct { + // Term is the leader term to which the admitted log indices relate to. + Term uint64 + // Admitted contains admitted indices in the Term's log. 
+ Admitted [raftpb.NumPriorities]uint64 +} + +// LogTracker tracks the durable and logically admitted state of a raft log. +// +// Writes to a raft log are ordered by LogMark (term, index) where term is the +// leader term on whose behalf entries are currently written, and index is the +// entry position in the log as seen by this leader. The index can only regress +// if term goes up, and a new leader overwrites a suffix of the log. +// +// Integration with storage guarantees that completion of a (term, index) +// write/sync means that all writes made under lower terms, or same term and +// lower log indices, have been completed. A similar guarantee comes for +// admissions at each priority. +type LogTracker struct { + // last is the latest log mark observed by the tracker. + last LogMark + // stable is the durable log index, in the last.Term log coordinate system. + // Entries in (stable, last.Index] are still in the write queue. + stable uint64 + // waiting contains, for each priority, entries present in the last.Term log + // waiting for admission. + // + // Invariants: + // - waiting[pri][i].Index <= last.Index + // - waiting[pri][i].Term <= last.Term + // - waiting[pri][i].Index < waiting[pri][i+1].Index + // - waiting[pri][i].Term <= waiting[pri][i+1].Term + waiting [raftpb.NumPriorities][]LogMark +} + +// NewLogTracker returns a LogTracker initialized to the given log mark. The +// caller must make sure that the log mark is durable. +func NewLogTracker(stable LogMark) LogTracker { + return LogTracker{last: stable, stable: stable.Index} +} + +// Stable returns the currently observed stable log mark. +func (l *LogTracker) Stable() LogMark { + return LogMark{Term: l.last.Term, Index: l.stable} +} + +// Admitted returns the current admitted log state, tied to the latest observed +// leader term. +// +// The admitted index for a priority is computed as one before the min entry +// index in the waiting queue. The indices are capped at the stable log index, +// and bumped to stable index for priorities which don't have any entries +// waiting for admission. +// +// Guarantees: +// - the term never regresses, +// - admitted indices never regress if the term does not change, +// - indices converge to the stable index which converges to the last index. +func (l *LogTracker) Admitted() AdmittedVector { + a := AdmittedVector{Term: l.last.Term} + for pri, marks := range l.waiting { + index := l.stable + if len(marks) != 0 { + index = min(index, marks[0].Index-1) + } + a.Admitted[pri] = index + } + return a +} + +// Append informs the tracker that log entries at indices (after, to.Index] are +// about to be sent to stable storage, on behalf of the to.Term leader. +// +// All log storage writes must be registered with the Append call, ordered by +// LogMark, with no gaps. Any entries in the (after, to.Index] batch, that are +// subject to admission control, should be registered with the Register call +// before the batch is sent to storage. +// +// Returns true if the admitted vector has changed. The only way it can change +// is when the leader term goes up, and indices potentially regress after the +// log is truncated by this newer term write. +func (l *LogTracker) Append(ctx context.Context, after uint64, to LogMark) bool { + // Fast path. We are at the same term. The log must be contiguous. 
+ if to.Term == l.last.Term { + if after != l.last.Index { + l.errorf(ctx, "append (%d,%d]@%d out of order", after, to.Index, to.Term) + return false + } + l.last.Index = to.Index + return false + } + if to.Term < l.last.Term || after > l.last.Index { + // Does not happen. Log writes are always ordered by LogMark, and have no + // gaps. Gaps can only appear when the log is cleared in response to storing + // a snapshot, which is handled by the SnapSynced method. + l.errorf(ctx, "append (%d,%d]@%d out of order", after, to.Index, to.Term) + return false + } + // Invariant: to.Term > l.last.Term && after <= l.last.Index. + l.last = to + // The effective stable and admitted indices can potentially regress here. + // This happens when a new leader overwrites a suffix of the log. + l.stable = min(l.stable, after) + // Entries at index > after.Index from previous terms are now obsolete. + for pri, marks := range l.waiting { + l.waiting[pri] = truncate(marks, after) + } + // The leader term was bumped, so the admitted vector is new for this term, + // and is considered "changed". + return true +} + +// Register informs the tracker that the entry at the given log mark is about to +// be sent to admission queue with the given priority. Must be called in the +// LogMark order, at most once for each entry. +// +// Typically, every batch of appended log entries should call Append once, +// followed by a sequence of Register calls for individual entries that are +// subject to admission control. +func (l *LogTracker) Register(ctx context.Context, at LogMark, pri raftpb.Priority) { + if at.Term != l.last.Term || at.Index <= l.stable { + // Does not happen. Entries must be registered before being sent to storage, + // so an entry can't become stable before the Register call. + l.errorf(ctx, "admission register %+v [pri=%v] out of order", at, pri) + return + } + ln := len(l.waiting[pri]) + if ln != 0 && at.Index <= l.waiting[pri][ln-1].Index { + l.errorf(ctx, "admission register %+v [pri=%v] out of order", at, pri) + return + } + l.waiting[pri] = append(l.waiting[pri], at) +} + +// LogSynced informs the tracker that the log up to the given LogMark has been +// persisted to stable storage. +// +// All writes are done in the LogMark order, but the corresponding LogSynced +// calls can be skipped or invoked in any order, e.g. due to delivery +// concurrency. The tracker keeps the latest (in the logical sense) stable mark. +// +// Returns true if the admitted vector has advanced. +func (l *LogTracker) LogSynced(ctx context.Context, stable LogMark) bool { + if stable.After(l.last) { + // Does not happen. The write must have been registered with Append call. + l.errorf(ctx, "syncing mark %+v before appending it", stable) + return false + } + if stable.Term != l.last.Term || stable.Index <= l.stable { + // TODO(pav-kv): we can move the stable index up for a stale Term too, if we + // track leader term for each entry (see LogAdmitted), or forks of the log. + return false + } + maybeAdmitted := l.stable + 1 + l.stable = stable.Index + // The admitted index at a priority has advanced if its queue was empty or + // leading the stable index by more than one. + for _, marks := range l.waiting { + // Example: stable index was 5 before this call. If marks[0].Index <= 6 then + // we can't advance past 5 even if stable index advances to a higher value. + // But if marks[0].Index >= 7 we can advance to marks[0].Index-1 which is + // greater than the old stable index. 
+ if len(marks) == 0 || marks[0].Index > maybeAdmitted { + return true + } + } + return false +} + +// LogAdmitted informs the tracker that the log up to the given LogMark has been +// logically admitted to storage, at the given priority. +// +// All writes are done in the LogMark order, but the corresponding LogAdmitted +// calls can be skipped or invoked in any order, e.g. due to delivery +// concurrency. The tracker accounts for the latest (in the logical sense) +// admission. +// +// LogAdmitted and LogSynced for the same entry/mark can happen in any order +// too, since the write and admission queues are decoupled from each other, and +// there can be concurrency in the signal delivery. LogTracker dampens this by +// capping admitted indices at stable index (see Admitted), so that admissions +// appear to happen after log syncs. +// +// Returns true if the admitted vector has advanced. +func (l *LogTracker) LogAdmitted(ctx context.Context, at LogMark, pri raftpb.Priority) bool { + if at.After(l.last) { + // Does not happen. The write must have been registered with Append call. + l.errorf(ctx, "admitting mark %+v before appending it", at) + return false + } + waiting := l.waiting[pri] + // There is nothing to admit, or it's a stale admission. + if len(waiting) == 0 || waiting[0].After(at) { + return false + } + // At least one waiting entry can be admitted. The admitted index was at + // min(l.stable, waiting[0].Index-1). If waiting[0].Index-1 < l.stable, the + // min increases after the first entry is removed from the queue. + updated := waiting[0].Index <= l.stable + // Remove all entries up to the admitted mark. Due to invariants, this is + // always a prefix of the queue. + for i, ln := 1, len(waiting); i < ln; i++ { + if waiting[i].After(at) { + l.waiting[pri] = waiting[i:] + return updated + } + } + // The entire queue is admitted, clear it. + l.waiting[pri] = waiting[len(waiting):] + return updated +} + +// SnapSynced informs the tracker that a snapshot at the given log mark has been +// stored/synced, and the log is cleared. +// +// Returns true if the admitted vector has changed. +func (l *LogTracker) SnapSynced(ctx context.Context, mark LogMark) bool { + if !mark.After(l.last) { + l.errorf(ctx, "syncing stale snapshot %+v", mark) + return false + } + // Fake an append spanning the gap between the log and the snapshot. It will, + // if necessary, truncate the stable index and remove entries waiting for + // admission that became obsolete. + updated := l.Append(ctx, min(l.last.Index, mark.Index), mark) + if l.LogSynced(ctx, mark) { + return true + } + return updated +} + +// String returns a string representation of the LogTracker. +func (l *LogTracker) String() string { + return redact.StringWithoutMarkers(l) +} + +// SafeFormat implements the redact.SafeFormatter interface. +func (l *LogTracker) SafeFormat(w redact.SafePrinter, _ rune) { + admitted := l.Admitted().Admitted + w.Printf("mark:%+v, stable:%d, admitted:%v", l.last, l.stable, admitted) +} + +// DebugString returns a debug string for this tracker. 
+func (l *LogTracker) DebugString() string { + var b strings.Builder + fmt.Fprint(&b, l.String()) + for pri, marks := range l.waiting { + if len(marks) == 0 { + continue + } + fmt.Fprintf(&b, "\n%s:", raftpb.Priority(pri)) + for _, mark := range marks { + fmt.Fprintf(&b, " %+v", mark) + } + } + return b.String() +} + +func (l *LogTracker) errorf(ctx context.Context, format string, args ...any) { + format += "\n%s" + args = append(args, l.String()) + if buildutil.CrdbTestBuild { + panic(errors.AssertionFailedf(format, args...)) + } else { + log.Errorf(ctx, format, args...) + } +} + +// truncate returns a prefix of the ordered log marks slice, with all marks at +// index > after removed from it. +func truncate(marks []LogMark, after uint64) []LogMark { + for i := len(marks); i > 0; i-- { + if marks[i-1].Index <= after { + return marks[:i] + } + } + return marks[:0] +} diff --git a/pkg/kv/kvserver/kvflowcontrol/rac2/log_tracker_test.go b/pkg/kv/kvserver/kvflowcontrol/rac2/log_tracker_test.go new file mode 100644 index 000000000000..6167068e5fb8 --- /dev/null +++ b/pkg/kv/kvserver/kvflowcontrol/rac2/log_tracker_test.go @@ -0,0 +1,261 @@ +// Copyright 2024 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package rac2 + +import ( + "context" + "testing" + + "github.com/cockroachdb/cockroach/pkg/raft/raftpb" + "github.com/cockroachdb/cockroach/pkg/testutils/datapathutils" + "github.com/cockroachdb/cockroach/pkg/util/leaktest" + "github.com/cockroachdb/cockroach/pkg/util/log" + "github.com/cockroachdb/datadriven" + "github.com/stretchr/testify/require" +) + +func mark(term, index uint64) LogMark { + return LogMark{Term: term, Index: index} +} + +func (l *LogTracker) check(t *testing.T) { + require.LessOrEqual(t, l.stable, l.last.Index) + stable := l.Stable() + require.Equal(t, l.last.Term, stable.Term) + for _, waiting := range l.waiting { + if ln := len(waiting); ln != 0 { + require.LessOrEqual(t, waiting[ln-1].Index, l.last.Index) + require.LessOrEqual(t, waiting[ln-1].Term, l.last.Term) + } + for i, ln := 1, len(waiting); i < ln; i++ { + require.Less(t, waiting[i-1].Index, waiting[i].Index) + require.LessOrEqual(t, waiting[i-1].Term, waiting[i].Term) + } + } + a := l.Admitted() + require.Equal(t, stable.Term, a.Term) + for _, index := range a.Admitted { + require.LessOrEqual(t, index, stable.Index) + } +} + +func TestLogTrackerAppend(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + for _, tt := range []struct { + last LogMark + stable uint64 + + after uint64 + to LogMark + notOk bool + want uint64 // stable index + }{ + // Invalid appends. Writes with stale term or index gaps. + {last: mark(10, 100), after: 100, to: mark(9, 200), notOk: true}, + {last: mark(10, 100), after: 200, to: mark(10, 300), notOk: true}, + {last: mark(10, 100), after: 20, to: mark(10, 50), notOk: true}, + // Valid appends. + {after: 0, to: mark(10, 100), want: 0}, + {last: mark(10, 100), after: 100, to: mark(10, 150)}, + {last: mark(10, 100), after: 100, to: mark(15, 150)}, + {last: mark(10, 100), after: 50, to: mark(15, 150)}, + // Stable index does not change. 
+ {last: mark(10, 100), stable: 50, after: 100, to: mark(10, 150), want: 50}, + {last: mark(10, 100), stable: 50, after: 100, to: mark(11, 150), want: 50}, + {last: mark(10, 100), stable: 50, after: 70, to: mark(11, 150), want: 50}, + // Stable index regresses. + {last: mark(10, 100), stable: 50, after: 30, to: mark(11, 150), want: 30}, + } { + t.Run("", func(t *testing.T) { + defer func() { + failed := recover() != nil + require.Equal(t, tt.notOk, failed) + }() + l := NewLogTracker(tt.last) + l.stable = tt.stable + l.check(t) + l.Append(context.Background(), tt.after, tt.to) + l.check(t) + require.Equal(t, tt.to, l.last) + require.Equal(t, tt.want, l.Stable().Index) + }) + } +} + +func TestLogTrackerLogSynced(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + for _, tt := range []struct { + last LogMark + stable uint64 + + sync LogMark + notOk bool + want uint64 // stable index + }{ + // Invalid syncs. + {last: mark(5, 20), sync: mark(7, 10), notOk: true}, + {last: mark(5, 20), sync: mark(5, 25), notOk: true}, + + // Valid syncs. The sync mark <= the latest observed mark. + {last: mark(5, 20), stable: 5, sync: mark(4, 10), want: 5}, + {last: mark(5, 20), stable: 15, sync: mark(4, 10), want: 15}, + {last: mark(5, 20), stable: 15, sync: mark(4, 100), want: 15}, + {last: mark(5, 20), sync: mark(5, 10), want: 10}, + {last: mark(5, 20), stable: 15, sync: mark(5, 10), want: 15}, + {last: mark(5, 20), sync: mark(5, 20), want: 20}, + {last: mark(5, 40), stable: 15, sync: mark(5, 30), want: 30}, + } { + t.Run("", func(t *testing.T) { + defer func() { + failed := recover() != nil + require.Equal(t, tt.notOk, failed) + }() + l := NewLogTracker(tt.last) + l.stable = tt.stable + l.check(t) + l.LogSynced(context.Background(), tt.sync) + l.check(t) + require.Equal(t, tt.last, l.last) + require.Equal(t, tt.want, l.Stable().Index) + }) + } +} + +func TestLogTrackerSnapSynced(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + for _, tt := range []struct { + last LogMark + stable uint64 + snap LogMark + notOk bool + }{ + // Invalid snapshots. + {last: mark(5, 20), snap: mark(4, 30), notOk: true}, + {last: mark(5, 20), snap: mark(5, 10), notOk: true}, + // Valid snapshots. 
+ {last: mark(5, 20), snap: mark(5, 30)}, + {last: mark(5, 20), snap: mark(6, 10)}, + {last: mark(5, 20), snap: mark(6, 30)}, + } { + t.Run("", func(t *testing.T) { + defer func() { + failed := recover() != nil + require.Equal(t, tt.notOk, failed) + }() + l := NewLogTracker(tt.last) + l.stable = tt.stable + l.check(t) + l.SnapSynced(context.Background(), tt.snap) + l.check(t) + require.Equal(t, tt.snap, l.last) + require.Equal(t, tt.snap.Index, l.Stable().Index) + }) + } +} + +func TestLogTracker(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + readPri := func(t *testing.T, d *datadriven.TestData) raftpb.Priority { + var s string + d.ScanArgs(t, "pri", &s) + pri := priFromString(s) + if pri == raftpb.NumPriorities { + t.Fatalf("unknown pri: %s", s) + } + return pri + } + readMark := func(t *testing.T, d *datadriven.TestData, idxName string) LogMark { + var mark LogMark + d.ScanArgs(t, "term", &mark.Term) + d.ScanArgs(t, idxName, &mark.Index) + return mark + } + + var tracker LogTracker + state := func(updated bool) string { + var s string + if updated { + s += "[upd] " + } + return s + tracker.DebugString() + } + + ctx := context.Background() + run := func(t *testing.T, d *datadriven.TestData) (out string) { + defer func() { + if err := recover(); err != nil { + out = "error" + } + }() + + switch d.Cmd { + case "reset": // Example: reset term=1 index=10 + stable := readMark(t, d, "index") + tracker = NewLogTracker(stable) + return state(false) + + case "append": // Example: append term=10 after=100 to=200 + var after uint64 + d.ScanArgs(t, "after", &after) + to := readMark(t, d, "to") + updated := tracker.Append(ctx, after, to) + return state(updated) + + case "sync": // Example: sync term=10 index=100 + mark := readMark(t, d, "index") + updated := tracker.LogSynced(ctx, mark) + return state(updated) + + case "register": // Example: register term=10 index=100 pri=LowPri + mark := readMark(t, d, "index") + pri := readPri(t, d) + tracker.Register(ctx, mark, pri) + return state(false) + + case "admit": // Example: admit term=10 index=100 pri=LowPri + mark := readMark(t, d, "index") + pri := readPri(t, d) + updated := tracker.LogAdmitted(ctx, mark, pri) + return state(updated) + + default: + t.Fatalf("unknown command: %s", d.Cmd) + return "" + } + } + + datadriven.RunTest(t, datapathutils.TestDataPath(t, "log_tracker"), run) +} + +// priFromString converts a string to Priority. +// TODO(pav-kv): move to the package next to Priority. +func priFromString(s string) raftpb.Priority { + switch s { + case "LowPri": + return raftpb.LowPri + case "NormalPri": + return raftpb.NormalPri + case "AboveNormalPri": + return raftpb.AboveNormalPri + case "HighPri": + return raftpb.HighPri + default: + return raftpb.NumPriorities + } +} diff --git a/pkg/kv/kvserver/kvflowcontrol/rac2/metrics.go b/pkg/kv/kvserver/kvflowcontrol/rac2/metrics.go new file mode 100644 index 000000000000..56469961d84e --- /dev/null +++ b/pkg/kv/kvserver/kvflowcontrol/rac2/metrics.go @@ -0,0 +1,310 @@ +// Copyright 2024 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package rac2 + +import ( + "fmt" + "time" + + "github.com/cockroachdb/cockroach/pkg/base" + "github.com/cockroachdb/cockroach/pkg/util/admission/admissionpb" + "github.com/cockroachdb/cockroach/pkg/util/metric" + "github.com/cockroachdb/redact" +) + +// TODO(kvoli): +// - hookup the metrics to the registry +// - call onBypassed etc in replica_rac2.Processor + +// Aliases to make the code below slightly easier to read. +const regular, elastic = admissionpb.RegularWorkClass, admissionpb.ElasticWorkClass + +var ( + flowTokensAvailable = metric.Metadata{ + Name: "kvflowcontrol.tokens.%s.%s.available", + Help: "Flow %s tokens available for %s requests, across all replication streams", + Measurement: "Bytes", + Unit: metric.Unit_BYTES, + } + flowTokensDeducted = metric.Metadata{ + Name: "kvflowcontrol.tokens.%s.%s.deducted", + Help: "Flow %s tokens deducted by %s requests, across all replication streams", + Measurement: "Bytes", + Unit: metric.Unit_BYTES, + } + flowTokensReturned = metric.Metadata{ + Name: "kvflowcontrol.tokens.%s.%s.returned", + Help: "Flow %s tokens returned by %s requests, across all replication streams", + Measurement: "Bytes", + Unit: metric.Unit_BYTES, + } + flowTokensUnaccounted = metric.Metadata{ + Name: "kvflowcontrol.tokens.%s.%s.unaccounted", + Help: "Flow %s tokens returned by %s requests that were unaccounted for, across all replication streams", + Measurement: "Bytes", + Unit: metric.Unit_BYTES, + } + totalStreamCount = metric.Metadata{ + Name: "kvflowcontrol.streams.%s.%s.total_count", + Help: "Total number of %s replication streams for %s requests", + Measurement: "Count", + Unit: metric.Unit_COUNT, + } + blockedStreamCount = metric.Metadata{ + Name: "kvflowcontrol.streams.%s.%s.blocked_count", + Help: "Number of %s replication streams with no flow tokens available for %s requests", + Measurement: "Count", + Unit: metric.Unit_COUNT, + } + // WaitForEval metrics. + requestsWaiting = metric.Metadata{ + Name: "kvflowcontrol.eval_wait.%s.requests.waiting", + Help: "Number of %s requests waiting for flow tokens", + Measurement: "Requests", + Unit: metric.Unit_COUNT, + } + requestsAdmitted = metric.Metadata{ + Name: "kvflowcontrol.eval_wait.%s.requests.admitted", + Help: "Number of %s requests admitted by the flow controller", + Measurement: "Requests", + Unit: metric.Unit_COUNT, + } + requestsErrored = metric.Metadata{ + Name: "kvflowcontrol.eval_wait.%s.requests.errored", + Help: "Number of %s requests that errored out while waiting for flow tokens", + Measurement: "Requests", + Unit: metric.Unit_COUNT, + } + requestsBypassed = metric.Metadata{ + Name: "kvflowcontrol.eval_wait.%s.requests.bypassed", + Help: "Number of waiting %s requests that bypassed the flow " + + "controller due the evaluating replica not being the leader", + Measurement: "Requests", + Unit: metric.Unit_COUNT, + } + waitDuration = metric.Metadata{ + Name: "kvflowcontrol.eval_wait.%s.duration", + Help: "Latency histogram for time %s requests spent waiting for flow tokens to evaluate", + Measurement: "Nanoseconds", + Unit: metric.Unit_NANOSECONDS, + } +) + +// annotateMetricTemplateWithWorkClass uses the given metric template to build +// one suitable for the specific token type and work class. 
+func annotateMetricTemplateWithWorkClassAndType( + wc admissionpb.WorkClass, tmpl metric.Metadata, t flowControlMetricType, +) metric.Metadata { + rv := tmpl + rv.Name = fmt.Sprintf(tmpl.Name, t, wc) + rv.Help = fmt.Sprintf(tmpl.Help, t, wc) + return rv +} + +// annotateMetricTemplateWithWorkClass uses the given metric template to build +// one suitable for the specific work class. +func annotateMetricTemplateWithWorkClass( + wc admissionpb.WorkClass, tmpl metric.Metadata, +) metric.Metadata { + rv := tmpl + rv.Name = fmt.Sprintf(tmpl.Name, wc) + rv.Help = fmt.Sprintf(tmpl.Help, wc) + return rv +} + +type flowControlMetricType int + +const ( + flowControlEvalMetricType flowControlMetricType = iota + flowControlSendMetricType + numFlowControlMetricTypes +) + +func (f flowControlMetricType) String() string { + return redact.StringWithoutMarkers(f) +} + +// SafeFormat implements the redact.SafeFormatter interface. +func (f flowControlMetricType) SafeFormat(p redact.SafePrinter, _ rune) { + switch f { + case flowControlEvalMetricType: + p.SafeString("eval") + case flowControlSendMetricType: + p.SafeString("send") + default: + panic("unknown flowControlMetricType") + } +} + +type TokenMetrics struct { + CounterMetrics [numFlowControlMetricTypes]*TokenCounterMetrics + StreamMetrics [numFlowControlMetricTypes]*TokenStreamMetrics +} + +var _ metric.Struct = &TokenMetrics{} + +// TokenMetrics implements the metric.Struct interface. +func (m *TokenMetrics) MetricStruct() {} + +func NewTokenMetrics() *TokenMetrics { + m := &TokenMetrics{} + for _, typ := range []flowControlMetricType{ + flowControlEvalMetricType, + flowControlSendMetricType, + } { + m.CounterMetrics[typ] = newTokenCounterMetrics(typ) + m.StreamMetrics[typ] = newTokenStreamMetrics(typ) + } + return m +} + +type TokenCounterMetrics struct { + Deducted [admissionpb.NumWorkClasses]*metric.Counter + Returned [admissionpb.NumWorkClasses]*metric.Counter + Unaccounted [admissionpb.NumWorkClasses]*metric.Counter +} + +var _ metric.Struct = &TokenCounterMetrics{} + +// TokenCounterMetrics implements the metric.Struct interface. 
+func (m *TokenCounterMetrics) MetricStruct() {}
+
+func newTokenCounterMetrics(t flowControlMetricType) *TokenCounterMetrics {
+	m := &TokenCounterMetrics{}
+	for _, wc := range []admissionpb.WorkClass{
+		admissionpb.RegularWorkClass,
+		admissionpb.ElasticWorkClass,
+	} {
+		m.Deducted[wc] = metric.NewCounter(
+			annotateMetricTemplateWithWorkClassAndType(wc, flowTokensDeducted, t),
+		)
+		m.Returned[wc] = metric.NewCounter(
+			annotateMetricTemplateWithWorkClassAndType(wc, flowTokensReturned, t),
+		)
+		m.Unaccounted[wc] = metric.NewCounter(
+			annotateMetricTemplateWithWorkClassAndType(wc, flowTokensUnaccounted, t),
+		)
+	}
+	return m
+}
+
+func (m *TokenCounterMetrics) onTokenAdjustment(adjustment tokensPerWorkClass) {
+	if adjustment.regular < 0 {
+		m.Deducted[regular].Inc(-int64(adjustment.regular))
+	} else if adjustment.regular > 0 {
+		m.Returned[regular].Inc(int64(adjustment.regular))
+	}
+	if adjustment.elastic < 0 {
+		m.Deducted[elastic].Inc(-int64(adjustment.elastic))
+	} else if adjustment.elastic > 0 {
+		m.Returned[elastic].Inc(int64(adjustment.elastic))
+	}
+}
+
+func (m *TokenCounterMetrics) onUnaccounted(unaccounted tokensPerWorkClass) {
+	m.Unaccounted[regular].Inc(int64(unaccounted.regular))
+	m.Unaccounted[elastic].Inc(int64(unaccounted.elastic))
+}
+
+type TokenStreamMetrics struct {
+	Count           [admissionpb.NumWorkClasses]*metric.Gauge
+	BlockedCount    [admissionpb.NumWorkClasses]*metric.Gauge
+	TokensAvailable [admissionpb.NumWorkClasses]*metric.Gauge
+}
+
+var _ metric.Struct = &TokenStreamMetrics{}
+
+// TokenStreamMetrics implements the metric.Struct interface.
+func (m *TokenStreamMetrics) MetricStruct() {}
+
+func newTokenStreamMetrics(t flowControlMetricType) *TokenStreamMetrics {
+	m := &TokenStreamMetrics{}
+	for _, wc := range []admissionpb.WorkClass{
+		admissionpb.RegularWorkClass,
+		admissionpb.ElasticWorkClass,
+	} {
+		m.Count[wc] = metric.NewGauge(
+			annotateMetricTemplateWithWorkClassAndType(wc, totalStreamCount, t),
+		)
+		m.BlockedCount[wc] = metric.NewGauge(
+			annotateMetricTemplateWithWorkClassAndType(wc, blockedStreamCount, t),
+		)
+		m.TokensAvailable[wc] = metric.NewGauge(
+			annotateMetricTemplateWithWorkClassAndType(wc, flowTokensAvailable, t),
+		)
+	}
+	return m
+}
+
+type EvalWaitMetrics struct {
+	Waiting  [admissionpb.NumWorkClasses]*metric.Gauge
+	Admitted [admissionpb.NumWorkClasses]*metric.Counter
+	Errored  [admissionpb.NumWorkClasses]*metric.Counter
+	Bypassed [admissionpb.NumWorkClasses]*metric.Counter
+	Duration [admissionpb.NumWorkClasses]metric.IHistogram
+}
+
+var _ metric.Struct = &EvalWaitMetrics{}
+
+// EvalWaitMetrics implements the metric.Struct interface.
+func (m *EvalWaitMetrics) MetricStruct() {} + +func NewEvalWaitMetrics() *EvalWaitMetrics { + m := &EvalWaitMetrics{} + for _, wc := range []admissionpb.WorkClass{ + admissionpb.RegularWorkClass, + admissionpb.ElasticWorkClass, + } { + m.Waiting[wc] = metric.NewGauge( + annotateMetricTemplateWithWorkClass(wc, requestsWaiting), + ) + m.Admitted[wc] = metric.NewCounter( + annotateMetricTemplateWithWorkClass(wc, requestsAdmitted), + ) + m.Errored[wc] = metric.NewCounter( + annotateMetricTemplateWithWorkClass(wc, requestsErrored), + ) + m.Bypassed[wc] = metric.NewCounter( + annotateMetricTemplateWithWorkClass(wc, requestsBypassed), + ) + m.Duration[wc] = metric.NewHistogram( + metric.HistogramOptions{ + Metadata: annotateMetricTemplateWithWorkClass(wc, waitDuration), + Duration: base.DefaultHistogramWindowInterval(), + BucketConfig: metric.IOLatencyBuckets, + Mode: metric.HistogramModePrometheus, + }, + ) + } + return m +} + +func (e *EvalWaitMetrics) OnWaiting(wc admissionpb.WorkClass) { + e.Waiting[wc].Inc(1) +} + +func (e *EvalWaitMetrics) OnAdmitted(wc admissionpb.WorkClass, dur time.Duration) { + e.Admitted[wc].Inc(1) + e.Waiting[wc].Dec(1) + e.Duration[wc].RecordValue(dur.Nanoseconds()) +} + +func (e *EvalWaitMetrics) OnBypassed(wc admissionpb.WorkClass, dur time.Duration) { + e.Bypassed[wc].Inc(1) + e.Waiting[wc].Dec(1) + e.Duration[wc].RecordValue(dur.Nanoseconds()) +} + +func (e *EvalWaitMetrics) OnErrored(wc admissionpb.WorkClass, dur time.Duration) { + e.Errored[wc].Inc(1) + e.Waiting[wc].Dec(1) + e.Duration[wc].RecordValue(dur.Nanoseconds()) +} diff --git a/pkg/kv/kvserver/kvflowcontrol/rac2/priority_test.go b/pkg/kv/kvserver/kvflowcontrol/rac2/priority_test.go index 9344bd8b394f..35fc7cbaaf4a 100644 --- a/pkg/kv/kvserver/kvflowcontrol/rac2/priority_test.go +++ b/pkg/kv/kvserver/kvflowcontrol/rac2/priority_test.go @@ -15,10 +15,15 @@ import ( "github.com/cockroachdb/cockroach/pkg/raft/raftpb" "github.com/cockroachdb/cockroach/pkg/util/admission/admissionpb" + "github.com/cockroachdb/cockroach/pkg/util/leaktest" + "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/stretchr/testify/require" ) func TestPriority(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + var lastRaftPriority raftpb.Priority lastConvertedBackPriority := admissionpb.LowPri for i := int(admissionpb.LowPri); i < admissionpb.OneAboveHighPri; i++ { diff --git a/pkg/kv/kvserver/kvflowcontrol/rac2/range_controller.go b/pkg/kv/kvserver/kvflowcontrol/rac2/range_controller.go index 01657e220d28..d5c8da72ebf1 100644 --- a/pkg/kv/kvserver/kvflowcontrol/rac2/range_controller.go +++ b/pkg/kv/kvserver/kvflowcontrol/rac2/range_controller.go @@ -13,11 +13,21 @@ package rac2 import ( "cmp" "context" + "fmt" + "reflect" "slices" + "time" + "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvflowcontrol" + "github.com/cockroachdb/cockroach/pkg/kv/kvserver/raftlog" "github.com/cockroachdb/cockroach/pkg/raft/raftpb" + "github.com/cockroachdb/cockroach/pkg/raft/tracker" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/util/admission/admissionpb" + "github.com/cockroachdb/cockroach/pkg/util/hlc" + "github.com/cockroachdb/cockroach/pkg/util/log" + "github.com/cockroachdb/cockroach/pkg/util/syncutil" + "github.com/cockroachdb/errors" "github.com/cockroachdb/redact" ) @@ -29,12 +39,18 @@ import ( type RangeController interface { // 
WaitForEval seeks admission to evaluate a request at the given priority.
 	// This blocks until there are positive tokens available for the request to
-	// be admitted for evaluation. Note the number of tokens required by the
-	// request is not considered, only the priority of the request, as the number
-	// of tokens is not known until eval.
+	// be admitted for evaluation, or the context is canceled (which returns an
+	// error). Note the number of tokens required by the request is not
+	// considered, only the priority of the request, as the number of tokens is
+	// not known until eval.
+	//
+	// In the non-error case, the waited return value is true if the
+	// RangeController was not closed during the execution of WaitForEval. If
+	// closed, a (false, nil) will be returned -- this is important for the
+	// caller to fall back to waiting on the local store.
 	//
 	// No mutexes should be held.
-	WaitForEval(ctx context.Context, pri admissionpb.WorkPriority) error
+	WaitForEval(ctx context.Context, pri admissionpb.WorkPriority) (waited bool, err error)
 	// HandleRaftEventRaftMuLocked handles the provided raft event for the range.
 	//
 	// Requires replica.raftMu to be held.
@@ -59,12 +75,66 @@ type RangeController interface {
 	CloseRaftMuLocked(ctx context.Context)
 }
 
-// TODO(pav-kv): This struct is a placeholder for the interface or struct
-// containing raft entries. Replace this as part of #128019.
+// TODO(pav-kv): This interface is a placeholder for the interface containing
+// raft methods. Replace this as part of #128019.
+type RaftInterface interface {
+	// FollowerStateRaftMuLocked returns the current state of a follower. The
+	// values of Match and Next are populated iff in StateReplicate. All entries
+	// >= Next have not had MsgApps constructed during the lifetime of this
+	// StateReplicate (they may have been constructed previously).
+	//
+	// When a follower transitions from {StateProbe,StateSnapshot} =>
+	// StateReplicate, we start trying to send MsgApps. We should
+	// notice such transitions both in HandleRaftEvent and SetReplicasLocked.
+	//
+	// Requires Replica.raftMu to be held, Replica.mu is not held.
+	FollowerStateRaftMuLocked(replicaID roachpb.ReplicaID) FollowerStateInfo
+}
+
+type FollowerStateInfo struct {
+	State tracker.StateType
+
+	// The remaining fields are only populated in StateReplicate.
+	// (Match, Next) is in-flight.
+	Match uint64
+	Next  uint64
+}
+
+// AdmittedTracker is used to retrieve the latest admitted vector for a
+// replica (including the leader).
+type AdmittedTracker interface {
+	// GetAdmitted returns the latest AdmittedVector for replicaID. It returns
+	// an empty struct if the replicaID is not known. NB: the
+	// AdmittedVector.Admitted[i] value can transiently advance past
+	// FollowerStateInfo.Match, since the admitted tracking subsystem is
+	// separate from Raft.
+	GetAdmitted(replicaID roachpb.ReplicaID) AdmittedVector
+}
+
+// RaftEvent carries a RACv2-relevant subset of raft state sent to storage.
 type RaftEvent struct {
+	// Term is the leader term on whose behalf the entries or snapshot are
+	// written. Note that it may be behind the raft node's current term.
+	Term uint64
+	// Snap contains the snapshot to be written to storage.
+	Snap *raftpb.Snapshot
+	// Entries contains the log entries to be written to storage.
 	Entries []raftpb.Entry
 }
 
+// RaftEventFromMsgStorageAppend constructs a RaftEvent from the given raft
+// MsgStorageAppend message. Returns the zero value if the message is empty.
+func RaftEventFromMsgStorageAppend(msg raftpb.Message) RaftEvent { + if msg.Type != raftpb.MsgStorageAppend { + return RaftEvent{} + } + return RaftEvent{ + Term: msg.LogTerm, + Snap: msg.Snapshot, + Entries: msg.Entries, + } +} + // NoReplicaID is a special value of roachpb.ReplicaID, which can never be a // valid ID. const NoReplicaID roachpb.ReplicaID = 0 @@ -99,3 +169,662 @@ func (rs ReplicaSet) SafeFormat(w redact.SafePrinter, _ rune) { func (rs ReplicaSet) String() string { return redact.StringWithoutMarkers(rs) } + +// ProbeToCloseTimerScheduler is an interface for scheduling the closing of a +// replica send stream. +type ProbeToCloseTimerScheduler interface { + // ScheduleSendStreamCloseRaftMuLocked schedules a callback with a raft event + // after the given delay. This function may be used to handle send stream + // state transition, usually to close a send stream after the given delay. + // e.g., + // + // HandleRaftEventRaftMuLocked(ctx, RaftEvent{}) + // + // Which will trigger handleReadyState to close the send stream if it hasn't + // transitioned to StateReplicate. + // + // Requires replica.raftMu to be held. + ScheduleSendStreamCloseRaftMuLocked( + ctx context.Context, rangeID roachpb.RangeID, delay time.Duration) +} + +type RangeControllerOptions struct { + RangeID roachpb.RangeID + TenantID roachpb.TenantID + // LocalReplicaID is the ReplicaID of the local replica, which is the + // leader. + LocalReplicaID roachpb.ReplicaID + // SSTokenCounter provides access to all the TokenCounters that will be + // needed (keyed by (tenantID, storeID)). + SSTokenCounter *StreamTokenCounterProvider + RaftInterface RaftInterface + Clock *hlc.Clock + CloseTimerScheduler ProbeToCloseTimerScheduler + AdmittedTracker AdmittedTracker + EvalWaitMetrics *EvalWaitMetrics +} + +// RangeControllerInitState is the initial state at the time of creation. +type RangeControllerInitState struct { + // Must include RangeControllerOptions.ReplicaID. + ReplicaSet ReplicaSet + // Leaseholder may be set to NoReplicaID, in which case the leaseholder is + // unknown. + Leaseholder roachpb.ReplicaID +} + +type rangeController struct { + opts RangeControllerOptions + replicaSet ReplicaSet + // leaseholder can be NoReplicaID or not be in ReplicaSet, i.e., it is + // eventually consistent with the set of replicas. + leaseholder roachpb.ReplicaID + + mu struct { + syncutil.Mutex + + // State for waiters. When anything in voterSets changes, voterSetRefreshCh + // is closed, and replaced with a new channel. The voterSets is + // copy-on-write, so waiters make a shallow copy. + voterSets []voterSet + voterSetRefreshCh chan struct{} + } + + replicaMap map[roachpb.ReplicaID]*replicaState +} + +// voterStateForWaiters informs whether WaitForEval is required to wait for +// eval-tokens for a voter. 
+type voterStateForWaiters struct { + replicaID roachpb.ReplicaID + isLeader bool + isLeaseHolder bool + isStateReplicate bool + evalTokenCounter *tokenCounter +} + +type voterSet []voterStateForWaiters + +var _ RangeController = &rangeController{} + +func NewRangeController( + ctx context.Context, o RangeControllerOptions, init RangeControllerInitState, +) *rangeController { + rc := &rangeController{ + opts: o, + leaseholder: init.Leaseholder, + replicaMap: make(map[roachpb.ReplicaID]*replicaState), + } + rc.mu.voterSetRefreshCh = make(chan struct{}) + rc.updateReplicaSet(ctx, init.ReplicaSet) + rc.updateVoterSets() + return rc +} + +// WaitForEval blocks until there are positive tokens available for the +// request to be admitted for evaluation. Note the number of tokens required +// by the request is not considered, only the priority of the request, as the +// number of tokens is not known until eval. +// +// No mutexes should be held. +func (rc *rangeController) WaitForEval( + ctx context.Context, pri admissionpb.WorkPriority, +) (waited bool, err error) { + wc := admissionpb.WorkClassFromPri(pri) + waitForAllReplicateHandles := false + if wc == admissionpb.ElasticWorkClass { + waitForAllReplicateHandles = true + } + var handles []tokenWaitingHandleInfo + var scratch []reflect.SelectCase + + rc.opts.EvalWaitMetrics.OnWaiting(wc) + start := rc.opts.Clock.PhysicalTime() +retry: + // Snapshot the voterSets and voterSetRefreshCh. + rc.mu.Lock() + vss := rc.mu.voterSets + vssRefreshCh := rc.mu.voterSetRefreshCh + rc.mu.Unlock() + + if vssRefreshCh == nil { + // RangeControllerImpl is closed. + // TODO(kvoli): We also need to do this in the replica_rac2.Processor, + // which will allow requests to bypass when a replica is not the leader and + // therefore the controller is closed. + rc.opts.EvalWaitMetrics.OnBypassed(wc, rc.opts.Clock.PhysicalTime().Sub(start)) + return false, nil + } + for _, vs := range vss { + quorumCount := (len(vs) + 2) / 2 + haveEvalTokensCount := 0 + handles = handles[:0] + requiredWait := false + for _, v := range vs { + available, handle := v.evalTokenCounter.TokensAvailable(wc) + if available { + haveEvalTokensCount++ + continue + } + + // Don't have eval tokens, and have a handle. + handleInfo := tokenWaitingHandleInfo{ + handle: handle, + requiredWait: v.isLeader || v.isLeaseHolder || + (waitForAllReplicateHandles && v.isStateReplicate), + } + handles = append(handles, handleInfo) + if !requiredWait && handleInfo.requiredWait { + requiredWait = true + } + } + remainingForQuorum := quorumCount - haveEvalTokensCount + if remainingForQuorum < 0 { + remainingForQuorum = 0 + } + if remainingForQuorum > 0 || requiredWait { + var state WaitEndState + state, scratch = WaitForEval(ctx, vssRefreshCh, handles, remainingForQuorum, scratch) + switch state { + case WaitSuccess: + continue + case ContextCanceled: + rc.opts.EvalWaitMetrics.OnErrored(wc, rc.opts.Clock.PhysicalTime().Sub(start)) + return false, ctx.Err() + case RefreshWaitSignaled: + goto retry + } + } + } + rc.opts.EvalWaitMetrics.OnAdmitted(wc, rc.opts.Clock.PhysicalTime().Sub(start)) + return true, nil +} + +// HandleRaftEventRaftMuLocked handles the provided raft event for the range. +// +// Requires replica.raftMu to be held. 
+func (rc *rangeController) HandleRaftEventRaftMuLocked(ctx context.Context, e RaftEvent) error { + shouldWaitChange := false + for r, rs := range rc.replicaMap { + info := rc.opts.RaftInterface.FollowerStateRaftMuLocked(r) + shouldWaitChange = rs.handleReadyState(ctx, info) || shouldWaitChange + } + // If there was a quorum change, update the voter sets, triggering the + // refresh channel for any requests waiting for eval tokens. + if shouldWaitChange { + rc.updateVoterSets() + } + + // Compute the flow control state for each entry. We do this once here, + // instead of decoding each entry multiple times for all replicas. + entryStates := make([]entryFCState, len(e.Entries)) + for i, entry := range e.Entries { + entryStates[i] = getEntryFCStateOrFatal(ctx, entry) + } + for _, rs := range rc.replicaMap { + rs.handleReadyEntries(ctx, entryStates) + } + return nil +} + +// HandleSchedulerEventRaftMuLocked processes an event scheduled by the +// controller. +// +// Requires replica.raftMu to be held. +func (rc *rangeController) HandleSchedulerEventRaftMuLocked(ctx context.Context) error { + panic("unimplemented") +} + +// SetReplicasRaftMuLocked sets the replicas of the range. The caller will +// never mutate replicas, and neither should the callee. +// +// Requires replica.raftMu to be held. +func (rc *rangeController) SetReplicasRaftMuLocked(ctx context.Context, replicas ReplicaSet) error { + rc.updateReplicaSet(ctx, replicas) + rc.updateVoterSets() + return nil +} + +// SetLeaseholderRaftMuLocked sets the leaseholder of the range. +// +// Requires raftMu to be held. +func (rc *rangeController) SetLeaseholderRaftMuLocked( + ctx context.Context, replica roachpb.ReplicaID, +) { + if replica == rc.leaseholder { + return + } + rc.leaseholder = replica + rc.updateVoterSets() +} + +// CloseRaftMuLocked closes the range controller. +// +// Requires replica.raftMu to be held. +func (rc *rangeController) CloseRaftMuLocked(ctx context.Context) { + rc.mu.Lock() + defer rc.mu.Unlock() + + rc.mu.voterSets = nil + close(rc.mu.voterSetRefreshCh) + rc.mu.voterSetRefreshCh = nil +} + +func (rc *rangeController) updateReplicaSet(ctx context.Context, newSet ReplicaSet) { + prevSet := rc.replicaSet + for r := range prevSet { + desc, ok := newSet[r] + if !ok { + delete(rc.replicaMap, r) + } else { + rs := rc.replicaMap[r] + rs.desc = desc + } + } + for r, desc := range newSet { + _, ok := prevSet[r] + if ok { + // Already handled above. + continue + } + rc.replicaMap[r] = NewReplicaState(ctx, rc, desc) + } + rc.replicaSet = newSet +} + +func (rc *rangeController) updateVoterSets() { + rc.mu.Lock() + defer rc.mu.Unlock() + + setCount := 1 + for _, r := range rc.replicaSet { + isOld := r.IsVoterOldConfig() + isNew := r.IsVoterNewConfig() + if !isOld && !isNew { + continue + } + if !isOld && isNew { + setCount++ + break + } + } + var voterSets []voterSet + for len(voterSets) < setCount { + voterSets = append(voterSets, voterSet{}) + } + for _, r := range rc.replicaSet { + isOld := r.IsVoterOldConfig() + isNew := r.IsVoterNewConfig() + if !isOld && !isNew { + continue + } + // Is a voter. 
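+		// During a joint configuration, a replica may be a voter in both the
+		// old and new sets, in which case it is appended to both below.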
+ rs := rc.replicaMap[r.ReplicaID] + vsfw := voterStateForWaiters{ + replicaID: r.ReplicaID, + isLeader: r.ReplicaID == rc.opts.LocalReplicaID, + isLeaseHolder: r.ReplicaID == rc.leaseholder, + isStateReplicate: rs.isStateReplicate(), + evalTokenCounter: rs.evalTokenCounter, + } + if isOld { + voterSets[0] = append(voterSets[0], vsfw) + } + if isNew && setCount == 2 { + voterSets[1] = append(voterSets[1], vsfw) + } + } + rc.mu.voterSets = voterSets + close(rc.mu.voterSetRefreshCh) + rc.mu.voterSetRefreshCh = make(chan struct{}) +} + +type replicaState struct { + parent *rangeController + // stream aggregates across the streams for the same (tenant, store). This + // is the identity that is used to deduct tokens or wait for tokens to be + // positive. + stream kvflowcontrol.Stream + evalTokenCounter, sendTokenCounter *tokenCounter + desc roachpb.ReplicaDescriptor + + sendStream *replicaSendStream +} + +func NewReplicaState( + ctx context.Context, parent *rangeController, desc roachpb.ReplicaDescriptor, +) *replicaState { + stream := kvflowcontrol.Stream{TenantID: parent.opts.TenantID, StoreID: desc.StoreID} + rs := &replicaState{ + parent: parent, + stream: stream, + evalTokenCounter: parent.opts.SSTokenCounter.Eval(stream), + sendTokenCounter: parent.opts.SSTokenCounter.Send(stream), + desc: desc, + } + state := parent.opts.RaftInterface.FollowerStateRaftMuLocked(desc.ReplicaID) + if state.State == tracker.StateReplicate { + rs.createReplicaSendStream() + } + + return rs +} + +type replicaSendStream struct { + parent *replicaState + + mu struct { + syncutil.Mutex + // connectedStateStart is the time when the connectedState was last + // transitioned from one state to another e.g., from replicate to + // probeRecentlyReplicate or snapshot to replicate. + connectedState connectedState + connectedStateStart time.Time + tracker Tracker + closed bool + } +} + +func (rss *replicaSendStream) changeConnectedStateLocked(state connectedState, now time.Time) { + rss.mu.connectedState = state + rss.mu.connectedStateStart = now +} + +func (rs *replicaState) createReplicaSendStream() { + // Must be in StateReplicate on creation. + rs.sendStream = &replicaSendStream{ + parent: rs, + } + rs.sendStream.mu.tracker.Init(rs.stream) + rs.sendStream.mu.closed = false + rs.sendStream.changeConnectedStateLocked( + replicate, rs.parent.opts.Clock.PhysicalTime()) +} + +func (rs *replicaState) isStateReplicate() bool { + if rs.sendStream == nil { + return false + } + rs.sendStream.mu.Lock() + defer rs.sendStream.mu.Unlock() + + return rs.sendStream.mu.connectedState.shouldWaitForElasticEvalTokens() +} + +type entryFCState struct { + term, index uint64 + usesFlowControl bool + tokens kvflowcontrol.Tokens + pri raftpb.Priority +} + +// getEntryFCStateOrFatal returns the given entry's flow control state. If the +// entry encoding cannot be determined, a fatal is logged. +func getEntryFCStateOrFatal(ctx context.Context, entry raftpb.Entry) entryFCState { + enc, pri, err := raftlog.EncodingOf(entry) + if err != nil { + log.Fatalf(ctx, "error getting encoding of entry: %v", err) + } + + if enc == raftlog.EntryEncodingStandardWithAC || enc == raftlog.EntryEncodingSideloadedWithAC { + // When the entry is encoded with the v1 encoding, we don't have access to + // the priority via the priority bit and would need to decode the admission + // metadata. Instead, assume the priority is low priority, which is the + // only sane flow control priority enforcement level in v1 (elastic only). 
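+		// (LowPri maps to the elastic work class in WorkClassFromRaftPriority,
+		// so v1 entries only ever consume elastic tokens.)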
+		pri = raftpb.LowPri
+	}
+
+	return entryFCState{
+		index:           entry.Index,
+		term:            entry.Term,
+		usesFlowControl: enc.UsesAdmissionControl(),
+		tokens:          kvflowcontrol.Tokens(len(entry.Data)),
+		pri:             pri,
+	}
+}
+
+func (rs *replicaState) handleReadyEntries(ctx context.Context, entries []entryFCState) {
+	if rs.sendStream == nil {
+		return
+	}
+
+	rs.sendStream.mu.Lock()
+	defer rs.sendStream.mu.Unlock()
+
+	for _, entry := range entries {
+		if !entry.usesFlowControl {
+			continue
+		}
+		rs.sendStream.mu.tracker.Track(ctx, entry.term, entry.index, entry.pri, entry.tokens)
+		rs.evalTokenCounter.Deduct(
+			ctx, WorkClassFromRaftPriority(entry.pri), entry.tokens)
+		rs.sendTokenCounter.Deduct(
+			ctx, WorkClassFromRaftPriority(entry.pri), entry.tokens)
+	}
+}
+
+// handleReadyState handles state management for the replica based on the
+// provided follower state information. If the state changes in a way that
+// affects requests waiting for evaluation, returns true.
+func (rs *replicaState) handleReadyState(
+	ctx context.Context, info FollowerStateInfo,
+) (shouldWaitChange bool) {
+	switch info.State {
+	case tracker.StateProbe:
+		if rs.sendStream == nil {
+			// We have already closed the stream; nothing to do.
+			return false
+		}
+		if shouldClose := func() (should bool) {
+			now := rs.parent.opts.Clock.PhysicalTime()
+			rs.sendStream.mu.Lock()
+			defer rs.sendStream.mu.Unlock()
+
+			if state := rs.sendStream.mu.connectedState; state == probeRecentlyReplicate &&
+				now.Sub(rs.sendStream.mu.connectedStateStart) >= probeRecentlyReplicateDuration() {
+				// The replica has been in StateProbe for at least
+				// probeRecentlyReplicateDuration (default 1s), so close the
+				// stream.
+				should = true
+			} else if state != probeRecentlyReplicate {
+				// This is the first time we've seen the replica change to StateProbe;
+				// update the connected state and start time. If the state doesn't
+				// change within probeRecentlyReplicateDuration, we will close the
+				// stream. Also schedule an event, so that even if there are no
+				// entries, we will still reliably close the stream if still in
+				// StateProbe.
+				//
+				// TODO(sumeer): think through whether we should actually be returning
+				// tokens immediately here. Currently we are not. e.g.,
+				// probeRecentlyReplicate only affects whether to wait on this replica
+				// for eval, and otherwise it behaves like a closed replicaSendStream.
+ rs.sendStream.changeConnectedStateLocked(probeRecentlyReplicate, now) + rs.parent.opts.CloseTimerScheduler.ScheduleSendStreamCloseRaftMuLocked( + ctx, rs.parent.opts.RangeID, probeRecentlyReplicateDuration()) + } + return should + }(); shouldClose { + rs.closeSendStream(ctx) + shouldWaitChange = true + } + + case tracker.StateReplicate: + if rs.sendStream == nil { + rs.createReplicaSendStream() + shouldWaitChange = true + } else { + shouldWaitChange = rs.sendStream.makeConsistentInStateReplicate(ctx) + } + + case tracker.StateSnapshot: + if rs.sendStream != nil { + switch func() connectedState { + rs.sendStream.mu.Lock() + defer rs.sendStream.mu.Unlock() + return rs.sendStream.mu.connectedState + }() { + case replicate: + rs.sendStream.changeToStateSnapshot(ctx) + shouldWaitChange = true + case probeRecentlyReplicate: + rs.closeSendStream(ctx) + shouldWaitChange = true + case snapshot: + } + } + } + return shouldWaitChange +} + +func (rss *replicaState) closeSendStream(ctx context.Context) { + rss.sendStream.mu.Lock() + defer rss.sendStream.mu.Unlock() + + if rss.sendStream.mu.connectedState != snapshot { + // changeToStateSnapshot returns all tokens, as we have no liveness + // guarantee of their return with the send stream now closed. + rss.sendStream.changeToStateSnapshotLocked(ctx) + } + rss.sendStream.mu.closed = true + rss.sendStream = nil +} + +func (rss *replicaSendStream) makeConsistentInStateReplicate( + ctx context.Context, +) (shouldWaitChange bool) { + av := rss.parent.parent.opts.AdmittedTracker.GetAdmitted(rss.parent.desc.ReplicaID) + rss.mu.Lock() + defer rss.mu.Unlock() + defer rss.returnTokens(ctx, rss.mu.tracker.Untrack(av.Term, av.Admitted)) + + // The leader is always in state replicate. + if rss.parent.parent.opts.LocalReplicaID == rss.parent.desc.ReplicaID { + if rss.mu.connectedState != replicate { + log.Fatalf(ctx, "%v", errors.AssertionFailedf( + "leader should always be in state replicate but found in %v", + rss.mu.connectedState)) + } + return false + } + + // Follower replica case. Update the connected state. + switch rss.mu.connectedState { + case replicate: + case probeRecentlyReplicate: + // NB: We could re-use the current time and acquire it outside of the + // mutex, but we expect transitions to replicate to be rarer than replicas + // remaining in replicate. + rss.changeConnectedStateLocked(replicate, rss.parent.parent.opts.Clock.PhysicalTime()) + case snapshot: + rss.changeConnectedStateLocked(replicate, rss.parent.parent.opts.Clock.PhysicalTime()) + shouldWaitChange = true + } + return shouldWaitChange +} + +// changeToStateSnapshot changes the connected state to snapshot and returns +// all tracked entries' tokens. +func (rss *replicaSendStream) changeToStateSnapshot(ctx context.Context) { + rss.mu.Lock() + defer rss.mu.Unlock() + + rss.changeToStateSnapshotLocked(ctx) +} + +// changeToStateSnapshot changes the connected state to snapshot and returns +// all tracked entries' tokens. +// +// Requires rs.mu to be held. +func (rss *replicaSendStream) changeToStateSnapshotLocked(ctx context.Context) { + rss.changeConnectedStateLocked(snapshot, rss.parent.parent.opts.Clock.PhysicalTime()) + // Since the replica is now in StateSnapshot, there is no need for Raft to + // send MsgApp pings to discover what has been missed. So there is no + // liveness guarantee on when these tokens will be returned, and therefore we + // return all tokens in the tracker. 
+	rss.returnTokens(ctx, rss.mu.tracker.UntrackAll())
+}
+
+// returnTokens takes the tokens untracked by the tracker and returns them to
+// the eval and send token counters.
+func (rss *replicaSendStream) returnTokens(
+	ctx context.Context, returned [raftpb.NumPriorities]kvflowcontrol.Tokens,
+) {
+	for pri, tokens := range returned {
+		pri := raftpb.Priority(pri)
+		if tokens > 0 {
+			rss.parent.evalTokenCounter.Return(ctx, WorkClassFromRaftPriority(pri), tokens)
+			rss.parent.sendTokenCounter.Return(ctx, WorkClassFromRaftPriority(pri), tokens)
+		}
+	}
+}
+
+// probeRecentlyReplicateDuration is the duration the controller will wait
+// after observing a replica in StateProbe before closing the send stream if
+// the replica remains in StateProbe.
+//
+// TODO(kvoli): We will want to make this a cluster setting eventually.
+func probeRecentlyReplicateDuration() time.Duration {
+	return time.Second
+}
+
+type connectedState uint32
+
+// Local replicas are always in state replicate.
+//
+// Initial state for a replicaSendStream is always replicate, since it is
+// created in StateReplicate. We don't care whether the transport is
+// connected or disconnected: the RaftTransport has buffering capacity, so
+// messages are absorbed and sent immediately once the RaftTransport stream
+// reconnects (which may happen before the next HandleRaftEvent). This is
+// desirable.
+//
+// The first false return value from SendRaftMessage will trigger a
+// notification to Raft that the replica is unreachable (see
+// Replica.sendRaftMessage calling Replica.addUnreachableRemoteReplica), and
+// the resulting raftpb.MsgUnreachable will cause the transition out of
+// StateReplicate to StateProbe. The false return value happens either when
+// the (generous) RaftTransport buffer is full, or when the circuit breaker
+// opens. The circuit breaker opens 3-6s after no more TCP packets are
+// flowing.
+//
+// A single transient message drop and nack can also cause a transition to
+// StateProbe. At this layer we don't bother distinguishing why the
+// transition happened, and first transition to probeRecentlyReplicate. We
+// stay in this state for 1 second, and then close the replicaSendStream.
+//
+// The only difference in behavior between replicate and
+// probeRecentlyReplicate is that we don't try to construct MsgApps in the
+// latter.
+//
+// Initial states: replicate
+// State transitions:
+//
+//	replicate <=> {probeRecentlyReplicate, snapshot}
+//	snapshot => replicaSendStream closed (when we observe StateProbe)
+//	probeRecentlyReplicate => replicaSendStream closed (after short delay)
+const (
+	replicate connectedState = iota
+	probeRecentlyReplicate
+	snapshot
+)
+
+func (cs connectedState) shouldWaitForElasticEvalTokens() bool {
+	return cs == replicate || cs == probeRecentlyReplicate
+}
+
+func (cs connectedState) String() string {
+	return redact.StringWithoutMarkers(cs)
+}
+
+// SafeFormat implements the redact.SafeFormatter interface.
+func (cs connectedState) SafeFormat(w redact.SafePrinter, _ rune) { + switch cs { + case replicate: + w.SafeString("replicate") + case probeRecentlyReplicate: + w.SafeString("probeRecentlyReplicate") + case snapshot: + w.SafeString("snapshot") + default: + panic(fmt.Sprintf("unknown connectedState %v", cs)) + } +} diff --git a/pkg/kv/kvserver/kvflowcontrol/rac2/range_controller_test.go b/pkg/kv/kvserver/kvflowcontrol/rac2/range_controller_test.go new file mode 100644 index 000000000000..8e49cf6f9e1c --- /dev/null +++ b/pkg/kv/kvserver/kvflowcontrol/rac2/range_controller_test.go @@ -0,0 +1,1054 @@ +// Copyright 2024 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package rac2 + +import ( + "context" + "fmt" + "sort" + "strconv" + "strings" + "testing" + "time" + + "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvflowcontrol" + "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvflowcontrol/kvflowcontrolpb" + "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverbase" + "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverpb" + "github.com/cockroachdb/cockroach/pkg/kv/kvserver/raftlog" + "github.com/cockroachdb/cockroach/pkg/raft/raftpb" + "github.com/cockroachdb/cockroach/pkg/raft/tracker" + "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/settings/cluster" + "github.com/cockroachdb/cockroach/pkg/testutils/datapathutils" + "github.com/cockroachdb/cockroach/pkg/util/admission/admissionpb" + "github.com/cockroachdb/cockroach/pkg/util/hlc" + "github.com/cockroachdb/cockroach/pkg/util/humanizeutil" + "github.com/cockroachdb/cockroach/pkg/util/leaktest" + "github.com/cockroachdb/cockroach/pkg/util/log" + "github.com/cockroachdb/cockroach/pkg/util/protoutil" + "github.com/cockroachdb/cockroach/pkg/util/stop" + "github.com/cockroachdb/cockroach/pkg/util/syncutil" + "github.com/cockroachdb/cockroach/pkg/util/timeutil" + "github.com/cockroachdb/datadriven" + "github.com/stretchr/testify/require" +) + +// testingRCState is a test state used in TestRangeController. It contains the +// necessary fields to construct RangeControllers and utility methods for +// generating strings representing the state of the RangeControllers. +type testingRCState struct { + t *testing.T + testCtx context.Context + settings *cluster.Settings + stopper *stop.Stopper + ts *timeutil.ManualTime + clock *hlc.Clock + ssTokenCounter *StreamTokenCounterProvider + probeToCloseScheduler ProbeToCloseTimerScheduler + evalMetrics *EvalWaitMetrics + // ranges contains the controllers for each range. It is the main state being + // tested. + ranges map[roachpb.RangeID]*testingRCRange + // setTokenCounters is used to ensure that we only set the initial token + // counts once per counter. 
+	setTokenCounters     map[kvflowcontrol.Stream]struct{}
+	initialRegularTokens kvflowcontrol.Tokens
+	initialElasticTokens kvflowcontrol.Tokens
+}
+
+func (s *testingRCState) init(t *testing.T, ctx context.Context) {
+	s.t = t
+	s.testCtx = ctx
+	s.settings = cluster.MakeTestingClusterSettings()
+	s.stopper = stop.NewStopper()
+	s.ts = timeutil.NewManualTime(timeutil.UnixEpoch)
+	s.clock = hlc.NewClockForTesting(s.ts)
+	s.ssTokenCounter = NewStreamTokenCounterProvider(s.settings, s.clock)
+	s.probeToCloseScheduler = &testingProbeToCloseTimerScheduler{state: s}
+	s.evalMetrics = NewEvalWaitMetrics()
+	s.ranges = make(map[roachpb.RangeID]*testingRCRange)
+	s.setTokenCounters = make(map[kvflowcontrol.Stream]struct{})
+	s.initialRegularTokens = kvflowcontrol.Tokens(-1)
+	s.initialElasticTokens = kvflowcontrol.Tokens(-1)
+}
+
+func sortReplicas(r *testingRCRange) []roachpb.ReplicaDescriptor {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+
+	sorted := make([]roachpb.ReplicaDescriptor, 0, len(r.mu.r.replicaSet))
+	for _, replica := range r.mu.r.replicaSet {
+		sorted = append(sorted, replica.desc)
+	}
+	sort.Slice(sorted, func(i, j int) bool {
+		return sorted[i].ReplicaID < sorted[j].ReplicaID
+	})
+	return sorted
+}
+
+func (s *testingRCState) sortRanges() []*testingRCRange {
+	sorted := make([]*testingRCRange, 0, len(s.ranges))
+	for _, testRC := range s.ranges {
+		sorted = append(sorted, testRC)
+	}
+	sort.Slice(sorted, func(i, j int) bool {
+		return sorted[i].mu.r.rangeID < sorted[j].mu.r.rangeID
+	})
+	return sorted
+}
+
+func (s *testingRCState) rangeStateString() string {
+	var b strings.Builder
+
+	for _, testRC := range s.sortRanges() {
+		// We retain the lock until the end of the function call. We also ensure
+		// that locking is done in order of rangeID, to avoid inconsistent lock
+		// ordering leading to deadlocks.
+		testRC.mu.Lock()
+		defer testRC.mu.Unlock()
+
+		replicaIDs := make([]int, 0, len(testRC.mu.r.replicaSet))
+		for replicaID := range testRC.mu.r.replicaSet {
+			replicaIDs = append(replicaIDs, int(replicaID))
+		}
+		sort.Ints(replicaIDs)
+
+		fmt.Fprintf(&b, "r%d: [", testRC.mu.r.rangeID)
+		for i, replicaID := range replicaIDs {
+			replica := testRC.mu.r.replicaSet[roachpb.ReplicaID(replicaID)]
+			if i > 0 {
+				fmt.Fprintf(&b, ",")
+			}
+			fmt.Fprintf(&b, "%v", replica.desc)
+			if replica.desc.ReplicaID == testRC.rc.leaseholder {
+				fmt.Fprint(&b, "*")
+			}
+		}
+		fmt.Fprintf(&b, "]\n")
+	}
+	return b.String()
+}
+
+func (s *testingRCState) tokenCountsString() string {
+	var b strings.Builder
+	var streams []kvflowcontrol.Stream
+	s.ssTokenCounter.evalCounters.Range(func(k kvflowcontrol.Stream, v *tokenCounter) bool {
+		streams = append(streams, k)
+		return true
+	})
+	sort.Slice(streams, func(i, j int) bool {
+		return streams[i].StoreID < streams[j].StoreID
+	})
+	for _, stream := range streams {
+		fmt.Fprintf(&b, "%v: %v\n", stream, s.ssTokenCounter.Eval(stream))
+	}
+	return b.String()
+}
+
+func (s *testingRCState) evalStateString() string {
+	var b strings.Builder
+
+	// Sleep briefly so that the goroutines started by wait_for_eval have a
+	// chance to run and record their state before we read it.
+	time.Sleep(20 * time.Millisecond)
+	for _, testRC := range s.sortRanges() {
+		// We retain the lock until the end of the function call, similar to
+		// above.
+		testRC.mu.Lock()
+		defer testRC.mu.Unlock()
+
+		fmt.Fprintf(&b, "range_id=%d tenant_id=%d local_replica_id=%d\n",
+			testRC.mu.r.rangeID, testRC.mu.r.tenantID, testRC.mu.r.localReplicaID)
+		// Sort the evals by name to ensure deterministic output.
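+		// (Go randomizes map iteration order, so sorting is required for
+		// stable testdata output.)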
+ evals := make([]string, 0, len(testRC.mu.evals)) + for name := range testRC.mu.evals { + evals = append(evals, name) + } + sort.Strings(evals) + for _, name := range evals { + eval := testRC.mu.evals[name] + fmt.Fprintf(&b, " name=%s pri=%-8v done=%-5t waited=%-5t err=%v\n", name, eval.pri, + eval.done, eval.waited, eval.err) + } + } + return b.String() +} + +func (s *testingRCState) sendStreamString(rangeID roachpb.RangeID) string { + var b strings.Builder + + for _, desc := range sortReplicas(s.ranges[rangeID]) { + replica := s.ranges[rangeID].rc.replicaMap[desc.ReplicaID] + fmt.Fprintf(&b, "%v: ", desc) + if replica.sendStream == nil { + fmt.Fprintf(&b, "closed\n") + continue + } + replica.sendStream.mu.Lock() + defer replica.sendStream.mu.Unlock() + + fmt.Fprintf(&b, "state=%v closed=%v\n", + replica.sendStream.mu.connectedState, replica.sendStream.mu.closed) + b.WriteString(formatTrackerState(&replica.sendStream.mu.tracker)) + b.WriteString("++++\n") + } + return b.String() +} + +func (s *testingRCState) maybeSetInitialTokens(r testingRange) { + for _, replica := range r.replicaSet { + stream := kvflowcontrol.Stream{ + StoreID: replica.desc.StoreID, + TenantID: r.tenantID, + } + if _, ok := s.setTokenCounters[stream]; !ok { + s.setTokenCounters[stream] = struct{}{} + if s.initialRegularTokens != -1 { + s.ssTokenCounter.Eval(stream).testingSetTokens(s.testCtx, + admissionpb.RegularWorkClass, s.initialRegularTokens) + s.ssTokenCounter.Send(stream).testingSetTokens(s.testCtx, + admissionpb.RegularWorkClass, s.initialRegularTokens) + } + if s.initialElasticTokens != -1 { + s.ssTokenCounter.Eval(stream).testingSetTokens(s.testCtx, + admissionpb.ElasticWorkClass, s.initialElasticTokens) + s.ssTokenCounter.Send(stream).testingSetTokens(s.testCtx, + admissionpb.ElasticWorkClass, s.initialElasticTokens) + } + } + } +} + +func (s *testingRCState) getOrInitRange(r testingRange) *testingRCRange { + testRC, ok := s.ranges[r.rangeID] + if !ok { + testRC = &testingRCRange{} + testRC.mu.r = r + testRC.mu.evals = make(map[string]*testingRCEval) + options := RangeControllerOptions{ + RangeID: r.rangeID, + TenantID: r.tenantID, + LocalReplicaID: r.localReplicaID, + SSTokenCounter: s.ssTokenCounter, + RaftInterface: testRC, + Clock: s.clock, + CloseTimerScheduler: s.probeToCloseScheduler, + AdmittedTracker: testRC, + EvalWaitMetrics: s.evalMetrics, + } + + init := RangeControllerInitState{ + ReplicaSet: r.replicas(), + Leaseholder: r.localReplicaID, + } + testRC.rc = NewRangeController(s.testCtx, options, init) + s.ranges[r.rangeID] = testRC + } + s.maybeSetInitialTokens(r) + return testRC +} + +type testingRCEval struct { + pri admissionpb.WorkPriority + done bool + waited bool + err error + cancel context.CancelFunc + refreshCh chan struct{} +} + +type testingRCRange struct { + rc *rangeController + + mu struct { + syncutil.Mutex + r testingRange + evals map[string]*testingRCEval + } +} + +func (r *testingRCRange) FollowerStateRaftMuLocked(replicaID roachpb.ReplicaID) FollowerStateInfo { + r.mu.Lock() + defer r.mu.Unlock() + + replica, ok := r.mu.r.replicaSet[replicaID] + if !ok { + return FollowerStateInfo{} + } + return replica.info +} + +func (r *testingRCRange) GetAdmitted(replicaID roachpb.ReplicaID) AdmittedVector { + r.mu.Lock() + defer r.mu.Unlock() + + replica, ok := r.mu.r.replicaSet[replicaID] + if !ok { + return AdmittedVector{} + } + return replica.av +} + +func (r *testingRCRange) startWaitForEval(name string, pri admissionpb.WorkPriority) { + r.mu.Lock() + defer r.mu.Unlock() + + ctx, 
cancel := context.WithCancel(context.Background()) + refreshCh := make(chan struct{}) + r.mu.evals[name] = &testingRCEval{ + err: nil, + cancel: cancel, + refreshCh: refreshCh, + pri: pri, + } + + go func() { + waited, err := r.rc.WaitForEval(ctx, pri) + + r.mu.Lock() + defer r.mu.Unlock() + r.mu.evals[name].waited = waited + r.mu.evals[name].err = err + r.mu.evals[name].done = true + }() +} + +func (r *testingRCRange) admit( + ctx context.Context, + t *testing.T, + storeID roachpb.StoreID, + term uint64, + toIndex uint64, + pri admissionpb.WorkPriority, +) { + r.mu.Lock() + + for _, replica := range r.mu.r.replicaSet { + if replica.desc.StoreID == storeID { + replica := replica + replica.av.Admitted[AdmissionToRaftPriority(pri)] = toIndex + replica.av.Term = term + r.mu.r.replicaSet[replica.desc.ReplicaID] = replica + break + } + } + + r.mu.Unlock() + // Send an empty raft event in order to trigger potential token return. + require.NoError(t, r.rc.HandleRaftEventRaftMuLocked(ctx, RaftEvent{})) +} + +type testingRange struct { + rangeID roachpb.RangeID + tenantID roachpb.TenantID + localReplicaID roachpb.ReplicaID + replicaSet map[roachpb.ReplicaID]testingReplica +} + +func (t testingRange) replicas() ReplicaSet { + replicas := make(ReplicaSet, len(t.replicaSet)) + for i, replica := range t.replicaSet { + replicas[i] = replica.desc + } + return replicas +} + +const invalidTrackerState = tracker.StateSnapshot + 1 + +type testingReplica struct { + desc roachpb.ReplicaDescriptor + info FollowerStateInfo + av AdmittedVector +} + +func scanRanges(t *testing.T, input string) []testingRange { + replicas := []testingRange{} + + for _, line := range strings.Split(input, "\n") { + parts := strings.Fields(line) + parts[0] = strings.TrimSpace(parts[0]) + if strings.HasPrefix(parts[0], "range_id=") { + // Create a new range, any replicas which follow until the next range_id + // line will be added to this replica set. + var rangeID, tenantID, localReplicaID int + var err error + + require.True(t, strings.HasPrefix(parts[0], "range_id=")) + parts[0] = strings.TrimPrefix(strings.TrimSpace(parts[0]), "range_id=") + rangeID, err = strconv.Atoi(parts[0]) + require.NoError(t, err) + + parts[1] = strings.TrimSpace(parts[1]) + require.True(t, strings.HasPrefix(parts[1], "tenant_id=")) + parts[1] = strings.TrimPrefix(strings.TrimSpace(parts[1]), "tenant_id=") + tenantID, err = strconv.Atoi(parts[1]) + require.NoError(t, err) + + parts[2] = strings.TrimSpace(parts[2]) + require.True(t, strings.HasPrefix(parts[2], "local_replica_id=")) + parts[2] = strings.TrimPrefix(strings.TrimSpace(parts[2]), "local_replica_id=") + localReplicaID, err = strconv.Atoi(parts[2]) + require.NoError(t, err) + + replicas = append(replicas, testingRange{ + rangeID: roachpb.RangeID(rangeID), + tenantID: roachpb.MustMakeTenantID(uint64(tenantID)), + localReplicaID: roachpb.ReplicaID(localReplicaID), + replicaSet: make(map[roachpb.ReplicaID]testingReplica), + }) + } else { + // Otherwise, add the replica to the last replica set created. + replica := scanReplica(t, line) + replicas[len(replicas)-1].replicaSet[replica.desc.ReplicaID] = replica + } + } + + return replicas +} + +func scanReplica(t *testing.T, line string) testingReplica { + var storeID, replicaID int + var replicaType roachpb.ReplicaType + // Default to an invalid state when no state is specified, this will be + // converted to the prior state or StateReplicate if the replica doesn't yet + // exist. 
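+	// (invalidTrackerState is tracker.StateSnapshot+1, one past the last valid
+	// tracker state, so it can never collide with a real state.)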
+ state := invalidTrackerState + var err error + + parts := strings.Fields(line) + parts[0] = strings.TrimSpace(parts[0]) + + require.True(t, strings.HasPrefix(parts[0], "store_id=")) + parts[0] = strings.TrimPrefix(strings.TrimSpace(parts[0]), "store_id=") + storeID, err = strconv.Atoi(parts[0]) + require.NoError(t, err) + + parts[1] = strings.TrimSpace(parts[1]) + require.True(t, strings.HasPrefix(parts[1], "replica_id=")) + parts[1] = strings.TrimPrefix(strings.TrimSpace(parts[1]), "replica_id=") + replicaID, err = strconv.Atoi(parts[1]) + require.NoError(t, err) + + parts[2] = strings.TrimSpace(parts[2]) + require.True(t, strings.HasPrefix(parts[2], "type=")) + parts[2] = strings.TrimPrefix(strings.TrimSpace(parts[2]), "type=") + switch parts[2] { + case "VOTER_FULL": + replicaType = roachpb.VOTER_FULL + case "VOTER_INCOMING": + replicaType = roachpb.VOTER_INCOMING + case "VOTER_DEMOTING_LEARNER": + replicaType = roachpb.VOTER_DEMOTING_LEARNER + case "LEARNER": + replicaType = roachpb.LEARNER + case "NON_VOTER": + replicaType = roachpb.NON_VOTER + case "VOTER_DEMOTING_NON_VOTER": + replicaType = roachpb.VOTER_DEMOTING_NON_VOTER + default: + panic("unknown replica type") + } + + // The fourth field is optional, if set it contains the tracker state of the + // replica on the leader replica (localReplicaID). The valid states are + // Probe, Replicate, and Snapshot. + if len(parts) > 3 { + parts[3] = strings.TrimSpace(parts[3]) + require.True(t, strings.HasPrefix(parts[3], "state=")) + parts[3] = strings.TrimPrefix(strings.TrimSpace(parts[3]), "state=") + switch parts[3] { + case "StateProbe": + state = tracker.StateProbe + case "StateReplicate": + state = tracker.StateReplicate + case "StateSnapshot": + state = tracker.StateSnapshot + default: + panic("unknown replica state") + } + } + + return testingReplica{ + desc: roachpb.ReplicaDescriptor{ + NodeID: roachpb.NodeID(storeID), + StoreID: roachpb.StoreID(storeID), + ReplicaID: roachpb.ReplicaID(replicaID), + Type: replicaType, + }, + info: FollowerStateInfo{State: state}, + } +} + +func parsePriority(t *testing.T, input string) admissionpb.WorkPriority { + switch input { + case "LowPri": + return admissionpb.LowPri + case "NormalPri": + return admissionpb.NormalPri + case "HighPri": + return admissionpb.HighPri + default: + require.Failf(t, "unknown work class", "%v", input) + return admissionpb.WorkPriority(-1) + } +} + +type entryInfo struct { + term uint64 + index uint64 + enc raftlog.EntryEncoding + pri raftpb.Priority + tokens kvflowcontrol.Tokens +} + +func testingCreateEntry(t *testing.T, info entryInfo) raftpb.Entry { + cmdID := kvserverbase.CmdIDKey("11111111") + var metaBuf []byte + if info.enc.UsesAdmissionControl() { + meta := kvflowcontrolpb.RaftAdmissionMeta{ + AdmissionPriority: int32(info.pri), + } + var err error + metaBuf, err = protoutil.Marshal(&meta) + require.NoError(t, err) + } + cmdBufPrefix := raftlog.EncodeCommandBytes(info.enc, cmdID, nil, info.pri) + paddingLen := int(info.tokens) - len(cmdBufPrefix) - len(metaBuf) + // Padding also needs to decode as part of the RaftCommand proto, so we + // abuse the WriteBatch.Data field which is a byte slice. Since it is a + // nested field it consumes two tags plus two lengths. We'll approximate + // this as needing a maximum of 15 bytes, to be on the safe side. + require.LessOrEqual(t, 15, paddingLen) + cmd := kvserverpb.RaftCommand{ + WriteBatch: &kvserverpb.WriteBatch{Data: make([]byte, paddingLen)}} + // Shrink by 1 on each iteration. 
This doesn't give us a guarantee that we
+	// will get exactly paddingLen since the length of data affects the encoded
+	// lengths, but it should usually work, and cause fewer questions when
+	// looking at the testdata file.
+	for cmd.Size() > paddingLen {
+		cmd.WriteBatch.Data = cmd.WriteBatch.Data[:len(cmd.WriteBatch.Data)-1]
+	}
+	cmdBuf, err := protoutil.Marshal(&cmd)
+	require.NoError(t, err)
+	data := append(cmdBufPrefix, metaBuf...)
+	data = append(data, cmdBuf...)
+	return raftpb.Entry{
+		Term:  info.term,
+		Index: info.index,
+		Type:  raftpb.EntryNormal,
+		Data:  data,
+	}
+}
+
+type testingProbeToCloseTimerScheduler struct {
+	state *testingRCState
+}
+
+// testingProbeToCloseTimerScheduler implements the ProbeToCloseTimerScheduler
+// interface.
+var _ ProbeToCloseTimerScheduler = &testingProbeToCloseTimerScheduler{}
+
+func (t *testingProbeToCloseTimerScheduler) ScheduleSendStreamCloseRaftMuLocked(
+	ctx context.Context, rangeID roachpb.RangeID, delay time.Duration,
+) {
+	// TODO(kvoli): We likely want to test the transition delay using the actual
+	// implementation, but we need to refactor out the close scheduler into a
+	// separate pkg, or bring it into this package. For now, just do something
+	// simple, which is to send raft events to each range on a tick.
+	go func() {
+		timer := t.state.ts.NewTimer()
+		defer timer.Stop()
+		timer.Reset(delay)
+
+		select {
+		case <-t.state.stopper.ShouldQuiesce():
+			return
+		case <-ctx.Done():
+			return
+		case <-timer.Ch():
+		}
+		timer.MarkRead()
+		require.NoError(t.state.t,
+			t.state.ranges[rangeID].rc.HandleRaftEventRaftMuLocked(ctx, RaftEvent{}))
+	}()
+}
+
+// TestRangeController tests the RangeController's various methods.
+//
+// - init: Initializes the range controller with the given ranges.
+//   range_id=<range_id> tenant_id=<tenant_id> local_replica_id=<local_replica_id>
+//   store_id=<store_id> replica_id=<replica_id> type=<type> [state=<state>]
+//   ...
+//
+// - tick: Advances the manual time by the given duration.
+//   duration=<duration>
+//
+// - wait_for_eval: Starts a WaitForEval call on the given range.
+//   range_id=<range_id> name=<name> pri=<pri>
+//
+// - check_state: Prints the current state of all ranges.
+//
+// - adjust_tokens: Adjusts the token count for the given store and priority.
+//   store_id=<store_id> pri=<pri> tokens=<tokens>
+//   ...
+//
+// - cancel_context: Cancels the context for the given range.
+//   range_id=<range_id> name=<name>
+//
+// - set_replicas: Sets the replicas for the given range.
+//   range_id=<range_id> tenant_id=<tenant_id> local_replica_id=<local_replica_id>
+//   store_id=<store_id> replica_id=<replica_id> type=<type> [state=<state>]
+//   ...
+//
+// - set_leaseholder: Sets the leaseholder for the given range.
+//   range_id=<range_id> replica_id=<replica_id>
+//
+// - close_rcs: Closes all range controllers.
+//
+// - admit: Admits entries up to the given index for the given store on the
+//   given range.
+//   range_id=<range_id>
+//   store_id=<store_id> term=<term> to_index=<to_index> pri=<pri>
+//   ...
+//
+// - raft_event: Simulates a raft event on the given range, calling
+//   HandleRaftEvent.
+//   range_id=<range_id>
+//   term=<term> index=<index> pri=<pri> size=<size>
+//   ...
+//
+// - stream_state: Prints the state of the stream(s) for the given range's
+//   replicas.
+//   range_id=<range_id>
+//
+// - metrics: Prints the current state of the eval metrics.
+func TestRangeController(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + ctx := context.Background() + + datadriven.Walk(t, datapathutils.TestDataPath(t, "range_controller"), func(t *testing.T, path string) { + state := &testingRCState{} + state.init(t, ctx) + defer state.stopper.Stop(ctx) + + datadriven.RunTest(t, path, func(t *testing.T, d *datadriven.TestData) string { + switch d.Cmd { + case "init": + var regularInitString, elasticInitString string + var regularLimitString, elasticLimitString string + d.MaybeScanArgs(t, "regular_init", ®ularInitString) + d.MaybeScanArgs(t, "elastic_init", &elasticInitString) + d.MaybeScanArgs(t, "regular_limit", ®ularLimitString) + d.MaybeScanArgs(t, "elastic_limit", &elasticLimitString) + // If the test specifies different token limits or initial token counts + // (default is the limit), then we override the default limit and also + // store the initial token count. tokenCounters are created + // dynamically, so we update them on the fly as well. + if regularLimitString != "" { + regularLimit, err := humanizeutil.ParseBytes(regularLimitString) + require.NoError(t, err) + kvflowcontrol.RegularTokensPerStream.Override(ctx, &state.settings.SV, regularLimit) + } + if elasticLimitString != "" { + elasticLimit, err := humanizeutil.ParseBytes(elasticLimitString) + require.NoError(t, err) + kvflowcontrol.ElasticTokensPerStream.Override(ctx, &state.settings.SV, elasticLimit) + } + if regularInitString != "" { + regularInit, err := humanizeutil.ParseBytes(regularInitString) + require.NoError(t, err) + state.initialRegularTokens = kvflowcontrol.Tokens(regularInit) + } + if elasticInitString != "" { + elasticInit, err := humanizeutil.ParseBytes(elasticInitString) + require.NoError(t, err) + state.initialElasticTokens = kvflowcontrol.Tokens(elasticInit) + } + + for _, r := range scanRanges(t, d.Input) { + state.getOrInitRange(r) + } + return state.rangeStateString() + state.tokenCountsString() + + case "tick": + var durationStr string + d.ScanArgs(t, "duration", &durationStr) + duration, err := time.ParseDuration(durationStr) + require.NoError(t, err) + state.ts.Advance(duration) + // Sleep for a bit to allow any timers to fire. 
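+			// The manual clock only fires timers when advanced, and the
+			// goroutines in testingProbeToCloseTimerScheduler that wake on those
+			// timers need a chance to run before we render the test state.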
+ time.Sleep(20 * time.Millisecond) + return fmt.Sprintf("now=%v", humanizeutil.Duration( + state.ts.Now().Sub(timeutil.UnixEpoch))) + + case "wait_for_eval": + var rangeID int + var name, priString string + d.ScanArgs(t, "range_id", &rangeID) + d.ScanArgs(t, "name", &name) + d.ScanArgs(t, "pri", &priString) + testRC := state.ranges[roachpb.RangeID(rangeID)] + testRC.startWaitForEval(name, parsePriority(t, priString)) + return state.evalStateString() + + case "check_state": + return state.evalStateString() + + case "adjust_tokens": + for _, line := range strings.Split(d.Input, "\n") { + parts := strings.Fields(line) + parts[0] = strings.TrimSpace(parts[0]) + require.True(t, strings.HasPrefix(parts[0], "store_id=")) + parts[0] = strings.TrimPrefix(parts[0], "store_id=") + store, err := strconv.Atoi(parts[0]) + require.NoError(t, err) + + parts[1] = strings.TrimSpace(parts[1]) + require.True(t, strings.HasPrefix(parts[1], "pri=")) + pri := parsePriority(t, strings.TrimPrefix(parts[1], "pri=")) + + parts[2] = strings.TrimSpace(parts[2]) + require.True(t, strings.HasPrefix(parts[2], "tokens=")) + tokenString := strings.TrimPrefix(parts[2], "tokens=") + tokens, err := humanizeutil.ParseBytes(tokenString) + require.NoError(t, err) + + state.ssTokenCounter.Eval(kvflowcontrol.Stream{ + StoreID: roachpb.StoreID(store), + TenantID: roachpb.SystemTenantID, + }).adjust(ctx, + admissionpb.WorkClassFromPri(pri), + kvflowcontrol.Tokens(tokens)) + } + + return state.tokenCountsString() + + case "cancel_context": + var rangeID int + var name string + d.ScanArgs(t, "range_id", &rangeID) + d.ScanArgs(t, "name", &name) + testRC := state.ranges[roachpb.RangeID(rangeID)] + func() { + testRC.mu.Lock() + defer testRC.mu.Unlock() + testRC.mu.evals[name].cancel() + }() + + return state.evalStateString() + + case "set_replicas": + for _, r := range scanRanges(t, d.Input) { + testRC := state.getOrInitRange(r) + func() { + testRC.mu.Lock() + defer testRC.mu.Unlock() + testRC.mu.r = r + }() + require.NoError(t, testRC.rc.SetReplicasRaftMuLocked(ctx, r.replicas())) + // Send an empty raft event in order to trigger any potential + // connectedState changes. + require.NoError(t, testRC.rc.HandleRaftEventRaftMuLocked(ctx, RaftEvent{})) + } + // Sleep for a bit to allow any timers to fire. 
+ time.Sleep(20 * time.Millisecond) + return state.rangeStateString() + + case "set_leaseholder": + var rangeID, replicaID int + d.ScanArgs(t, "range_id", &rangeID) + d.ScanArgs(t, "replica_id", &replicaID) + testRC := state.ranges[roachpb.RangeID(rangeID)] + testRC.rc.SetLeaseholderRaftMuLocked(ctx, roachpb.ReplicaID(replicaID)) + return state.rangeStateString() + + case "close_rcs": + for _, r := range state.ranges { + r.rc.CloseRaftMuLocked(ctx) + } + evalStr := state.evalStateString() + for k := range state.ranges { + delete(state.ranges, k) + } + return evalStr + + case "admit": + var lastRangeID roachpb.RangeID + for _, line := range strings.Split(d.Input, "\n") { + var ( + rangeID int + storeID int + term int + to_index int + err error + ) + parts := strings.Fields(line) + parts[0] = strings.TrimSpace(parts[0]) + + if strings.HasPrefix(parts[0], "range_id=") { + parts[0] = strings.TrimPrefix(strings.TrimSpace(parts[0]), "range_id=") + rangeID, err = strconv.Atoi(parts[0]) + require.NoError(t, err) + lastRangeID = roachpb.RangeID(rangeID) + } else { + parts[0] = strings.TrimSpace(parts[0]) + require.True(t, strings.HasPrefix(parts[0], "store_id=")) + parts[0] = strings.TrimPrefix(strings.TrimSpace(parts[0]), "store_id=") + storeID, err = strconv.Atoi(parts[0]) + require.NoError(t, err) + + parts[1] = strings.TrimSpace(parts[1]) + require.True(t, strings.HasPrefix(parts[1], "term=")) + parts[1] = strings.TrimPrefix(strings.TrimSpace(parts[1]), "term=") + term, err = strconv.Atoi(parts[1]) + require.NoError(t, err) + + // TODO(sumeer): the test input only specifies an + // incremental change to the admitted vector, for a + // single priority. However, in practice, the whole + // vector will be updated, which also cleanly handles + // the case of an advancing term. Consider changing + // this to accept a non-incremental update. + parts[2] = strings.TrimSpace(parts[2]) + require.True(t, strings.HasPrefix(parts[2], "to_index=")) + parts[2] = strings.TrimPrefix(strings.TrimSpace(parts[2]), "to_index=") + to_index, err = strconv.Atoi(parts[2]) + require.NoError(t, err) + + parts[3] = strings.TrimSpace(parts[3]) + require.True(t, strings.HasPrefix(parts[3], "pri=")) + parts[3] = strings.TrimPrefix(strings.TrimSpace(parts[3]), "pri=") + pri := parsePriority(t, parts[3]) + state.ranges[lastRangeID].admit(ctx, t, roachpb.StoreID(storeID), uint64(term), uint64(to_index), pri) + } + } + return state.tokenCountsString() + + case "raft_event": + var lastRangeID roachpb.RangeID + init := false + var buf []entryInfo + + propRangeEntries := func() { + event := RaftEvent{ + Entries: make([]raftpb.Entry, len(buf)), + } + for i, state := range buf { + event.Entries[i] = testingCreateEntry(t, state) + } + err := state.ranges[lastRangeID].rc.HandleRaftEventRaftMuLocked(ctx, event) + require.NoError(t, err) + } + + for _, line := range strings.Split(d.Input, "\n") { + var ( + rangeID, term, index int + size int64 + err error + pri admissionpb.WorkPriority + ) + + parts := strings.Fields(line) + parts[0] = strings.TrimSpace(parts[0]) + if strings.HasPrefix(parts[0], "range_id=") { + if init { + // We are moving to another range, if a previous range has entries + // created then create the raft event and call handle raft ready + // using all the entries added so far. 
+ propRangeEntries() + init = false + } + + parts[0] = strings.TrimPrefix(strings.TrimSpace(parts[0]), "range_id=") + rangeID, err = strconv.Atoi(parts[0]) + require.NoError(t, err) + lastRangeID = roachpb.RangeID(rangeID) + } else { + require.True(t, strings.HasPrefix(parts[0], "term=")) + parts[0] = strings.TrimPrefix(strings.TrimSpace(parts[0]), "term=") + term, err = strconv.Atoi(parts[0]) + require.NoError(t, err) + + parts[1] = strings.TrimSpace(parts[1]) + require.True(t, strings.HasPrefix(parts[1], "index=")) + parts[1] = strings.TrimPrefix(strings.TrimSpace(parts[1]), "index=") + index, err = strconv.Atoi(parts[1]) + require.NoError(t, err) + + parts[2] = strings.TrimSpace(parts[2]) + require.True(t, strings.HasPrefix(parts[2], "pri=")) + parts[2] = strings.TrimPrefix(strings.TrimSpace(parts[2]), "pri=") + pri = parsePriority(t, parts[2]) + + parts[3] = strings.TrimSpace(parts[3]) + require.True(t, strings.HasPrefix(parts[3], "size=")) + parts[3] = strings.TrimPrefix(strings.TrimSpace(parts[3]), "size=") + size, err = humanizeutil.ParseBytes(parts[3]) + require.NoError(t, err) + + init = true + buf = append(buf, entryInfo{ + term: uint64(term), + index: uint64(index), + enc: raftlog.EntryEncodingStandardWithACAndPriority, + tokens: kvflowcontrol.Tokens(size), + pri: AdmissionToRaftPriority(pri), + }) + } + } + if init { + propRangeEntries() + } + return state.tokenCountsString() + + case "stream_state": + var rangeID int + d.ScanArgs(t, "range_id", &rangeID) + return state.sendStreamString(roachpb.RangeID(rangeID)) + + case "metrics": + var buf strings.Builder + evalMetrics := state.evalMetrics + + for _, wc := range []admissionpb.WorkClass{ + admissionpb.RegularWorkClass, + admissionpb.ElasticWorkClass, + } { + fmt.Fprintf(&buf, "%-50v: %v\n", evalMetrics.Waiting[wc].GetName(), evalMetrics.Waiting[wc].Value()) + fmt.Fprintf(&buf, "%-50v: %v\n", evalMetrics.Admitted[wc].GetName(), evalMetrics.Admitted[wc].Count()) + fmt.Fprintf(&buf, "%-50v: %v\n", evalMetrics.Errored[wc].GetName(), evalMetrics.Errored[wc].Count()) + fmt.Fprintf(&buf, "%-50v: %v\n", evalMetrics.Bypassed[wc].GetName(), evalMetrics.Bypassed[wc].Count()) + // We only print the number of recorded durations, instead of any + // percentiles or cumulative wait times as these are + // non-deterministic in the test. + fmt.Fprintf(&buf, "%-50v: %v\n", + fmt.Sprintf("%v.count", evalMetrics.Duration[wc].GetName()), + testingFirst(evalMetrics.Duration[wc].CumulativeSnapshot().Total())) + } + return buf.String() + + default: + panic(fmt.Sprintf("unknown command: %s", d.Cmd)) + } + }) + }) +} + +func TestGetEntryFCState(t *testing.T) { + defer leaktest.AfterTest(t)() + ctx := context.Background() + + for _, tc := range []struct { + name string + entryInfo entryInfo + expectedFCState entryFCState + }{ + { + // V1 encoded entries with AC should end up with LowPri and otherwise + // matching entry information. + name: "v1_entry_with_ac", + entryInfo: entryInfo{ + term: 1, + index: 1, + enc: raftlog.EntryEncodingStandardWithAC, + pri: raftpb.NormalPri, + tokens: 100, + }, + expectedFCState: entryFCState{ + term: 1, + index: 1, + pri: raftpb.LowPri, + usesFlowControl: true, + tokens: 100, + }, + }, + { + // Likewise for V1 sideloaded entries with AC enabled. 
+ name: "v1_entry_with_ac_sideloaded", + entryInfo: entryInfo{ + term: 2, + index: 2, + enc: raftlog.EntryEncodingSideloadedWithAC, + pri: raftpb.HighPri, + tokens: 200, + }, + expectedFCState: entryFCState{ + term: 2, + index: 2, + pri: raftpb.LowPri, + usesFlowControl: true, + tokens: 200, + }, + }, + { + name: "entry_without_ac", + entryInfo: entryInfo{ + term: 3, + index: 3, + enc: raftlog.EntryEncodingStandardWithoutAC, + tokens: 300, + }, + expectedFCState: entryFCState{ + term: 3, + index: 3, + usesFlowControl: false, + tokens: 300, + }, + }, + { + // V2 encoded entries with AC should end up with their original priority. + name: "v2_entry_with_ac", + entryInfo: entryInfo{ + term: 4, + index: 4, + enc: raftlog.EntryEncodingStandardWithACAndPriority, + pri: raftpb.NormalPri, + tokens: 400, + }, + expectedFCState: entryFCState{ + term: 4, + index: 4, + pri: raftpb.NormalPri, + usesFlowControl: true, + tokens: 400, + }, + }, + { + // Likewise for V2 sideloaded entries with AC enabled. + name: "v2_entry_with_ac", + entryInfo: entryInfo{ + term: 5, + index: 5, + enc: raftlog.EntryEncodingSideloadedWithACAndPriority, + pri: raftpb.AboveNormalPri, + tokens: 500, + }, + expectedFCState: entryFCState{ + term: 5, + index: 5, + pri: raftpb.AboveNormalPri, + usesFlowControl: true, + tokens: 500, + }, + }, + } { + t.Run(tc.name, func(t *testing.T) { + entry := testingCreateEntry(t, tc.entryInfo) + fcState := getEntryFCStateOrFatal(ctx, entry) + require.Equal(t, tc.expectedFCState, fcState) + }) + } +} + +func testingFirst(args ...interface{}) interface{} { + if len(args) > 0 { + return args[0] + } + return nil +} diff --git a/pkg/kv/kvserver/kvflowcontrol/rac2/store_stream.go b/pkg/kv/kvserver/kvflowcontrol/rac2/store_stream.go index af373c0542f9..233aa3b58764 100644 --- a/pkg/kv/kvserver/kvflowcontrol/rac2/store_stream.go +++ b/pkg/kv/kvserver/kvflowcontrol/rac2/store_stream.go @@ -12,61 +12,269 @@ package rac2 import ( "context" + "fmt" + "strings" + "time" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvflowcontrol" "github.com/cockroachdb/cockroach/pkg/settings/cluster" + "github.com/cockroachdb/cockroach/pkg/util/admission/admissionpb" + "github.com/cockroachdb/cockroach/pkg/util/hlc" + "github.com/cockroachdb/cockroach/pkg/util/log" + "github.com/cockroachdb/cockroach/pkg/util/metric" "github.com/cockroachdb/cockroach/pkg/util/syncutil" + "github.com/cockroachdb/redact" + "github.com/dustin/go-humanize" ) // StreamTokenCounterProvider is the interface for retrieving token counters // for a given stream. // // TODO(kvoli): Add stream deletion upon decommissioning a store. -// TODO(kvoli): Check mutex performance against syncutil.Map. type StreamTokenCounterProvider struct { - settings *cluster.Settings - - mu struct { - syncutil.Mutex - sendCounters, evalCounters map[kvflowcontrol.Stream]TokenCounter - } + settings *cluster.Settings + clock *hlc.Clock + tokenMetrics *TokenMetrics + sendLogger, evalLogger *blockedStreamLogger + sendCounters, evalCounters syncutil.Map[kvflowcontrol.Stream, tokenCounter] } // NewStreamTokenCounterProvider creates a new StreamTokenCounterProvider. 
-func NewStreamTokenCounterProvider(settings *cluster.Settings) *StreamTokenCounterProvider { +func NewStreamTokenCounterProvider( + settings *cluster.Settings, clock *hlc.Clock, +) *StreamTokenCounterProvider { return &StreamTokenCounterProvider{ - settings: settings, + settings: settings, + clock: clock, + tokenMetrics: NewTokenMetrics(), + sendLogger: newBlockedStreamLogger(flowControlSendMetricType), + evalLogger: newBlockedStreamLogger(flowControlEvalMetricType), } } // Eval returns the evaluation token counter for the given stream. -func (p *StreamTokenCounterProvider) Eval(stream kvflowcontrol.Stream) TokenCounter { - p.mu.Lock() - defer p.mu.Unlock() - - if t, ok := p.mu.evalCounters[stream]; ok { +func (p *StreamTokenCounterProvider) Eval(stream kvflowcontrol.Stream) *tokenCounter { + if t, ok := p.evalCounters.Load(stream); ok { return t } - - t := newTokenCounter(p.settings) - p.mu.evalCounters[stream] = t + t, _ := p.evalCounters.LoadOrStore(stream, newTokenCounter( + p.settings, p.clock, p.tokenMetrics.CounterMetrics[flowControlEvalMetricType])) return t } // Send returns the send token counter for the given stream. -func (p *StreamTokenCounterProvider) Send(stream kvflowcontrol.Stream) TokenCounter { - p.mu.Lock() - defer p.mu.Unlock() - - if t, ok := p.mu.sendCounters[stream]; ok { +func (p *StreamTokenCounterProvider) Send(stream kvflowcontrol.Stream) *tokenCounter { + if t, ok := p.sendCounters.Load(stream); ok { return t } - - t := newTokenCounter(p.settings) - p.mu.sendCounters[stream] = t + t, _ := p.sendCounters.LoadOrStore(stream, newTokenCounter( + p.settings, p.clock, p.tokenMetrics.CounterMetrics[flowControlSendMetricType])) return t } +// UpdateMetricGauges updates the gauge token metrics and logs blocked streams. +func (p *StreamTokenCounterProvider) UpdateMetricGauges() { + var ( + count [numFlowControlMetricTypes][admissionpb.NumWorkClasses]int64 + blockedCount [numFlowControlMetricTypes][admissionpb.NumWorkClasses]int64 + tokensAvailable [numFlowControlMetricTypes][admissionpb.NumWorkClasses]int64 + ) + now := p.clock.PhysicalTime() + + // First aggregate the metrics across all streams, by (eval|send) types and + // (regular|elastic) work classes, then using the aggregate update the + // gauges. 
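+	// Streams (and their token counters) are created dynamically, so we
+	// aggregate into a fixed set of per-(type, class) gauges rather than
+	// registering a gauge per stream.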
+	gaugeUpdateFn := func(metricType flowControlMetricType) func(
+		kvflowcontrol.Stream, *tokenCounter) bool {
+		return func(stream kvflowcontrol.Stream, t *tokenCounter) bool {
+			regularTokens := t.tokens(admissionpb.RegularWorkClass)
+			elasticTokens := t.tokens(admissionpb.ElasticWorkClass)
+			count[metricType][regular]++
+			count[metricType][elastic]++
+			tokensAvailable[metricType][regular] += int64(regularTokens)
+			tokensAvailable[metricType][elastic] += int64(elasticTokens)
+
+			if regularTokens <= 0 {
+				blockedCount[metricType][regular]++
+			}
+			if elasticTokens <= 0 {
+				blockedCount[metricType][elastic]++
+			}
+
+			return true
+		}
+	}
+
+	p.evalCounters.Range(gaugeUpdateFn(flowControlEvalMetricType))
+	p.sendCounters.Range(gaugeUpdateFn(flowControlSendMetricType))
+	for _, typ := range []flowControlMetricType{
+		flowControlEvalMetricType,
+		flowControlSendMetricType,
+	} {
+		for _, wc := range []admissionpb.WorkClass{
+			admissionpb.RegularWorkClass,
+			admissionpb.ElasticWorkClass,
+		} {
+			p.tokenMetrics.StreamMetrics[typ].Count[wc].Update(count[typ][wc])
+			p.tokenMetrics.StreamMetrics[typ].BlockedCount[wc].Update(blockedCount[typ][wc])
+			p.tokenMetrics.StreamMetrics[typ].TokensAvailable[wc].Update(tokensAvailable[typ][wc])
+		}
+	}
+
+	// Next, check if any of the blocked stream loggers are ready to log; if
+	// so, we iterate over every (eval|send) stream and observe the stream
+	// state. When vmodule=2, the logger is always ready.
+	logStreamFn := func(logger *blockedStreamLogger) func(
+		stream kvflowcontrol.Stream, t *tokenCounter) bool {
+		return func(stream kvflowcontrol.Stream, t *tokenCounter) bool {
+			// NB: We reset each stream's stats here. The stat returned will be the
+			// delta between the last stream observation and now.
+			regularStats, elasticStats := t.GetAndResetStats(now)
+			logger.observeStream(stream, now,
+				t.tokens(regular), t.tokens(elastic), regularStats, elasticStats)
+			return true
+		}
+	}
+	if p.evalLogger.willLog() {
+		p.evalCounters.Range(logStreamFn(p.evalLogger))
+		p.evalLogger.flushLogs()
+	}
+	if p.sendLogger.willLog() {
+		p.sendCounters.Range(logStreamFn(p.sendLogger))
+		p.sendLogger.flushLogs()
+	}
+}
+
+// Metrics returns metrics tracking the token counters and streams.
+func (p *StreamTokenCounterProvider) Metrics() metric.Struct {
+	return p.tokenMetrics
+}
+
+// TODO(kvoli): Consider adjusting these limits and making them configurable.
+const (
+	// streamStatsCountCap is the maximum number of streams to log verbose stats
+	// for. Streams are only logged if they were blocked at some point in the
+	// last metrics interval.
+	streamStatsCountCap = 20
+	// blockedStreamCountCap is the maximum number of streams to log (compactly)
+	// as currently blocked.
+	blockedStreamCountCap = 100
+	// blockedStreamLoggingInterval is the interval at which blocked streams are
+	// logged. This interval applies independently to both eval and send streams
+	// i.e., we log both eval and send streams at this interval, independent of
+	// each other.
+	blockedStreamLoggingInterval = 30 * time.Second
+)
+
+type blockedStreamLogger struct {
+	metricType flowControlMetricType
+	limiter    log.EveryN
+	// blockedCount is the total number of unique streams blocked in the last
+	// interval, regardless of the work class, e.g., if 5 streams exist and all
+	// are blocked for both elastic and regular work classes, the counts would
+	// be:
+	//   blockedRegularCount=5
+	//   blockedElasticCount=5
+	//   blockedCount=5
+	blockedCount        int
+	blockedElasticCount int
+	blockedRegularCount int
+	elaBuf, regBuf      strings.Builder
+}
+
+func newBlockedStreamLogger(metricType flowControlMetricType) *blockedStreamLogger {
+	return &blockedStreamLogger{
+		metricType: metricType,
+		limiter:    log.Every(blockedStreamLoggingInterval),
+	}
+}
+
+func (b *blockedStreamLogger) willLog() bool {
+	return b.limiter.ShouldLog()
+}
+
+func (b *blockedStreamLogger) flushLogs() {
+	if b.blockedRegularCount > 0 {
+		log.Warningf(context.Background(), "%d blocked %s regular replication stream(s): %s",
+			b.blockedRegularCount, b.metricType, redact.SafeString(b.regBuf.String()))
+	}
+	if b.blockedElasticCount > 0 {
+		log.Warningf(context.Background(), "%d blocked %s elastic replication stream(s): %s",
+			b.blockedElasticCount, b.metricType, redact.SafeString(b.elaBuf.String()))
+	}
+	b.elaBuf.Reset()
+	b.regBuf.Reset()
+	b.blockedCount = 0
+	b.blockedRegularCount = 0
+	b.blockedElasticCount = 0
+}
+
+func (b *blockedStreamLogger) observeStream(
+	stream kvflowcontrol.Stream,
+	now time.Time,
+	regularTokens, elasticTokens kvflowcontrol.Tokens,
+	regularStats, elasticStats deltaStats,
+) {
+	// Log stats, which reflect both elastic and regular work, at the interval
+	// defined by blockedStreamLoggingInterval. If a high-enough log verbosity
+	// is specified, willLog will always be true, but since this method
+	// executes at the frequency of scraping the metric, we will still log at
+	// a reasonable rate.
+ logBlockedStream := func(stream kvflowcontrol.Stream, blockedCount int, buf *strings.Builder) { + if blockedCount == 1 { + buf.WriteString(stream.String()) + } else if blockedCount <= blockedStreamCountCap { + buf.WriteString(", ") + buf.WriteString(stream.String()) + } else if blockedCount == blockedStreamCountCap+1 { + buf.WriteString(" omitted some due to overflow") + } + } + + if regularTokens <= 0 { + b.blockedRegularCount++ + logBlockedStream(stream, b.blockedRegularCount, &b.regBuf) + } + if elasticTokens <= 0 { + b.blockedElasticCount++ + logBlockedStream(stream, b.blockedElasticCount, &b.elaBuf) + } + if regularStats.noTokenDuration == 0 && elasticStats.noTokenDuration == 0 { + return + } + + b.blockedCount++ + if b.blockedCount <= streamStatsCountCap { + var bb strings.Builder + fmt.Fprintf(&bb, "%v stream %s was blocked: durations:", b.metricType, stream.String()) + if regularStats.noTokenDuration > 0 { + fmt.Fprintf(&bb, " regular %s", regularStats.noTokenDuration.String()) + } + if elasticStats.noTokenDuration > 0 { + fmt.Fprintf(&bb, " elastic %s", elasticStats.noTokenDuration.String()) + } + regularDelta := regularStats.tokensReturned - regularStats.tokensDeducted + elasticDelta := elasticStats.tokensReturned - elasticStats.tokensDeducted + fmt.Fprintf(&bb, " tokens delta: regular %s (%s - %s) elastic %s (%s - %s)", + pprintTokens(regularDelta), + pprintTokens(regularStats.tokensReturned), + pprintTokens(regularStats.tokensDeducted), + pprintTokens(elasticDelta), + pprintTokens(elasticStats.tokensReturned), + pprintTokens(elasticStats.tokensDeducted)) + log.Infof(context.Background(), "%s", redact.SafeString(bb.String())) + } else if b.blockedCount == streamStatsCountCap+1 { + log.Infof(context.Background(), "skipped logging some streams that were blocked") + } +} + +func pprintTokens(t kvflowcontrol.Tokens) string { + if t < 0 { + return fmt.Sprintf("-%s", humanize.IBytes(uint64(-t))) + } + return humanize.IBytes(uint64(t)) +} + // SendTokenWatcherHandleID is a unique identifier for a handle that is // watching for available elastic send tokens on a stream. type SendTokenWatcherHandleID int64 @@ -85,7 +293,7 @@ type SendTokenWatcher interface { // call CancelHandle when tokens are no longer needed, or when the caller is // done. NotifyWhenAvailable( - TokenCounter, + *tokenCounter, TokenGrantNotification, ) SendTokenWatcherHandleID // CancelHandle cancels the given handle, stopping it from being notified diff --git a/pkg/kv/kvserver/kvflowcontrol/rac2/store_stream_test.go b/pkg/kv/kvserver/kvflowcontrol/rac2/store_stream_test.go new file mode 100644 index 000000000000..ef8ee71970c3 --- /dev/null +++ b/pkg/kv/kvserver/kvflowcontrol/rac2/store_stream_test.go @@ -0,0 +1,205 @@ +// Copyright 2024 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package rac2 + +import ( + "context" + "fmt" + "math" + "regexp" + "testing" + + "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvflowcontrol" + "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/settings/cluster" + "github.com/cockroachdb/cockroach/pkg/util/admission/admissionpb" + "github.com/cockroachdb/cockroach/pkg/util/hlc" + "github.com/cockroachdb/cockroach/pkg/util/leaktest" + "github.com/cockroachdb/cockroach/pkg/util/log" + "github.com/cockroachdb/cockroach/pkg/util/timeutil" + "github.com/stretchr/testify/require" +) + +func TestBlockedStreamLogging(t *testing.T) { + defer leaktest.AfterTest(t)() + s := log.ScopeWithoutShowLogs(t) + // Causes every call to update the gauges to log. + prevVModule := log.GetVModule() + _ = log.SetVModule("store_stream=2") + defer func() { _ = log.SetVModule(prevVModule) }() + defer s.Close(t) + + ctx := context.Background() + testStartTs := timeutil.Now() + + makeStream := func(id uint64) kvflowcontrol.Stream { + return kvflowcontrol.Stream{ + TenantID: roachpb.MustMakeTenantID(id), + StoreID: roachpb.StoreID(id), + } + } + + st := cluster.MakeTestingClusterSettings() + const numTokens = 1 << 20 /* 1 MiB */ + kvflowcontrol.ElasticTokensPerStream.Override(ctx, &st.SV, numTokens) + kvflowcontrol.RegularTokensPerStream.Override(ctx, &st.SV, numTokens) + p := NewStreamTokenCounterProvider(st, hlc.NewClockForTesting(nil)) + + numBlocked := 0 + createStreamAndExhaustTokens := func(id uint64, checkMetric bool) { + p.Eval(makeStream(id)).Deduct(ctx, admissionpb.RegularWorkClass, kvflowcontrol.Tokens(numTokens)) + if checkMetric { + p.UpdateMetricGauges() + require.Equal(t, int64(numBlocked+1), p.tokenMetrics.StreamMetrics[flowControlEvalMetricType].BlockedCount[elastic].Value()) + require.Equal(t, int64(numBlocked+1), p.tokenMetrics.StreamMetrics[flowControlEvalMetricType].BlockedCount[regular].Value()) + } + numBlocked++ + } + // 1 stream that is blocked. + id := uint64(1) + createStreamAndExhaustTokens(id, true) + // Total 24 streams are blocked. + for id++; id < 25; id++ { + createStreamAndExhaustTokens(id, false) + } + // 25th stream will also be blocked. The detailed stats will only cover an + // arbitrary subset of 20 streams. + log.Infof(ctx, "creating stream id %d", id) + createStreamAndExhaustTokens(id, true) + + // Total 104 streams are blocked. + for id++; id < 105; id++ { + createStreamAndExhaustTokens(id, false) + } + // 105th stream will also be blocked. The blocked stream names will only + // list 100 streams. 
+ log.Infof(ctx, "creating stream id %d", id) + createStreamAndExhaustTokens(id, true) + + log.FlushFiles() + entries, err := log.FetchEntriesFromFiles(testStartTs.UnixNano(), + math.MaxInt64, 2000, + regexp.MustCompile(`store_stream\.go|store_stream_test\.go`), + log.WithMarkedSensitiveData) + require.NoError(t, err) + + blockedStreamRegexp, err := regexp.Compile( + "eval stream .* was blocked: durations: regular .* elastic .* tokens delta: regular .* elastic .*") + require.NoError(t, err) + blockedStreamSkippedRegexp, err := regexp.Compile( + "skipped logging some streams that were blocked") + require.NoError(t, err) + + const blockedCountElasticRegexp = "%d blocked eval elastic replication stream.*" + const blockedCountRegularRegexp = "%d blocked eval regular replication stream.*" + blocked1ElasticRegexp, err := regexp.Compile(fmt.Sprintf(blockedCountElasticRegexp, 1)) + require.NoError(t, err) + blocked1RegularRegexp, err := regexp.Compile(fmt.Sprintf(blockedCountRegularRegexp, 1)) + require.NoError(t, err) + blocked25ElasticRegexp, err := regexp.Compile(fmt.Sprintf(blockedCountElasticRegexp, 25)) + require.NoError(t, err) + blocked25RegularRegexp, err := regexp.Compile(fmt.Sprintf(blockedCountRegularRegexp, 25)) + require.NoError(t, err) + blocked105ElasticRegexp, err := regexp.Compile( + "105 blocked eval elastic replication stream.* omitted some due to overflow") + require.NoError(t, err) + blocked105RegularRegexp, err := regexp.Compile( + "105 blocked eval regular replication stream.* omitted some due to overflow") + require.NoError(t, err) + + const creatingRegexp = "creating stream id %d" + creating25Regexp, err := regexp.Compile(fmt.Sprintf(creatingRegexp, 25)) + require.NoError(t, err) + creating105Regexp, err := regexp.Compile(fmt.Sprintf(creatingRegexp, 105)) + require.NoError(t, err) + + blockedStreamCount := 0 + foundBlockedElastic := false + foundBlockedRegular := false + foundBlockedStreamSkipped := false + // First section of the log where 1 stream blocked. Entries are in reverse + // chronological order. + index := len(entries) - 1 + for ; index >= 0; index-- { + entry := entries[index] + if creating25Regexp.MatchString(entry.Message) { + break + } + if blockedStreamRegexp.MatchString(entry.Message) { + blockedStreamCount++ + } + if blocked1ElasticRegexp.MatchString(entry.Message) { + foundBlockedElastic = true + } + if blocked1RegularRegexp.MatchString(entry.Message) { + foundBlockedRegular = true + } + if blockedStreamSkippedRegexp.MatchString(entry.Message) { + foundBlockedStreamSkipped = true + } + } + require.Equal(t, 1, blockedStreamCount) + require.True(t, foundBlockedElastic) + require.True(t, foundBlockedRegular) + require.False(t, foundBlockedStreamSkipped) + + blockedStreamCount = 0 + foundBlockedElastic = false + foundBlockedRegular = false + // Second section of the log where 25 streams are blocked and 20 are logged + // (streamStatsCountCap). 
+ for ; index >= 0; index-- { + entry := entries[index] + if creating105Regexp.MatchString(entry.Message) { + break + } + if blockedStreamRegexp.MatchString(entry.Message) { + blockedStreamCount++ + } + if blocked25ElasticRegexp.MatchString(entry.Message) { + foundBlockedElastic = true + } + if blocked25RegularRegexp.MatchString(entry.Message) { + foundBlockedRegular = true + } + if blockedStreamSkippedRegexp.MatchString(entry.Message) { + foundBlockedStreamSkipped = true + } + } + require.Equal(t, 20, blockedStreamCount) + require.True(t, foundBlockedElastic) + require.True(t, foundBlockedRegular) + require.True(t, foundBlockedStreamSkipped) + + blockedStreamCount = 0 + foundBlockedElastic = false + foundBlockedRegular = false + // Third section of the log where 105 streams blocked. + for ; index >= 0; index-- { + entry := entries[index] + if blockedStreamRegexp.MatchString(entry.Message) { + blockedStreamCount++ + } + if blocked105ElasticRegexp.MatchString(entry.Message) { + foundBlockedElastic = true + } + if blocked105RegularRegexp.MatchString(entry.Message) { + foundBlockedRegular = true + } + if blockedStreamSkippedRegexp.MatchString(entry.Message) { + foundBlockedStreamSkipped = true + } + } + require.Equal(t, 20, blockedStreamCount) + require.True(t, foundBlockedElastic, "unable to find %v", blocked105ElasticRegexp) + require.True(t, foundBlockedRegular, "unable to find %v", blocked105RegularRegexp) + require.True(t, foundBlockedStreamSkipped, "unable to find %v", blockedStreamSkippedRegexp) +} diff --git a/pkg/kv/kvserver/kvflowcontrol/rac2/testdata/log_tracker b/pkg/kv/kvserver/kvflowcontrol/rac2/testdata/log_tracker new file mode 100644 index 000000000000..fdf07a92e695 --- /dev/null +++ b/pkg/kv/kvserver/kvflowcontrol/rac2/testdata/log_tracker @@ -0,0 +1,282 @@ +# ------------------------------------------------------------------------------ +# Test basic operations. + +reset term=1 index=5 +---- +mark:{Term:1 Index:5}, stable:5, admitted:[5 5 5 5] + +append term=1 after=5 to=10 +---- +mark:{Term:1 Index:10}, stable:5, admitted:[5 5 5 5] + +register term=1 index=7 pri=LowPri +---- +mark:{Term:1 Index:10}, stable:5, admitted:[5 5 5 5] +LowPri: {Term:1 Index:7} + +sync term=1 index=10 +---- +[upd] mark:{Term:1 Index:10}, stable:10, admitted:[6 10 10 10] +LowPri: {Term:1 Index:7} + +admit term=1 index=6 pri=LowPri +---- +mark:{Term:1 Index:10}, stable:10, admitted:[6 10 10 10] +LowPri: {Term:1 Index:7} + +admit term=1 index=7 pri=LowPri +---- +[upd] mark:{Term:1 Index:10}, stable:10, admitted:[10 10 10 10] + +append term=1 after=10 to=12 +---- +mark:{Term:1 Index:12}, stable:10, admitted:[10 10 10 10] + +register term=1 index=11 pri=LowPri +---- +mark:{Term:1 Index:12}, stable:10, admitted:[10 10 10 10] +LowPri: {Term:1 Index:11} + +register term=1 index=12 pri=HighPri +---- +mark:{Term:1 Index:12}, stable:10, admitted:[10 10 10 10] +LowPri: {Term:1 Index:11} +HighPri: {Term:1 Index:12} + +append term=2 after=5 to=10 +---- +[upd] mark:{Term:2 Index:10}, stable:5, admitted:[5 5 5 5] + +# ------------------------------------------------------------------------------ +# Test stable index advancement racing with admission. 
+ +reset term=1 index=1 +---- +mark:{Term:1 Index:1}, stable:1, admitted:[1 1 1 1] + +append term=1 after=1 to=10 +---- +mark:{Term:1 Index:10}, stable:1, admitted:[1 1 1 1] + +register term=1 index=5 pri=HighPri +---- +mark:{Term:1 Index:10}, stable:1, admitted:[1 1 1 1] +HighPri: {Term:1 Index:5} + +admit term=1 index=5 pri=HighPri +---- +mark:{Term:1 Index:10}, stable:1, admitted:[1 1 1 1] + +sync term=1 index=10 +---- +[upd] mark:{Term:1 Index:10}, stable:10, admitted:[10 10 10 10] + +# ------------------------------------------------------------------------------ +# Same race but sync completes first. + +reset term=1 index=1 +---- +mark:{Term:1 Index:1}, stable:1, admitted:[1 1 1 1] + +append term=1 after=1 to=10 +---- +mark:{Term:1 Index:10}, stable:1, admitted:[1 1 1 1] + +register term=1 index=5 pri=HighPri +---- +mark:{Term:1 Index:10}, stable:1, admitted:[1 1 1 1] +HighPri: {Term:1 Index:5} + +sync term=1 index=10 +---- +[upd] mark:{Term:1 Index:10}, stable:10, admitted:[10 10 10 4] +HighPri: {Term:1 Index:5} + +admit term=1 index=5 pri=HighPri +---- +[upd] mark:{Term:1 Index:10}, stable:10, admitted:[10 10 10 10] + +# ------------------------------------------------------------------------------ +# Regression test with admitting an entry missing in the queue. + +reset term=1 index=1 +---- +mark:{Term:1 Index:1}, stable:1, admitted:[1 1 1 1] + +append term=1 after=1 to=3 +---- +mark:{Term:1 Index:3}, stable:1, admitted:[1 1 1 1] + +register term=1 index=3 pri=LowPri +---- +mark:{Term:1 Index:3}, stable:1, admitted:[1 1 1 1] +LowPri: {Term:1 Index:3} + +append term=3 after=3 to=5 +---- +[upd] mark:{Term:3 Index:5}, stable:1, admitted:[1 1 1 1] +LowPri: {Term:1 Index:3} + +# The term 1 entry is admitted because the admission mark is at term 3. +admit term=3 index=2 pri=LowPri +---- +mark:{Term:3 Index:5}, stable:1, admitted:[1 1 1 1] + +# ------------------------------------------------------------------------------ +# Port of waiting_for_admission_state test. + +reset term=3 index=1 +---- +mark:{Term:3 Index:1}, stable:1, admitted:[1 1 1 1] + +append term=3 after=1 to=7 +---- +mark:{Term:3 Index:7}, stable:1, admitted:[1 1 1 1] + +register term=3 index=5 pri=LowPri +---- +mark:{Term:3 Index:7}, stable:1, admitted:[1 1 1 1] +LowPri: {Term:3 Index:5} + +register term=3 index=6 pri=HighPri +---- +mark:{Term:3 Index:7}, stable:1, admitted:[1 1 1 1] +LowPri: {Term:3 Index:5} +HighPri: {Term:3 Index:6} + +register term=3 index=7 pri=HighPri +---- +mark:{Term:3 Index:7}, stable:1, admitted:[1 1 1 1] +LowPri: {Term:3 Index:5} +HighPri: {Term:3 Index:6} {Term:3 Index:7} + +# No-op, since the term is old. +admit term=2 index=7 pri=HighPri +---- +mark:{Term:3 Index:7}, stable:1, admitted:[1 1 1 1] +LowPri: {Term:3 Index:5} +HighPri: {Term:3 Index:6} {Term:3 Index:7} + +# Entry admitted, but the admitted index stays at 1 since the stable index is +# still there. +admit term=3 index=6 pri=HighPri +---- +mark:{Term:3 Index:7}, stable:1, admitted:[1 1 1 1] +LowPri: {Term:3 Index:5} +HighPri: {Term:3 Index:7} + +# Stable index moves, and admitted indices move accordingly. 
+sync term=3 index=7 +---- +[upd] mark:{Term:3 Index:7}, stable:7, admitted:[4 7 7 6] +LowPri: {Term:3 Index:5} +HighPri: {Term:3 Index:7} + +append term=3 after=7 to=8 +---- +mark:{Term:3 Index:8}, stable:7, admitted:[4 7 7 6] +LowPri: {Term:3 Index:5} +HighPri: {Term:3 Index:7} + +register term=3 index=8 pri=HighPri +---- +mark:{Term:3 Index:8}, stable:7, admitted:[4 7 7 6] +LowPri: {Term:3 Index:5} +HighPri: {Term:3 Index:7} {Term:3 Index:8} + +# Admitted indices move up for priorities with no queue. +sync term=3 index=8 +---- +[upd] mark:{Term:3 Index:8}, stable:8, admitted:[4 8 8 6] +LowPri: {Term:3 Index:5} +HighPri: {Term:3 Index:7} {Term:3 Index:8} + +# All HighPri entries are admitted. +admit term=3 index=8 pri=HighPri +---- +[upd] mark:{Term:3 Index:8}, stable:8, admitted:[4 8 8 8] +LowPri: {Term:3 Index:5} + +append term=3 after=8 to=11 +---- +mark:{Term:3 Index:11}, stable:8, admitted:[4 8 8 8] +LowPri: {Term:3 Index:5} + +register term=3 index=9 pri=LowPri +---- +mark:{Term:3 Index:11}, stable:8, admitted:[4 8 8 8] +LowPri: {Term:3 Index:5} {Term:3 Index:9} + +register term=3 index=11 pri=LowPri +---- +mark:{Term:3 Index:11}, stable:8, admitted:[4 8 8 8] +LowPri: {Term:3 Index:5} {Term:3 Index:9} {Term:3 Index:11} + +# New term, removes a suffix of the log. +append term=4 after=9 to=10 +---- +[upd] mark:{Term:4 Index:10}, stable:8, admitted:[4 8 8 8] +LowPri: {Term:3 Index:5} {Term:3 Index:9} + +register term=4 index=10 pri=LowPri +---- +mark:{Term:4 Index:10}, stable:8, admitted:[4 8 8 8] +LowPri: {Term:3 Index:5} {Term:3 Index:9} {Term:4 Index:10} + +# New term, again removes a suffix of the log. +append term=5 after=8 to=9 +---- +[upd] mark:{Term:5 Index:9}, stable:8, admitted:[4 8 8 8] +LowPri: {Term:3 Index:5} + +register term=5 index=9 pri=LowPri +---- +mark:{Term:5 Index:9}, stable:8, admitted:[4 8 8 8] +LowPri: {Term:3 Index:5} {Term:5 Index:9} + +# New term, again removes a suffix of the log. +append term=6 after=6 to=7 +---- +[upd] mark:{Term:6 Index:7}, stable:6, admitted:[4 6 6 6] +LowPri: {Term:3 Index:5} + +register term=6 index=7 pri=LowPri +---- +mark:{Term:6 Index:7}, stable:6, admitted:[4 6 6 6] +LowPri: {Term:3 Index:5} {Term:6 Index:7} + +# New term, no suffix is removed. +append term=7 after=7 to=8 +---- +[upd] mark:{Term:7 Index:8}, stable:6, admitted:[4 6 6 6] +LowPri: {Term:3 Index:5} {Term:6 Index:7} + +register term=7 index=8 pri=LowPri +---- +mark:{Term:7 Index:8}, stable:6, admitted:[4 6 6 6] +LowPri: {Term:3 Index:5} {Term:6 Index:7} {Term:7 Index:8} + +# Index not found, but a prefix of the queue is removed. Note: the term 6 entry +# is removed due to being stale, even though its index is above the admitted. +admit term=7 index=6 pri=LowPri +---- +[upd] mark:{Term:7 Index:8}, stable:6, admitted:[6 6 6 6] +LowPri: {Term:7 Index:8} + +# A stale admission, no-op. +admit term=6 index=8 pri=LowPri +---- +mark:{Term:7 Index:8}, stable:6, admitted:[6 6 6 6] +LowPri: {Term:7 Index:8} + +sync term=7 index=8 +---- +[upd] mark:{Term:7 Index:8}, stable:8, admitted:[7 8 8 8] +LowPri: {Term:7 Index:8} + +# Everything is persisted and admitted. 
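The admitted vector in these dumps follows a simple rule: for each priority, it
is the stable index, capped just below the lowest index still queued for
admission at that priority. A minimal sketch of that rule (the waiting/stable
inputs are hypothetical stand-ins for the LogTracker's internal state, not its
actual representation); the admit directive below then empties the LowPri queue
and lets the whole vector catch up to the stable index:

// admittedIndex computes the admitted index for one priority: everything up
// to the stable index counts as admitted, except entries still waiting for
// admission at this priority.
func admittedIndex(stable uint64, waiting []uint64) uint64 {
	// waiting holds queued entry indexes in ascending order; the first one
	// caps admission just below itself.
	if len(waiting) > 0 && waiting[0] <= stable {
		return waiting[0] - 1
	}
	return stable
}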
+admit term=7 index=8 pri=LowPri
+----
+[upd] mark:{Term:7 Index:8}, stable:8, admitted:[8 8 8 8]
+
+# ------------------------------------------------------------------------------
diff --git a/pkg/kv/kvserver/kvflowcontrol/rac2/testdata/range_controller/handle_raft_event b/pkg/kv/kvserver/kvflowcontrol/rac2/testdata/range_controller/handle_raft_event
new file mode 100644
index 000000000000..2bf6aefdb8bf
--- /dev/null
+++ b/pkg/kv/kvserver/kvflowcontrol/rac2/testdata/range_controller/handle_raft_event
@@ -0,0 +1,186 @@
+# Initialize a range with voters on s1, s2 and s3. The local replica and
+# leaseholder will be s1. The leaseholder is denoted by the '*' suffix.
+init
+range_id=1 tenant_id=1 local_replica_id=1
+  store_id=1 replica_id=1 type=VOTER_FULL state=StateReplicate
+  store_id=2 replica_id=2 type=VOTER_FULL state=StateReplicate
+  store_id=3 replica_id=3 type=VOTER_FULL state=StateReplicate
+----
+r1: [(n1,s1):1*,(n2,s2):2,(n3,s3):3]
+t1/s1: reg=+16 MiB/+16 MiB ela=+8.0 MiB/+8.0 MiB
+t1/s2: reg=+16 MiB/+16 MiB ela=+8.0 MiB/+8.0 MiB
+t1/s3: reg=+16 MiB/+16 MiB ela=+8.0 MiB/+8.0 MiB
+
+# There should be no tracked entries for the range.
+stream_state range_id=1
+----
+(n1,s1):1: state=replicate closed=false
+++++
+(n2,s2):2: state=replicate closed=false
+++++
+(n3,s3):3: state=replicate closed=false
+++++
+
+# Simulate a call to `HandleRaftEventRaftMuLocked` on s1 (leader/local
+# replica). The event will have three entries, each 1MiB in size. Below, we
+# see 3MiB of tokens deducted from each replica stream (both elastic and
+# regular, as regular entries deduct from the elastic stream as well).
+raft_event
+range_id=1
+  term=1 index=1 pri=NormalPri size=1MiB
+  term=1 index=2 pri=NormalPri size=1MiB
+  term=1 index=3 pri=NormalPri size=1MiB
+----
+t1/s1: reg=+13 MiB/+16 MiB ela=+5.0 MiB/+8.0 MiB
+t1/s2: reg=+13 MiB/+16 MiB ela=+5.0 MiB/+8.0 MiB
+t1/s3: reg=+13 MiB/+16 MiB ela=+5.0 MiB/+8.0 MiB
+
+# The tracker should be tracking the three entries at indices 1..3, for each
+# replica stream (1,2,3).
+stream_state range_id=1
+----
+(n1,s1):1: state=replicate closed=false
+NormalPri:
+  term=1 index=1 tokens=1048576
+  term=1 index=2 tokens=1048576
+  term=1 index=3 tokens=1048576
+++++
+(n2,s2):2: state=replicate closed=false
+NormalPri:
+  term=1 index=1 tokens=1048576
+  term=1 index=2 tokens=1048576
+  term=1 index=3 tokens=1048576
+++++
+(n3,s3):3: state=replicate closed=false
+NormalPri:
+  term=1 index=1 tokens=1048576
+  term=1 index=2 tokens=1048576
+  term=1 index=3 tokens=1048576
+++++
+
+# Simulate the admitted index advancing to 3 for the same leader term (1) on a
+# quorum of replicas. This should result in all of the tracked tokens (3MiB)
+# being returned for s1 and s2, and their trackers emptied.
+admit
+range_id=1
+  store_id=1 term=1 to_index=3 pri=NormalPri
+  store_id=2 term=1 to_index=3 pri=NormalPri
+----
+t1/s1: reg=+16 MiB/+16 MiB ela=+8.0 MiB/+8.0 MiB
+t1/s2: reg=+16 MiB/+16 MiB ela=+8.0 MiB/+8.0 MiB
+t1/s3: reg=+13 MiB/+16 MiB ela=+5.0 MiB/+8.0 MiB
+
+stream_state range_id=1
+----
+(n1,s1):1: state=replicate closed=false
+++++
+(n2,s2):2: state=replicate closed=false
+++++
+(n3,s3):3: state=replicate closed=false
+NormalPri:
+  term=1 index=1 tokens=1048576
+  term=1 index=2 tokens=1048576
+  term=1 index=3 tokens=1048576
+++++
+
+# Change the tracker state of s3 to StateProbe. This should trigger token
+# return for s3, untracking all entries and closing the stream after a 1s
+# delay.
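The set_replicas and tick directives below exercise a grace period: a stream
whose replica drops to StateProbe is first parked in probeRecentlyReplicate,
and only closed (with its tracked tokens returned) once the delay elapses. A
rough sketch of that pattern, with an invented name and callback; the actual
range controller folds this into its replica state machine:

const probeRecentlyReplicateDuration = time.Second // assumed 1s grace period

// maybeCloseProbingStream closes a stream that has been probing for longer
// than the grace period, returning its tracked tokens via returnTokens.
func maybeCloseProbingStream(now, enteredProbe time.Time, returnTokens func()) (closed bool) {
	if now.Sub(enteredProbe) < probeRecentlyReplicateDuration {
		// Still within the grace period: stay in probeRecentlyReplicate and
		// keep the entries tracked, in case the replica resumes replicating.
		return false
	}
	returnTokens()
	return true
}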
+set_replicas
+range_id=1 tenant_id=1 local_replica_id=1
+  store_id=1 replica_id=1 type=VOTER_FULL state=StateReplicate
+  store_id=2 replica_id=2 type=VOTER_FULL state=StateReplicate
+  store_id=3 replica_id=3 type=VOTER_FULL state=StateProbe
+----
+r1: [(n1,s1):1*,(n2,s2):2,(n3,s3):3]
+
+# Tick the clock by less than the probe-to-close delay. The stream should
+# still be open, in state probeRecentlyReplicate.
+tick duration=500ms
+----
+now=500ms
+
+stream_state range_id=1
+----
+(n1,s1):1: state=replicate closed=false
+++++
+(n2,s2):2: state=replicate closed=false
+++++
+(n3,s3):3: state=probeRecentlyReplicate closed=false
+NormalPri:
+  term=1 index=1 tokens=1048576
+  term=1 index=2 tokens=1048576
+  term=1 index=3 tokens=1048576
+++++
+
+# Tick the clock by the remaining probe-to-close delay. The stream should now
+# be closed and all tokens returned.
+tick duration=500ms
+----
+now=1s
+
+stream_state range_id=1
+----
+(n1,s1):1: state=replicate closed=false
+++++
+(n2,s2):2: state=replicate closed=false
+++++
+(n3,s3):3: closed
+
+# Next, start a WaitForEval operation. We will update the state of s3 to be
+# Replicate, which should trigger the WaitForEval to refresh. First, deduct all
+# the tokens from s2 so that the operation is forced to wait.
+raft_event
+range_id=1
+  term=1 index=4 pri=NormalPri size=16MiB
+----
+t1/s1: reg=+0 B/+16 MiB ela=-8.0 MiB/+8.0 MiB
+t1/s2: reg=+0 B/+16 MiB ela=-8.0 MiB/+8.0 MiB
+t1/s3: reg=+16 MiB/+16 MiB ela=+8.0 MiB/+8.0 MiB
+
+wait_for_eval name=a range_id=1 pri=LowPri
+----
+range_id=1 tenant_id={1} local_replica_id=1
+  name=a pri=low-pri done=false waited=false err=
+
+admit
+range_id=1
+  store_id=1 term=1 to_index=4 pri=NormalPri
+----
+t1/s1: reg=+16 MiB/+16 MiB ela=+8.0 MiB/+8.0 MiB
+t1/s2: reg=+0 B/+16 MiB ela=-8.0 MiB/+8.0 MiB
+t1/s3: reg=+16 MiB/+16 MiB ela=+8.0 MiB/+8.0 MiB
+
+# The operation should still be waiting, as it requires all replicas which are
+# in state replicate to have tokens available; s1 does, but s2 doesn't.
+check_state
+----
+range_id=1 tenant_id={1} local_replica_id=1
+  name=a pri=low-pri done=false waited=false err=
+
+# Change the state of s3 to StateReplicate and s2 to StateSnapshot. This should
+# trigger the operation to refresh, ignore s2 now that it is in StateSnapshot,
+# and check s3 for available tokens now that it is in StateReplicate.
+set_replicas
+range_id=1 tenant_id=1 local_replica_id=1
+  store_id=1 replica_id=1 type=VOTER_FULL state=StateReplicate
+  store_id=2 replica_id=2 type=VOTER_FULL state=StateSnapshot
+  store_id=3 replica_id=3 type=VOTER_FULL state=StateReplicate
+----
+r1: [(n1,s1):1*,(n2,s2):2,(n3,s3):3]
+
+raft_event
+range_id=1
+  term=1 index=5 pri=NormalPri size=1MiB
+  term=1 index=6 pri=NormalPri size=1MiB
+  term=1 index=7 pri=NormalPri size=1MiB
+----
+t1/s1: reg=+13 MiB/+16 MiB ela=+5.0 MiB/+8.0 MiB
+t1/s2: reg=+13 MiB/+16 MiB ela=+5.0 MiB/+8.0 MiB
+t1/s3: reg=+13 MiB/+16 MiB ela=+5.0 MiB/+8.0 MiB
+
+# The operation should now be done and have waited for s1 and s3.
+check_state
+----
+range_id=1 tenant_id={1} local_replica_id=1
+  name=a pri=low-pri done=true waited=true err=
diff --git a/pkg/kv/kvserver/kvflowcontrol/rac2/testdata/range_controller/wait_for_eval b/pkg/kv/kvserver/kvflowcontrol/rac2/testdata/range_controller/wait_for_eval
new file mode 100644
index 000000000000..8b0fb4b73edb
--- /dev/null
+++ b/pkg/kv/kvserver/kvflowcontrol/rac2/testdata/range_controller/wait_for_eval
@@ -0,0 +1,427 @@
+# Initialize a range with voters on s1, s2 and s3. The local replica and
+# leaseholder will be s1.
The leaseholder is denoted by the '*' suffix. Also +# set all streams to initially have 0 tokens and a limit of 1 token to simplify +# the test, as evaluation requests only wait for positive tokens. +init regular_limit=1 regular_init=0 elastic_limit=1 elastic_init=0 +range_id=1 tenant_id=1 local_replica_id=1 + store_id=1 replica_id=1 type=VOTER_FULL state=StateReplicate + store_id=2 replica_id=2 type=VOTER_FULL state=StateReplicate + store_id=3 replica_id=3 type=VOTER_FULL state=StateReplicate +---- +r1: [(n1,s1):1*,(n2,s2):2,(n3,s3):3] +t1/s1: reg=+0 B/+1 B ela=+0 B/+1 B +t1/s2: reg=+0 B/+1 B ela=+0 B/+1 B +t1/s3: reg=+0 B/+1 B ela=+0 B/+1 B + +metrics +---- +kvflowcontrol.eval_wait.regular.requests.waiting : 0 +kvflowcontrol.eval_wait.regular.requests.admitted : 0 +kvflowcontrol.eval_wait.regular.requests.errored : 0 +kvflowcontrol.eval_wait.regular.requests.bypassed : 0 +kvflowcontrol.eval_wait.regular.duration.count : 0 +kvflowcontrol.eval_wait.elastic.requests.waiting : 0 +kvflowcontrol.eval_wait.elastic.requests.admitted : 0 +kvflowcontrol.eval_wait.elastic.requests.errored : 0 +kvflowcontrol.eval_wait.elastic.requests.bypassed : 0 +kvflowcontrol.eval_wait.elastic.duration.count : 0 + +# Start a high priority evaluation. It should not complete due to lack of +# tokens. +wait_for_eval name=a range_id=1 pri=HighPri +---- +range_id=1 tenant_id={1} local_replica_id=1 + name=a pri=high-pri done=false waited=false err= + +# Start a low priority evaluation. It should also not complete. +wait_for_eval name=b range_id=1 pri=LowPri +---- +range_id=1 tenant_id={1} local_replica_id=1 + name=a pri=high-pri done=false waited=false err= + name=b pri=low-pri done=false waited=false err= + +# Add high priority tokens to the first store. This is not enough for quorum. +adjust_tokens + store_id=1 pri=HighPri tokens=1 +---- +t1/s1: reg=+1 B/+1 B ela=+1 B/+1 B +t1/s2: reg=+0 B/+1 B ela=+0 B/+1 B +t1/s3: reg=+0 B/+1 B ela=+0 B/+1 B + +# Cancel the context for the high priority evaluation 'a'. +cancel_context range_id=1 name=a +---- +range_id=1 tenant_id={1} local_replica_id=1 + name=a pri=high-pri done=true waited=false err=context canceled + name=b pri=low-pri done=false waited=false err= + +# Add high priority tokens to the second store. 'b' is elastic so it should not +# complete despite having a quorum of streams with available tokens. +adjust_tokens + store_id=2 pri=HighPri tokens=1 +---- +t1/s1: reg=+1 B/+1 B ela=+1 B/+1 B +t1/s2: reg=+1 B/+1 B ela=+1 B/+1 B +t1/s3: reg=+0 B/+1 B ela=+0 B/+1 B + +check_state +---- +range_id=1 tenant_id={1} local_replica_id=1 + name=a pri=high-pri done=true waited=false err=context canceled + name=b pri=low-pri done=false waited=false err= + +# Add high priority tokens to the third store. Now all stores have positive +# tokens and 'b' should complete. +adjust_tokens + store_id=3 pri=HighPri tokens=1 +---- +t1/s1: reg=+1 B/+1 B ela=+1 B/+1 B +t1/s2: reg=+1 B/+1 B ela=+1 B/+1 B +t1/s3: reg=+1 B/+1 B ela=+1 B/+1 B + +check_state +---- +range_id=1 tenant_id={1} local_replica_id=1 + name=a pri=high-pri done=true waited=false err=context canceled + name=b pri=low-pri done=true waited=true err= + +# Change the replica set: replace replica 3 with a new replica 4. 
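The stanzas above pin down the wait predicate: an elastic (low-pri) evaluation
waits until every replica stream in StateReplicate has positive tokens, while a
regular evaluation waits for a quorum of streams with tokens that must include
the required ones (leader and leaseholder). A condensed sketch of that
predicate with invented names, before the replica-set change announced above;
the real RangeController additionally handles refresh signals and replica-state
changes:

// evalWaitSatisfied reports whether an evaluation may proceed, given the set
// of streams that currently have positive tokens.
func evalWaitSatisfied(
	elastic bool, positive map[string]bool, required []string, quorum int,
) bool {
	// Required streams must all have tokens. For elastic work the required
	// set spans every replica in StateReplicate; for regular work it is the
	// leader and leaseholder.
	for _, s := range required {
		if !positive[s] {
			return false
		}
	}
	if elastic {
		return true
	}
	// Regular work additionally needs a quorum of streams with tokens.
	n := 0
	for _, hasTokens := range positive {
		if hasTokens {
			n++
		}
	}
	return n >= quorum
}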
+set_replicas +range_id=1 tenant_id=1 local_replica_id=1 + store_id=1 replica_id=1 type=VOTER_FULL state=StateReplicate + store_id=2 replica_id=2 type=VOTER_FULL state=StateReplicate + store_id=4 replica_id=4 type=VOTER_FULL state=StateReplicate +---- +r1: [(n1,s1):1*,(n2,s2):2,(n4,s4):4] + +adjust_tokens + store_id=1 pri=HighPri tokens=-1 +---- +t1/s1: reg=+0 B/+1 B ela=+0 B/+1 B +t1/s2: reg=+1 B/+1 B ela=+1 B/+1 B +t1/s3: reg=+1 B/+1 B ela=+1 B/+1 B +t1/s4: reg=+0 B/+1 B ela=+0 B/+1 B + +# Start a new high priority evaluation 'c'. It should not complete due to lack +# of quorum. +wait_for_eval name=c range_id=1 pri=HighPri +---- +range_id=1 tenant_id={1} local_replica_id=1 + name=a pri=high-pri done=true waited=false err=context canceled + name=b pri=low-pri done=true waited=true err= + name=c pri=high-pri done=false waited=false err= + +# Add high priority tokens back to the first store, restoring quorum. +adjust_tokens + store_id=1 pri=HighPri tokens=1 +---- +t1/s1: reg=+1 B/+1 B ela=+1 B/+1 B +t1/s2: reg=+1 B/+1 B ela=+1 B/+1 B +t1/s3: reg=+1 B/+1 B ela=+1 B/+1 B +t1/s4: reg=+0 B/+1 B ela=+0 B/+1 B + +# Check the state. The high priority evaluation 'c' should now complete. +check_state +---- +range_id=1 tenant_id={1} local_replica_id=1 + name=a pri=high-pri done=true waited=false err=context canceled + name=b pri=low-pri done=true waited=true err= + name=c pri=high-pri done=true waited=true err= + +# Test behavior with a non-voter replica. +set_replicas +range_id=1 tenant_id=1 local_replica_id=1 + store_id=1 replica_id=1 type=VOTER_FULL state=StateReplicate + store_id=2 replica_id=2 type=VOTER_FULL state=StateReplicate + store_id=3 replica_id=3 type=VOTER_FULL state=StateReplicate + store_id=4 replica_id=4 type=NON_VOTER state=StateReplicate +---- +r1: [(n1,s1):1*,(n2,s2):2,(n3,s3):3,(n4,s4):4NON_VOTER] + +# Start a new high priority evaluation 'd'. +wait_for_eval name=d range_id=1 pri=HighPri +---- +range_id=1 tenant_id={1} local_replica_id=1 + name=a pri=high-pri done=true waited=false err=context canceled + name=b pri=low-pri done=true waited=true err= + name=c pri=high-pri done=true waited=true err= + name=d pri=high-pri done=true waited=true err= + +# Remove tokens from s3, s1 and s2 have tokens which is enough for quorum. +adjust_tokens + store_id=3 pri=HighPri tokens=-1 +---- +t1/s1: reg=+1 B/+1 B ela=+1 B/+1 B +t1/s2: reg=+1 B/+1 B ela=+1 B/+1 B +t1/s3: reg=+0 B/+1 B ela=+0 B/+1 B +t1/s4: reg=+0 B/+1 B ela=+0 B/+1 B + +# Check the state. The high priority evaluation 'd' should complete despite the +# non-voter replica lacking tokens. +check_state +---- +range_id=1 tenant_id={1} local_replica_id=1 + name=a pri=high-pri done=true waited=false err=context canceled + name=b pri=low-pri done=true waited=true err= + name=c pri=high-pri done=true waited=true err= + name=d pri=high-pri done=true waited=true err= + +# Test behavior when changing leaseholder. +set_leaseholder range_id=1 replica_id=2 +---- +r1: [(n1,s1):1,(n2,s2):2*,(n3,s3):3,(n4,s4):4NON_VOTER] + +# Start a new high priority evaluation 'e'. This evaluation completes +# immediately because there are already sufficient tokens for the new +# leaseholder. 
+wait_for_eval name=e range_id=1 pri=HighPri +---- +range_id=1 tenant_id={1} local_replica_id=1 + name=a pri=high-pri done=true waited=false err=context canceled + name=b pri=low-pri done=true waited=true err= + name=c pri=high-pri done=true waited=true err= + name=d pri=high-pri done=true waited=true err= + name=e pri=high-pri done=true waited=true err= + +# Start another evaluation on a new range, which will intersect some of the +# stores of the first range. The evaluation on the first range will not +# complete until all streams have tokens, whereas the high priority evaluation +# on the second range will complete once a quorum has available tokens. +set_replicas +range_id=1 tenant_id=1 local_replica_id=1 + store_id=1 replica_id=1 type=VOTER_FULL state=StateReplicate + store_id=2 replica_id=2 type=VOTER_FULL state=StateReplicate + store_id=3 replica_id=5 type=VOTER_FULL state=StateReplicate +range_id=2 tenant_id=1 local_replica_id=1 + store_id=1 replica_id=1 type=VOTER_FULL state=StateReplicate + store_id=3 replica_id=3 type=VOTER_FULL state=StateReplicate + store_id=5 replica_id=5 type=VOTER_FULL state=StateReplicate +---- +r1: [(n1,s1):1,(n2,s2):2*,(n3,s3):5] +r2: [(n1,s1):1*,(n3,s3):3,(n5,s5):5] + +set_leaseholder range_id=1 replica_id=4 +---- +r1: [(n1,s1):1,(n2,s2):2,(n3,s3):5] +r2: [(n1,s1):1*,(n3,s3):3,(n5,s5):5] + +wait_for_eval name=f range_id=1 pri=LowPri +---- +range_id=1 tenant_id={1} local_replica_id=1 + name=a pri=high-pri done=true waited=false err=context canceled + name=b pri=low-pri done=true waited=true err= + name=c pri=high-pri done=true waited=true err= + name=d pri=high-pri done=true waited=true err= + name=e pri=high-pri done=true waited=true err= + name=f pri=low-pri done=false waited=false err= +range_id=2 tenant_id={1} local_replica_id=1 + +wait_for_eval name=g range_id=2 pri=HighPri +---- +range_id=1 tenant_id={1} local_replica_id=1 + name=a pri=high-pri done=true waited=false err=context canceled + name=b pri=low-pri done=true waited=true err= + name=c pri=high-pri done=true waited=true err= + name=d pri=high-pri done=true waited=true err= + name=e pri=high-pri done=true waited=true err= + name=f pri=low-pri done=false waited=false err= +range_id=2 tenant_id={1} local_replica_id=1 + name=g pri=high-pri done=false waited=false err= + +adjust_tokens + store_id=5 pri=HighPri tokens=1 +---- +t1/s1: reg=+1 B/+1 B ela=+1 B/+1 B +t1/s2: reg=+1 B/+1 B ela=+1 B/+1 B +t1/s3: reg=+0 B/+1 B ela=+0 B/+1 B +t1/s4: reg=+0 B/+1 B ela=+0 B/+1 B +t1/s5: reg=+1 B/+1 B ela=+1 B/+1 B + +check_state +---- +range_id=1 tenant_id={1} local_replica_id=1 + name=a pri=high-pri done=true waited=false err=context canceled + name=b pri=low-pri done=true waited=true err= + name=c pri=high-pri done=true waited=true err= + name=d pri=high-pri done=true waited=true err= + name=e pri=high-pri done=true waited=true err= + name=f pri=low-pri done=false waited=false err= +range_id=2 tenant_id={1} local_replica_id=1 + name=g pri=high-pri done=true waited=true err= + +# Adding elastic tokens to s3 should complete the low priority evaluation 'f', +# as all stores now have elastic tokens available. 
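The low-pri-to-elastic correspondence used throughout these stanzas comes from
the admission work-class mapping: priorities below NormalPri are treated as
elastic, NormalPri and above as regular. A sketch of that mapping (it mirrors
admissionpb.WorkClassFromPri; workClassOf is an illustrative wrapper, not part
of the package):

func workClassOf(pri admissionpb.WorkPriority) admissionpb.WorkClass {
	// Mirrors admissionpb.WorkClassFromPri: low priorities drain the elastic
	// token bucket, everything else the regular one.
	if pri < admissionpb.NormalPri {
		return admissionpb.ElasticWorkClass
	}
	return admissionpb.RegularWorkClass
}

The adjust_tokens directive below then adds the single elastic token on s3 that
'f' is waiting for.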
+adjust_tokens + store_id=3 pri=LowPri tokens=1 +---- +t1/s1: reg=+1 B/+1 B ela=+1 B/+1 B +t1/s2: reg=+1 B/+1 B ela=+1 B/+1 B +t1/s3: reg=+0 B/+1 B ela=+1 B/+1 B +t1/s4: reg=+0 B/+1 B ela=+0 B/+1 B +t1/s5: reg=+1 B/+1 B ela=+1 B/+1 B + +check_state +---- +range_id=1 tenant_id={1} local_replica_id=1 + name=a pri=high-pri done=true waited=false err=context canceled + name=b pri=low-pri done=true waited=true err= + name=c pri=high-pri done=true waited=true err= + name=d pri=high-pri done=true waited=true err= + name=e pri=high-pri done=true waited=true err= + name=f pri=low-pri done=true waited=true err= +range_id=2 tenant_id={1} local_replica_id=1 + name=g pri=high-pri done=true waited=true err= + +metrics +---- +kvflowcontrol.eval_wait.regular.requests.waiting : 0 +kvflowcontrol.eval_wait.regular.requests.admitted : 4 +kvflowcontrol.eval_wait.regular.requests.errored : 1 +kvflowcontrol.eval_wait.regular.requests.bypassed : 0 +kvflowcontrol.eval_wait.regular.duration.count : 5 +kvflowcontrol.eval_wait.elastic.requests.waiting : 0 +kvflowcontrol.eval_wait.elastic.requests.admitted : 2 +kvflowcontrol.eval_wait.elastic.requests.errored : 0 +kvflowcontrol.eval_wait.elastic.requests.bypassed : 0 +kvflowcontrol.eval_wait.elastic.duration.count : 2 + +# Adjust the tokens so that r1 doesn't have tokens on s3 or s1, then transfer +# s3 the lease for r1. +adjust_tokens + store_id=3 pri=LowPri tokens=-1 + store_id=1 pri=HighPri tokens=-1 +---- +t1/s1: reg=+0 B/+1 B ela=+0 B/+1 B +t1/s2: reg=+1 B/+1 B ela=+1 B/+1 B +t1/s3: reg=+0 B/+1 B ela=+0 B/+1 B +t1/s4: reg=+0 B/+1 B ela=+0 B/+1 B +t1/s5: reg=+1 B/+1 B ela=+1 B/+1 B + +set_leaseholder range_id=1 replica_id=5 +---- +r1: [(n1,s1):1,(n2,s2):2,(n3,s3):5*] +r2: [(n1,s1):1*,(n3,s3):3,(n5,s5):5] + +# Start another evaluation 'h' on r1. It should not complete as the leaseholder +# (s3) doesn't have available tokens and the leader (s1) doesn't have tokens. +wait_for_eval name=h range_id=1 pri=HighPri +---- +range_id=1 tenant_id={1} local_replica_id=1 + name=a pri=high-pri done=true waited=false err=context canceled + name=b pri=low-pri done=true waited=true err= + name=c pri=high-pri done=true waited=true err= + name=d pri=high-pri done=true waited=true err= + name=e pri=high-pri done=true waited=true err= + name=f pri=low-pri done=true waited=true err= + name=h pri=high-pri done=false waited=false err= +range_id=2 tenant_id={1} local_replica_id=1 + name=g pri=high-pri done=true waited=true err= + +# Add tokens to s3, this should not complete 'h' as the leader of r1 (s1) does +# not have tokens. +adjust_tokens + store_id=3 pri=HighPri tokens=1 +---- +t1/s1: reg=+0 B/+1 B ela=+0 B/+1 B +t1/s2: reg=+1 B/+1 B ela=+1 B/+1 B +t1/s3: reg=+1 B/+1 B ela=+1 B/+1 B +t1/s4: reg=+0 B/+1 B ela=+0 B/+1 B +t1/s5: reg=+1 B/+1 B ela=+1 B/+1 B + +# Start another evaluation 'i' on r1, it should also not complete until the +# leader (s1) has tokens, despite both the leaseholder (s3) and a quorum +# (s2,s3) having tokens available. Similar to above. 
+wait_for_eval name=i range_id=1 pri=HighPri
+----
+range_id=1 tenant_id={1} local_replica_id=1
+  name=a pri=high-pri done=true waited=false err=context canceled
+  name=b pri=low-pri done=true waited=true err=
+  name=c pri=high-pri done=true waited=true err=
+  name=d pri=high-pri done=true waited=true err=
+  name=e pri=high-pri done=true waited=true err=
+  name=f pri=low-pri done=true waited=true err=
+  name=h pri=high-pri done=false waited=false err=
+  name=i pri=high-pri done=false waited=false err=
+range_id=2 tenant_id={1} local_replica_id=1
+  name=g pri=high-pri done=true waited=true err=
+
+# Finally, add tokens to s1 to complete both evaluations 'h' and 'i'.
+adjust_tokens
+  store_id=1 pri=HighPri tokens=1
+----
+t1/s1: reg=+1 B/+1 B ela=+1 B/+1 B
+t1/s2: reg=+1 B/+1 B ela=+1 B/+1 B
+t1/s3: reg=+1 B/+1 B ela=+1 B/+1 B
+t1/s4: reg=+0 B/+1 B ela=+0 B/+1 B
+t1/s5: reg=+1 B/+1 B ela=+1 B/+1 B
+
+check_state
+----
+range_id=1 tenant_id={1} local_replica_id=1
+  name=a pri=high-pri done=true waited=false err=context canceled
+  name=b pri=low-pri done=true waited=true err=
+  name=c pri=high-pri done=true waited=true err=
+  name=d pri=high-pri done=true waited=true err=
+  name=e pri=high-pri done=true waited=true err=
+  name=f pri=low-pri done=true waited=true err=
+  name=h pri=high-pri done=true waited=true err=
+  name=i pri=high-pri done=true waited=true err=
+range_id=2 tenant_id={1} local_replica_id=1
+  name=g pri=high-pri done=true waited=true err=
+
+# No tokens on s3.
+adjust_tokens
+  store_id=3 pri=HighPri tokens=-1
+----
+t1/s1: reg=+1 B/+1 B ela=+1 B/+1 B
+t1/s2: reg=+1 B/+1 B ela=+1 B/+1 B
+t1/s3: reg=+0 B/+1 B ela=+0 B/+1 B
+t1/s4: reg=+0 B/+1 B ela=+0 B/+1 B
+t1/s5: reg=+1 B/+1 B ela=+1 B/+1 B
+
+# Start an evaluation 'j' on r1. It does not complete, since the leaseholder
+# (s3) has no tokens.
+wait_for_eval name=j range_id=1 pri=HighPri
+----
+range_id=1 tenant_id={1} local_replica_id=1
+  name=a pri=high-pri done=true waited=false err=context canceled
+  name=b pri=low-pri done=true waited=true err=
+  name=c pri=high-pri done=true waited=true err=
+  name=d pri=high-pri done=true waited=true err=
+  name=e pri=high-pri done=true waited=true err=
+  name=f pri=low-pri done=true waited=true err=
+  name=h pri=high-pri done=true waited=true err=
+  name=i pri=high-pri done=true waited=true err=
+  name=j pri=high-pri done=false waited=false err=
+range_id=2 tenant_id={1} local_replica_id=1
+  name=g pri=high-pri done=true waited=true err=
+
+# Close all the RangeControllers. Evaluation 'j' is done, but specifies waited
+# is false, and error is nil.
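The close_rcs directive below surfaces the third way a wait can resolve:
besides success and context cancellation, a waiter can be released without
having waited, e.g. because the controller was closed. A sketch of how a
caller might consume the WaitEndState values defined later in this change
(errRetryEval is a hypothetical sentinel, not part of the package):

var errRetryEval = errors.New("retry eval") // hypothetical sentinel

func handleWaitEnd(ctx context.Context, state rac2.WaitEndState) error {
	switch state {
	case rac2.WaitSuccess:
		// Required streams and a quorum had tokens; proceed with evaluation.
		return nil
	case rac2.ContextCanceled:
		// Propagate the cancellation to the caller.
		return ctx.Err()
	case rac2.RefreshWaitSignaled:
		// Range state changed while waiting; re-evaluate and wait again.
		return errRetryEval
	}
	return nil
}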
+close_rcs +---- +range_id=1 tenant_id={1} local_replica_id=1 + name=a pri=high-pri done=true waited=false err=context canceled + name=b pri=low-pri done=true waited=true err= + name=c pri=high-pri done=true waited=true err= + name=d pri=high-pri done=true waited=true err= + name=e pri=high-pri done=true waited=true err= + name=f pri=low-pri done=true waited=true err= + name=h pri=high-pri done=true waited=true err= + name=i pri=high-pri done=true waited=true err= + name=j pri=high-pri done=true waited=false err= +range_id=2 tenant_id={1} local_replica_id=1 + name=g pri=high-pri done=true waited=true err= + +metrics +---- +kvflowcontrol.eval_wait.regular.requests.waiting : 0 +kvflowcontrol.eval_wait.regular.requests.admitted : 6 +kvflowcontrol.eval_wait.regular.requests.errored : 1 +kvflowcontrol.eval_wait.regular.requests.bypassed : 1 +kvflowcontrol.eval_wait.regular.duration.count : 8 +kvflowcontrol.eval_wait.elastic.requests.waiting : 0 +kvflowcontrol.eval_wait.elastic.requests.admitted : 2 +kvflowcontrol.eval_wait.elastic.requests.errored : 0 +kvflowcontrol.eval_wait.elastic.requests.bypassed : 0 +kvflowcontrol.eval_wait.elastic.duration.count : 2 diff --git a/pkg/kv/kvserver/kvflowcontrol/rac2/testdata/token_adjustment b/pkg/kv/kvserver/kvflowcontrol/rac2/testdata/token_adjustment index 5cb66ae0d78d..b87891640db0 100644 --- a/pkg/kv/kvserver/kvflowcontrol/rac2/testdata/token_adjustment +++ b/pkg/kv/kvserver/kvflowcontrol/rac2/testdata/token_adjustment @@ -1,6 +1,21 @@ -init +init stream=1 ---- +metrics +---- +kvflowcontrol.streams.eval.regular.total_count : 1 +kvflowcontrol.streams.eval.regular.blocked_count: 0 +kvflowcontrol.tokens.eval.regular.available : 16777216 +kvflowcontrol.tokens.eval.regular.deducted : 0 +kvflowcontrol.tokens.eval.regular.returned : 0 +kvflowcontrol.tokens.eval.regular.unaccounted : 0 +kvflowcontrol.streams.eval.elastic.total_count : 1 +kvflowcontrol.streams.eval.elastic.blocked_count: 0 +kvflowcontrol.tokens.eval.elastic.available : 8388608 +kvflowcontrol.tokens.eval.elastic.deducted : 0 +kvflowcontrol.tokens.eval.elastic.returned : 0 +kvflowcontrol.tokens.eval.elastic.unaccounted : 0 + adjust class=regular delta=-1MiB class=regular delta=-7MiB @@ -36,9 +51,44 @@ history +2.0MiB elastic +10MiB | +2.0MiB +6.0MiB regular +16MiB | +8.0MiB -init +# Despite the elastic stream being unblocked by the time metrics is called, the +# stream was blocked for a non-zero duration between metric calls so we expect +# elastic.blocked_count=1. +metrics +---- +kvflowcontrol.streams.eval.regular.total_count : 1 +kvflowcontrol.streams.eval.regular.blocked_count: 0 +kvflowcontrol.tokens.eval.regular.available : 16777216 +kvflowcontrol.tokens.eval.regular.deducted : 16777216 +kvflowcontrol.tokens.eval.regular.returned : 16777216 +kvflowcontrol.tokens.eval.regular.unaccounted : 0 +kvflowcontrol.streams.eval.elastic.total_count : 1 +kvflowcontrol.streams.eval.elastic.blocked_count: 0 +kvflowcontrol.tokens.eval.elastic.available : 8388608 +kvflowcontrol.tokens.eval.elastic.deducted : 18874368 +kvflowcontrol.tokens.eval.elastic.returned : 18874368 +kvflowcontrol.tokens.eval.elastic.unaccounted : 0 + +init stream=2 ---- +# There should now be two streams in the metrics, double the available tokens +# for each work class. 
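A note on reading the metrics dumps below: available is a point-in-time gauge,
while deducted and returned accumulate across scrapes. The blocked-duration
bookkeeping behind the stream logging shown earlier follows the deltaStats
pattern from token_counter.go; a simplified sketch (field names shortened, not
the exact implementation):

type intervalStats struct {
	noTokenDuration time.Duration
	deducted        int64
	returned        int64
}

// getAndReset returns the stats accumulated since the previous call and
// starts a fresh interval; an in-progress no-token period is closed out at
// now, so that blockage between scrapes is still observed.
func getAndReset(s *intervalStats, now, noTokenStart time.Time, blocked bool) intervalStats {
	out := *s
	if blocked {
		out.noTokenDuration += now.Sub(noTokenStart)
	}
	*s = intervalStats{}
	return out
}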
+metrics +---- +kvflowcontrol.streams.eval.regular.total_count : 2 +kvflowcontrol.streams.eval.regular.blocked_count: 0 +kvflowcontrol.tokens.eval.regular.available : 33554432 +kvflowcontrol.tokens.eval.regular.deducted : 16777216 +kvflowcontrol.tokens.eval.regular.returned : 16777216 +kvflowcontrol.tokens.eval.regular.unaccounted : 0 +kvflowcontrol.streams.eval.elastic.total_count : 2 +kvflowcontrol.streams.eval.elastic.blocked_count: 0 +kvflowcontrol.tokens.eval.elastic.available : 16777216 +kvflowcontrol.tokens.eval.elastic.deducted : 18874368 +kvflowcontrol.tokens.eval.elastic.returned : 18874368 +kvflowcontrol.tokens.eval.elastic.unaccounted : 0 + adjust class=elastic delta=-7MiB class=regular delta=-7MiB @@ -47,9 +97,11 @@ class=regular delta=-1MiB class=regular delta=-6MiB class=regular delta=+6MiB class=regular delta=-9MiB -class=regular delta=+16MiB +class=regular delta=+17MiB +class=elastic delta=+1MiB ---- + history ---- regular | elastic @@ -62,4 +114,39 @@ history -6.0MiB regular +2.0MiB | -7.0MiB (elastic blocked) +6.0MiB regular +8.0MiB | -1.0MiB (elastic blocked) -9.0MiB regular -1.0MiB | -10MiB (regular and elastic blocked) - +16MiB regular +15MiB | +6.0MiB + +17MiB regular +16MiB | +7.0MiB + +1.0MiB elastic +16MiB | +8.0MiB + +metrics +---- +kvflowcontrol.streams.eval.regular.total_count : 2 +kvflowcontrol.streams.eval.regular.blocked_count: 0 +kvflowcontrol.tokens.eval.regular.available : 33554432 +kvflowcontrol.tokens.eval.regular.deducted : 40894464 +kvflowcontrol.tokens.eval.regular.returned : 40894464 +kvflowcontrol.tokens.eval.regular.unaccounted : 0 +kvflowcontrol.streams.eval.elastic.total_count : 2 +kvflowcontrol.streams.eval.elastic.blocked_count: 0 +kvflowcontrol.tokens.eval.elastic.available : 16777216 +kvflowcontrol.tokens.eval.elastic.deducted : 50331648 +kvflowcontrol.tokens.eval.elastic.returned : 50331648 +kvflowcontrol.tokens.eval.elastic.unaccounted : 0 + +adjust +class=regular delta=-16MiB +---- + +metrics +---- +kvflowcontrol.streams.eval.regular.total_count : 2 +kvflowcontrol.streams.eval.regular.blocked_count: 1 +kvflowcontrol.tokens.eval.regular.available : 16777216 +kvflowcontrol.tokens.eval.regular.deducted : 57671680 +kvflowcontrol.tokens.eval.regular.returned : 40894464 +kvflowcontrol.tokens.eval.regular.unaccounted : 0 +kvflowcontrol.streams.eval.elastic.total_count : 2 +kvflowcontrol.streams.eval.elastic.blocked_count: 1 +kvflowcontrol.tokens.eval.elastic.available : 0 +kvflowcontrol.tokens.eval.elastic.deducted : 67108864 +kvflowcontrol.tokens.eval.elastic.returned : 50331648 +kvflowcontrol.tokens.eval.elastic.unaccounted : 0 diff --git a/pkg/kv/kvserver/kvflowcontrol/rac2/testdata/token_tracker b/pkg/kv/kvserver/kvflowcontrol/rac2/testdata/token_tracker new file mode 100644 index 000000000000..0a097e024a61 --- /dev/null +++ b/pkg/kv/kvserver/kvflowcontrol/rac2/testdata/token_tracker @@ -0,0 +1,148 @@ +track +term=1 index=10 tokens=100 pri=LowPri +term=1 index=20 tokens=200 pri=NormalPri +term=1 index=30 tokens=300 pri=HighPri +---- +tracked: term=1 index=10 tokens=100 pri=LowPri +tracked: term=1 index=20 tokens=200 pri=NormalPri +tracked: term=1 index=30 tokens=300 pri=HighPri + +state +---- +LowPri: + term=1 index=10 tokens=100 +NormalPri: + term=1 index=20 tokens=200 +HighPri: + term=1 index=30 tokens=300 + +# The Tracker should maintain correct ordering even when entries are not added +# in ascending index order but are in-order w.r.t priority. 
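In other words, the tracker keeps one FIFO queue per priority, so indexes only
need to be monotonic within a priority, not across priorities; the track
directive below appends index 50 at NormalPri before index 40 at LowPri. A
compact sketch of the track/untrack shape, with invented types (the real
Tracker also records the raft term and uses kvflowcontrol.Tokens):

type trackedEntry struct{ index, tokens uint64 }

// tracker maps a priority name to its FIFO queue of tracked deductions.
type tracker map[string][]trackedEntry

// untrack releases every entry at pri with index <= upTo, returning the
// total tokens freed back to the stream's token counter.
func (t tracker) untrack(pri string, upTo uint64) (freed uint64) {
	q := t[pri]
	for len(q) > 0 && q[0].index <= upTo {
		freed += q[0].tokens
		q = q[1:]
	}
	t[pri] = q
	return freed
}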
+track +term=2 index=50 tokens=500 pri=NormalPri +term=2 index=40 tokens=400 pri=LowPri +term=2 index=51 tokens=400 pri=LowPri +---- +tracked: term=2 index=50 tokens=500 pri=NormalPri +tracked: term=2 index=40 tokens=400 pri=LowPri +tracked: term=2 index=51 tokens=400 pri=LowPri + +state +---- +LowPri: + term=1 index=10 tokens=100 + term=2 index=40 tokens=400 + term=2 index=51 tokens=400 +NormalPri: + term=1 index=20 tokens=200 + term=2 index=50 tokens=500 +HighPri: + term=1 index=30 tokens=300 + +track +term=3 index=60 tokens=600 pri=HighPri +term=3 index=70 tokens=700 pri=LowPri +---- +tracked: term=3 index=60 tokens=600 pri=HighPri +tracked: term=3 index=70 tokens=700 pri=LowPri + +state +---- +LowPri: + term=1 index=10 tokens=100 + term=2 index=40 tokens=400 + term=2 index=51 tokens=400 + term=3 index=70 tokens=700 +NormalPri: + term=1 index=20 tokens=200 + term=2 index=50 tokens=500 +HighPri: + term=1 index=30 tokens=300 + term=3 index=60 tokens=600 + +untrack term=2 +LowPri=45 +NormalPri=0 +HighPri=0 +---- +returned: tokens=500 pri=LowPri +returned: tokens=200 pri=NormalPri +returned: tokens=300 pri=HighPri + +state +---- +LowPri: + term=2 index=51 tokens=400 + term=3 index=70 tokens=700 +NormalPri: + term=2 index=50 tokens=500 +HighPri: + term=3 index=60 tokens=600 + +# Untrack with a higher term. +untrack term=3 +NormalPri=60 +HighPri=60 +---- +returned: tokens=400 pri=LowPri +returned: tokens=500 pri=NormalPri +returned: tokens=600 pri=HighPri + +state +---- +LowPri: + term=3 index=70 tokens=700 + +untrack_ge index=40 +---- +returned: tokens=700 pri=LowPri + +state +---- + +untrack_all +---- + +state +---- + +# Test tracking and untracking with different terms +track +term=4 index=80 tokens=800 pri=NormalPri +term=5 index=90 tokens=900 pri=NormalPri +term=5 index=100 tokens=999 pri=HighPri +---- +tracked: term=4 index=80 tokens=800 pri=NormalPri +tracked: term=5 index=90 tokens=900 pri=NormalPri +tracked: term=5 index=100 tokens=999 pri=HighPri + +state +---- +NormalPri: + term=4 index=80 tokens=800 + term=5 index=90 tokens=900 +HighPri: + term=5 index=100 tokens=999 + +untrack term=4 +NormalPri=95 +HighPri=95 +---- +returned: tokens=800 pri=NormalPri + +state +---- +NormalPri: + term=5 index=90 tokens=900 +HighPri: + term=5 index=100 tokens=999 + +untrack_all +---- +returned: tokens=900 pri=NormalPri +returned: tokens=999 pri=HighPri + +state +---- + +# vim:ft=sh diff --git a/pkg/kv/kvserver/kvflowcontrol/rac2/testdata/wait_for_eval b/pkg/kv/kvserver/kvflowcontrol/rac2/testdata/wait_for_eval new file mode 100644 index 000000000000..47b88217bb96 --- /dev/null +++ b/pkg/kv/kvserver/kvflowcontrol/rac2/testdata/wait_for_eval @@ -0,0 +1,262 @@ +# This test demonstrates the behavior of concurrent WaitForEval operations with +# binary token state management. +# +# Start the first WaitForEval operation with a quorum of 2 and three handles. +wait_for_eval name=a quorum=2 +handle: stream=s1 required=true +handle: stream=s2 required=true +handle: stream=s3 required=false +---- +a: waiting + +# Start a second WaitForEval operation with a quorum of 2 and three handles. +wait_for_eval name=b quorum=2 +handle: stream=s4 required=true +handle: stream=s5 required=true +handle: stream=s6 required=false +---- +a: waiting +b: waiting + +# Set tokens for streams s1 and s2 to positive, this should trigger the +# completion of a but not b. 
+set_tokens s1=positive s2=positive
+----
+s1: positive
+s2: positive
+s3: non-positive
+s4: non-positive
+s5: non-positive
+s6: non-positive
+
+check_state
+----
+a: wait_success
+b: waiting
+
+# Set tokens for streams s4 and s6 to positive. This won't trigger b, as s5 is
+# required.
+set_tokens s4=positive s6=positive
+----
+s1: positive
+s2: positive
+s3: non-positive
+s4: positive
+s5: non-positive
+s6: positive
+
+# Check the state. b should still be waiting as s5 (required) has no tokens.
+check_state
+----
+a: wait_success
+b: waiting
+
+# Now set s5 to positive, which should complete b. Revert s1 to non-positive.
+set_tokens s5=positive s1=non-positive
+----
+s1: non-positive
+s2: positive
+s3: non-positive
+s4: positive
+s5: positive
+s6: positive
+
+check_state
+----
+a: wait_success
+b: wait_success
+
+# Test out multiple operations with overlapping streams.
+wait_for_eval name=c quorum=2
+handle: stream=s1 required=false
+handle: stream=s4 required=false
+handle: stream=s7 required=false
+----
+a: wait_success
+b: wait_success
+c: waiting
+
+wait_for_eval name=d quorum=3
+handle: stream=s2 required=true
+handle: stream=s3 required=true
+handle: stream=s7 required=true
+----
+a: wait_success
+b: wait_success
+c: waiting
+d: waiting
+
+check_state
+----
+a: wait_success
+b: wait_success
+c: waiting
+d: waiting
+
+# Set s7 to positive, which should complete c but not d, as d requires s3,
+# which still has no tokens.
+set_tokens s7=positive
+----
+s1: non-positive
+s2: positive
+s3: non-positive
+s4: positive
+s5: positive
+s6: positive
+s7: positive
+
+check_state
+----
+a: wait_success
+b: wait_success
+c: wait_success
+d: waiting
+
+wait_for_eval name=e quorum=0
+handle: stream=s8 required=true
+handle: stream=s9 required=true
+handle: stream=s10 required=true
+----
+a: wait_success
+b: wait_success
+c: wait_success
+d: waiting
+e: waiting
+
+# Set only s8 to positive.
+set_tokens s8=positive
+----
+s1: non-positive
+s10: non-positive
+s2: positive
+s3: non-positive
+s4: positive
+s5: positive
+s6: positive
+s7: positive
+s8: positive
+s9: non-positive
+
+# Cancel e before it completes.
+cancel name=e
+----
+a: wait_success
+b: wait_success
+c: wait_success
+d: waiting
+e: context_cancelled
+
+# Test out the refresh signal on f.
+wait_for_eval name=f quorum=2
+handle: stream=s11 required=true
+handle: stream=s12 required=true
+handle: stream=s13 required=false
+----
+a: wait_success
+b: wait_success
+c: wait_success
+d: waiting
+e: context_cancelled
+f: waiting
+
+# Send a refresh signal before any tokens are available.
+refresh name=f
+----
+a: wait_success
+b: wait_success
+c: wait_success
+d: waiting
+e: context_cancelled
+f: refresh_wait_signaled
+
+# Lastly, test out a WaitForEval operation with 3 handles overlapping the next
+# WaitForEval operation.
+wait_for_eval name=g quorum=3
+handle: stream=s14 required=true
+handle: stream=s15 required=true
+handle: stream=s16 required=true
+----
+a: wait_success
+b: wait_success
+c: wait_success
+d: waiting
+e: context_cancelled
+f: refresh_wait_signaled
+g: waiting
+
+wait_for_eval name=h quorum=2
+handle: stream=s14 required=false
+handle: stream=s15 required=true
+handle: stream=s17 required=true
+----
+a: wait_success
+b: wait_success
+c: wait_success
+d: waiting
+e: context_cancelled
+f: refresh_wait_signaled
+g: waiting
+h: waiting
+
+# Set some tokens to positive.
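Before the final stanzas, it is worth spelling out the wait loop these states
come from: wait_success, context_cancelled, and refresh_wait_signaled are the
three exits of the underlying select. A schematic version, assuming a single
aggregated tokens channel (the real WaitForEval multiplexes one handle per
stream and re-checks token counts before declaring success):

func waitForEval(
	ctx context.Context, refresh <-chan struct{}, tokens <-chan struct{},
) rac2.WaitEndState {
	select {
	case <-ctx.Done():
		return rac2.ContextCanceled
	case <-refresh:
		// The range state changed; the caller should recompute its handles
		// and wait again.
		return rac2.RefreshWaitSignaled
	case <-tokens:
		// Assumes tokens are still positive; the real code confirms this
		// before unblocking the next waiter.
		return rac2.WaitSuccess
	}
}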
+set_tokens s14=positive s15=positive s17=positive +---- +s1: non-positive +s10: non-positive +s11: non-positive +s12: non-positive +s13: non-positive +s14: positive +s15: positive +s16: non-positive +s17: positive +s2: positive +s3: non-positive +s4: positive +s5: positive +s6: positive +s7: positive +s8: positive +s9: non-positive + +check_state +---- +a: wait_success +b: wait_success +c: wait_success +d: waiting +e: context_cancelled +f: refresh_wait_signaled +g: waiting +h: wait_success + +# Set the last required token for g to positive, as well as d. +set_tokens s16=positive s3=positive +---- +s1: non-positive +s10: non-positive +s11: non-positive +s12: non-positive +s13: non-positive +s14: positive +s15: positive +s16: positive +s17: positive +s2: positive +s3: positive +s4: positive +s5: positive +s6: positive +s7: positive +s8: positive +s9: non-positive + +check_state +---- +a: wait_success +b: wait_success +c: wait_success +d: wait_success +e: context_cancelled +f: refresh_wait_signaled +g: wait_success +h: wait_success diff --git a/pkg/kv/kvserver/kvflowcontrol/rac2/token_counter.go b/pkg/kv/kvserver/kvflowcontrol/rac2/token_counter.go index a6f1a8efa745..cbf94cafaab3 100644 --- a/pkg/kv/kvserver/kvflowcontrol/rac2/token_counter.go +++ b/pkg/kv/kvserver/kvflowcontrol/rac2/token_counter.go @@ -12,39 +12,21 @@ package rac2 import ( "context" + "fmt" + "reflect" + "time" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvflowcontrol" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/util/admission/admissionpb" "github.com/cockroachdb/cockroach/pkg/util/buildutil" + "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/syncutil" + "github.com/cockroachdb/errors" + "github.com/cockroachdb/redact" ) -// TokenCounter is the interface for a token counter that can be used to deduct -// and return flow control tokens. Additionally, it can be used to wait for -// tokens to become available, and to check if tokens are available without -// blocking. -// -// TODO(kvoli): Consider de-interfacing if not necessary for testing. -type TokenCounter interface { - // TokensAvailable returns true if tokens are available. If false, it returns - // a handle that may be used for waiting for tokens to become available. - TokensAvailable(admissionpb.WorkClass) (available bool, tokenWaitingHandle TokenWaitingHandle) - // TryDeduct attempts to deduct flow tokens for the given work class. If - // there are no tokens available, 0 tokens are returned. When less than the - // requested token count is available, partial tokens are returned - // corresponding to this partial amount. - TryDeduct( - context.Context, admissionpb.WorkClass, kvflowcontrol.Tokens) kvflowcontrol.Tokens - // Deduct deducts (without blocking) flow tokens for the given work class. If - // there are not enough available tokens, the token counter will go into debt - // (negative available count) and still issue the requested number of tokens. - Deduct(context.Context, admissionpb.WorkClass, kvflowcontrol.Tokens) - // Return returns flow tokens for the given work class. - Return(context.Context, admissionpb.WorkClass, kvflowcontrol.Tokens) -} - // TokenWaitingHandle is the interface for waiting for positive tokens from a // token counter. 
type TokenWaitingHandle interface {
@@ -100,50 +82,67 @@ type tokenCounterPerWorkClass struct {
 	// needs to schedule the goroutine that got the entry for it to unblock
 	// another.
 	signalCh chan struct{}
+	stats struct {
+		deltaStats
+		noTokenStartTime time.Time
+	}
+}
+
+type deltaStats struct {
+	noTokenDuration time.Duration
+	tokensDeducted, tokensReturned kvflowcontrol.Tokens
 }
 
 func makeTokenCounterPerWorkClass(
-	wc admissionpb.WorkClass, limit kvflowcontrol.Tokens,
+	wc admissionpb.WorkClass, limit kvflowcontrol.Tokens, now time.Time,
 ) tokenCounterPerWorkClass {
-	return tokenCounterPerWorkClass{
+	twc := tokenCounterPerWorkClass{
 		wc: wc,
 		tokens: limit,
 		limit: limit,
 		signalCh: make(chan struct{}, 1),
 	}
+	twc.stats.noTokenStartTime = now
+	return twc
 }
 
 // adjustTokensLocked adjusts the tokens for the given work class by delta.
 func (twc *tokenCounterPerWorkClass) adjustTokensLocked(
-	ctx context.Context, delta kvflowcontrol.Tokens,
-) {
-	var unaccounted kvflowcontrol.Tokens
+	ctx context.Context, delta kvflowcontrol.Tokens, now time.Time,
+) (adjustment, unaccounted kvflowcontrol.Tokens) {
 	before := twc.tokens
 	twc.tokens += delta
-
-	if delta <= 0 {
-		// Nothing left to do, since we know tokens didn't increase.
-		return
-	}
-	if twc.tokens > twc.limit {
-		unaccounted = twc.tokens - twc.limit
-		twc.tokens = twc.limit
-	}
-	if before <= 0 && twc.tokens > 0 {
-		twc.signal()
+	if delta > 0 {
+		twc.stats.tokensReturned += delta
+		if twc.tokens > twc.limit {
+			unaccounted = twc.tokens - twc.limit
+			twc.tokens = twc.limit
+		}
+		if before <= 0 && twc.tokens > 0 {
+			twc.signal()
+			twc.stats.noTokenDuration += now.Sub(twc.stats.noTokenStartTime)
+		}
+	} else {
+		twc.stats.tokensDeducted -= delta
+		if before > 0 && twc.tokens <= 0 {
+			twc.stats.noTokenStartTime = now
+		}
 	}
 	if buildutil.CrdbTestBuild && unaccounted != 0 {
 		log.Fatalf(ctx, "unaccounted[%s]=%d delta=%d limit=%d",
 			twc.wc, unaccounted, delta, twc.limit)
 	}
+
+	adjustment = twc.tokens - before
+	return adjustment, unaccounted
 }
 
 func (twc *tokenCounterPerWorkClass) setLimitLocked(
-	ctx context.Context, limit kvflowcontrol.Tokens,
+	ctx context.Context, limit kvflowcontrol.Tokens, now time.Time,
 ) {
 	before := twc.limit
 	twc.limit = limit
-	twc.adjustTokensLocked(ctx, twc.limit-before)
+	twc.adjustTokensLocked(ctx, twc.limit-before, now)
 }
 
 func (twc *tokenCounterPerWorkClass) signal() {
@@ -154,6 +153,18 @@ func (twc *tokenCounterPerWorkClass) signal() {
 	}
 }
 
+func (twc *tokenCounterPerWorkClass) getAndResetStats(now time.Time) deltaStats {
+	stats := twc.stats.deltaStats
+	if twc.tokens <= 0 {
+		stats.noTokenDuration += now.Sub(twc.stats.noTokenStartTime)
+	}
+	twc.stats.deltaStats = deltaStats{}
+	// Doesn't matter if twc.tokens is actually > 0 since in that case we won't
+	// use this value.
+	twc.stats.noTokenStartTime = now
+	return stats
+}
+
 type tokensPerWorkClass struct {
 	regular, elastic kvflowcontrol.Tokens
 }
@@ -163,6 +174,8 @@ type tokensPerWorkClass struct {
 // returning and waiting for flow tokens.
 type tokenCounter struct {
 	settings *cluster.Settings
+	clock *hlc.Clock
+	metrics *TokenCounterMetrics
 
 	mu struct {
 		syncutil.RWMutex
@@ -171,29 +184,35 @@ type tokenCounter struct {
 	}
 }
 
-var _ TokenCounter = &tokenCounter{}
-
-func newTokenCounter(settings *cluster.Settings) *tokenCounter {
+// newTokenCounter creates a new TokenCounter.
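adjustTokensLocked now threads a clock reading through every adjustment so the counter can account for time spent with no tokens: a deduction that crosses from positive to non-positive records a start time, and a return that crosses back accumulates the elapsed interval. A stripped-down sketch of that crossing bookkeeping (hypothetical names, not the real struct):

```go
package main

import (
	"fmt"
	"time"
)

// zeroCrossingStats accumulates how long a counter spent at or below zero.
type zeroCrossingStats struct {
	tokens           int64
	noTokenDuration  time.Duration
	noTokenStartTime time.Time
}

// adjust mirrors the crossing logic: only transitions across zero
// touch the duration bookkeeping.
func (s *zeroCrossingStats) adjust(delta int64, now time.Time) {
	before := s.tokens
	s.tokens += delta
	if delta > 0 && before <= 0 && s.tokens > 0 {
		// Tokens became available again: close the no-token interval.
		s.noTokenDuration += now.Sub(s.noTokenStartTime)
	} else if delta < 0 && before > 0 && s.tokens <= 0 {
		// Tokens ran out: open a new no-token interval.
		s.noTokenStartTime = now
	}
}

func main() {
	start := time.Now()
	s := &zeroCrossingStats{tokens: 1}
	s.adjust(-1, start)                    // runs out at t=0
	s.adjust(+1, start.Add(3*time.Second)) // replenished 3s later
	fmt.Println(s.noTokenDuration)         // 3s
}
```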
+func newTokenCounter( + settings *cluster.Settings, clock *hlc.Clock, metrics *TokenCounterMetrics, +) *tokenCounter { t := &tokenCounter{ settings: settings, + clock: clock, + metrics: metrics, } limit := tokensPerWorkClass{ regular: kvflowcontrol.Tokens(kvflowcontrol.RegularTokensPerStream.Get(&settings.SV)), elastic: kvflowcontrol.Tokens(kvflowcontrol.ElasticTokensPerStream.Get(&settings.SV)), } + now := clock.PhysicalTime() + t.mu.counters[admissionpb.RegularWorkClass] = makeTokenCounterPerWorkClass( - admissionpb.RegularWorkClass, limit.regular) + admissionpb.RegularWorkClass, limit.regular, now) t.mu.counters[admissionpb.ElasticWorkClass] = makeTokenCounterPerWorkClass( - admissionpb.ElasticWorkClass, limit.elastic) + admissionpb.ElasticWorkClass, limit.elastic, now) onChangeFunc := func(ctx context.Context) { + now := t.clock.PhysicalTime() t.mu.Lock() defer t.mu.Unlock() t.mu.counters[admissionpb.RegularWorkClass].setLimitLocked( - ctx, kvflowcontrol.Tokens(kvflowcontrol.RegularTokensPerStream.Get(&settings.SV))) + ctx, kvflowcontrol.Tokens(kvflowcontrol.RegularTokensPerStream.Get(&settings.SV)), now) t.mu.counters[admissionpb.ElasticWorkClass].setLimitLocked( - ctx, kvflowcontrol.Tokens(kvflowcontrol.ElasticTokensPerStream.Get(&settings.SV))) + ctx, kvflowcontrol.Tokens(kvflowcontrol.ElasticTokensPerStream.Get(&settings.SV)), now) } kvflowcontrol.RegularTokensPerStream.SetOnChange(&settings.SV, onChangeFunc) @@ -201,6 +220,22 @@ func newTokenCounter(settings *cluster.Settings) *tokenCounter { return t } +// String returns a string representation of the token counter. +func (b *tokenCounter) String() string { + return redact.StringWithoutMarkers(b) +} + +// SafeFormat implements the redact.SafeFormatter interface. +func (b *tokenCounter) SafeFormat(w redact.SafePrinter, _ rune) { + b.mu.RLock() + defer b.mu.RUnlock() + w.Printf("reg=%v/%v ela=%v/%v", + b.mu.counters[admissionpb.RegularWorkClass].tokens, + b.mu.counters[admissionpb.RegularWorkClass].limit, + b.mu.counters[admissionpb.ElasticWorkClass].tokens, + b.mu.counters[admissionpb.ElasticWorkClass].limit) +} + func (t *tokenCounter) tokens(wc admissionpb.WorkClass) kvflowcontrol.Tokens { t.mu.RLock() defer t.mu.RUnlock() @@ -229,6 +264,7 @@ func (t *tokenCounter) TokensAvailable( func (t *tokenCounter) TryDeduct( ctx context.Context, wc admissionpb.WorkClass, tokens kvflowcontrol.Tokens, ) kvflowcontrol.Tokens { + now := t.clock.PhysicalTime() t.mu.Lock() defer t.mu.Unlock() @@ -238,7 +274,7 @@ func (t *tokenCounter) TryDeduct( } adjust := min(tokensAvailable, tokens) - t.adjustLocked(ctx, wc, -adjust) + t.adjustLocked(ctx, wc, -adjust, now) return adjust } @@ -302,27 +338,175 @@ func (wh waitHandle) ConfirmHaveTokensAndUnblockNextWaiter() (haveTokens bool) { return haveTokens } +type tokenWaitingHandleInfo struct { + handle TokenWaitingHandle + // For regular work, this will be set for the leaseholder and leader. For + // elastic work this will be set for the aforementioned, and all replicas + // which are in StateReplicate. + requiredWait bool +} + +// WaitEndState is the state returned by WaitForEval and indicates the result +// of waiting. +type WaitEndState int32 + +const ( + // WaitSuccess indicates that the required quorum and required wait handles + // were signaled and had tokens available. + WaitSuccess WaitEndState = iota + // ContextCanceled indicates that the context was canceled. + ContextCanceled + // RefreshWaitSignaled indicates that the refresh channel was signaled. 
+ RefreshWaitSignaled +) + +func (s WaitEndState) String() string { + return redact.StringWithoutMarkers(s) +} + +// SafeFormat implements the redact.SafeFormatter interface. +func (s WaitEndState) SafeFormat(w redact.SafePrinter, _ rune) { + switch s { + case WaitSuccess: + w.Print("wait_success") + case ContextCanceled: + w.Print("context_cancelled") + case RefreshWaitSignaled: + w.Print("refresh_wait_signaled") + default: + panic(fmt.Sprintf("unknown wait_end_state(%d)", int(s))) + } +} + +// WaitForEval waits for a quorum of handles to be signaled and have tokens +// available, including all the required wait handles. The caller can provide a +// refresh channel, which when signaled will cause the function to return +// RefreshWaitSignaled, allowing the caller to retry waiting with updated +// handles. +func WaitForEval( + ctx context.Context, + refreshWaitCh <-chan struct{}, + handles []tokenWaitingHandleInfo, + requiredQuorum int, + scratch []reflect.SelectCase, +) (state WaitEndState, scratch2 []reflect.SelectCase) { + scratch = scratch[:0] + if len(handles) < requiredQuorum { + log.Fatalf(ctx, "%v", errors.AssertionFailedf( + "invalid arguments to WaitForEval: len(handles)=%d < required_quorum=%d", + len(handles), requiredQuorum)) + } + + scratch = append(scratch, + reflect.SelectCase{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(ctx.Done())}) + scratch = append(scratch, + reflect.SelectCase{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(refreshWaitCh)}) + + requiredWaitCount := 0 + for _, h := range handles { + if h.requiredWait { + requiredWaitCount++ + } + scratch = append(scratch, + reflect.SelectCase{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(h.handle.WaitChannel())}) + } + if requiredQuorum == 0 && requiredWaitCount == 0 { + log.Fatalf(ctx, "both requiredQuorum and requiredWaitCount are zero") + } + + // m is the current length of the scratch slice. + m := len(scratch) + signaledCount := 0 + + // Wait for (1) at least a quorumCount of handles to be signaled and have + // available tokens; as well as (2) all of the required wait handles to be + // signaled and have tokens available. + for signaledCount < requiredQuorum || requiredWaitCount > 0 { + chosen, _, _ := reflect.Select(scratch) + switch chosen { + case 0: + return ContextCanceled, scratch + case 1: + return RefreshWaitSignaled, scratch + default: + handleInfo := handles[chosen-2] + if available := handleInfo.handle.ConfirmHaveTokensAndUnblockNextWaiter(); !available { + // The handle was signaled but does not currently have tokens + // available. Continue waiting on this handle. + continue + } + + signaledCount++ + if handleInfo.requiredWait { + requiredWaitCount-- + } + m-- + scratch[chosen], scratch[m] = scratch[m], scratch[chosen] + scratch = scratch[:m] + handles[chosen-2], handles[m-2] = handles[m-2], handles[chosen-2] + } + } + + return WaitSuccess, scratch +} + // adjust the tokens for the given work class by delta. The adjustment is // performed atomically. 
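WaitForEval multiplexes over a caller-determined number of wait channels, which is what reflect.Select exists for; satisfied cases are swapped to the tail of the slice and the active window shrunk, so later iterations only poll the channels still outstanding. A self-contained sketch of that select-and-compact pattern (toy channels standing in for the real handles):

```go
package main

import (
	"fmt"
	"reflect"
)

// waitForN blocks until n of the given channels have been signaled,
// compacting satisfied cases out of the select set as it goes.
func waitForN(chans []chan struct{}, n int) {
	cases := make([]reflect.SelectCase, 0, len(chans))
	for _, ch := range chans {
		cases = append(cases, reflect.SelectCase{
			Dir: reflect.SelectRecv, Chan: reflect.ValueOf(ch),
		})
	}
	m := len(cases)
	for signaled := 0; signaled < n; signaled++ {
		chosen, _, _ := reflect.Select(cases[:m])
		// Swap the satisfied case past the end of the active window,
		// mirroring the compaction done in WaitForEval.
		m--
		cases[chosen], cases[m] = cases[m], cases[chosen]
	}
}

func main() {
	chans := []chan struct{}{
		make(chan struct{}, 1), make(chan struct{}, 1), make(chan struct{}, 1),
	}
	chans[0] <- struct{}{}
	chans[2] <- struct{}{}
	waitForN(chans, 2) // returns once two distinct channels have fired
	fmt.Println("quorum reached")
}
```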
func (t *tokenCounter) adjust( ctx context.Context, class admissionpb.WorkClass, delta kvflowcontrol.Tokens, ) { + now := t.clock.PhysicalTime() t.mu.Lock() defer t.mu.Unlock() - t.adjustLocked(ctx, class, delta) + t.adjustLocked(ctx, class, delta, now) } func (t *tokenCounter) adjustLocked( - ctx context.Context, class admissionpb.WorkClass, delta kvflowcontrol.Tokens, + ctx context.Context, class admissionpb.WorkClass, delta kvflowcontrol.Tokens, now time.Time, ) { + var adjustment, unaccounted tokensPerWorkClass switch class { case admissionpb.RegularWorkClass: - t.mu.counters[admissionpb.RegularWorkClass].adjustTokensLocked(ctx, delta) - // Regular {deductions,returns} also affect elastic flow tokens. - t.mu.counters[admissionpb.ElasticWorkClass].adjustTokensLocked(ctx, delta) + adjustment.regular, unaccounted.regular = + t.mu.counters[admissionpb.RegularWorkClass].adjustTokensLocked(ctx, delta, now) + // Regular {deductions,returns} also affect elastic flow tokens. + adjustment.elastic, unaccounted.elastic = + t.mu.counters[admissionpb.ElasticWorkClass].adjustTokensLocked(ctx, delta, now) + case admissionpb.ElasticWorkClass: // Elastic {deductions,returns} only affect elastic flow tokens. - t.mu.counters[admissionpb.ElasticWorkClass].adjustTokensLocked(ctx, delta) + adjustment.elastic, unaccounted.elastic = + t.mu.counters[admissionpb.ElasticWorkClass].adjustTokensLocked(ctx, delta, now) + } + + // Adjust metrics if any tokens were actually adjusted or unaccounted for + // tokens were detected. + if adjustment.regular != 0 || adjustment.elastic != 0 { + t.metrics.onTokenAdjustment(adjustment) + } + if unaccounted.regular != 0 || unaccounted.elastic != 0 { + t.metrics.onUnaccounted(unaccounted) } } + +// testingSetTokens is used in tests to set the tokens for a given work class, +// ignoring any adjustments. 
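adjustLocked encodes the asymmetry between work classes: a regular deduction consumes from both the regular and elastic counters, while an elastic deduction touches only the elastic counter, so elastic work can never starve regular work. A tiny sketch of that coupling, simplified to two plain integers:

```go
package main

import "fmt"

// pool holds per-work-class token counts; regular traffic draws from both.
type pool struct {
	regular, elastic int64
}

func (p *pool) deduct(regularClass bool, n int64) {
	if regularClass {
		p.regular -= n // regular deductions hit both counters...
		p.elastic -= n
		return
	}
	p.elastic -= n // ...elastic deductions hit only the elastic counter.
}

func main() {
	p := &pool{regular: 16, elastic: 8}
	p.deduct(true, 4)                 // regular work
	p.deduct(false, 2)                // elastic work
	fmt.Println(p.regular, p.elastic) // 12 2
}
```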
+func (t *tokenCounter) testingSetTokens( + ctx context.Context, wc admissionpb.WorkClass, tokens kvflowcontrol.Tokens, +) { + t.mu.Lock() + defer t.mu.Unlock() + + t.mu.counters[wc].adjustTokensLocked(ctx, + tokens-t.mu.counters[wc].tokens, t.clock.PhysicalTime()) +} + +func (t *tokenCounter) GetAndResetStats(now time.Time) (regularStats, elasticStats deltaStats) { + t.mu.Lock() + defer t.mu.Unlock() + + regularStats = t.mu.counters[admissionpb.RegularWorkClass].getAndResetStats(now) + elasticStats = t.mu.counters[admissionpb.ElasticWorkClass].getAndResetStats(now) + return regularStats, elasticStats +} diff --git a/pkg/kv/kvserver/kvflowcontrol/rac2/token_counter_test.go b/pkg/kv/kvserver/kvflowcontrol/rac2/token_counter_test.go index bf5de48184b6..d54c69729263 100644 --- a/pkg/kv/kvserver/kvflowcontrol/rac2/token_counter_test.go +++ b/pkg/kv/kvserver/kvflowcontrol/rac2/token_counter_test.go @@ -13,15 +13,20 @@ package rac2 import ( "context" "fmt" + "sort" "strings" "sync" "testing" + "time" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvflowcontrol" + "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/util/admission/admissionpb" + "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" + "github.com/cockroachdb/cockroach/pkg/util/syncutil" "github.com/cockroachdb/datadriven" "github.com/dustin/go-humanize" "github.com/stretchr/testify/require" @@ -41,6 +46,10 @@ func TestTokenAdjustment(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) + provider := NewStreamTokenCounterProvider( + cluster.MakeTestingClusterSettings(), + hlc.NewClockForTesting(nil), + ) var ( ctx = context.Background() counter *tokenCounter @@ -51,7 +60,9 @@ func TestTokenAdjustment(t *testing.T) { func(t *testing.T, d *datadriven.TestData) string { switch d.Cmd { case "init": - counter = newTokenCounter(cluster.MakeTestingClusterSettings()) + var stream int + d.ScanArgs(t, "stream", &stream) + counter = provider.Eval(kvflowcontrol.Stream{StoreID: roachpb.StoreID(stream)}) adjustments = nil return "" @@ -117,6 +128,25 @@ func TestTokenAdjustment(t *testing.T) { } return buf.String() + case "metrics": + provider.UpdateMetricGauges() + var buf strings.Builder + // We are only using the eval token counter in this test. 
+ counterMetrics := provider.tokenMetrics.CounterMetrics[flowControlEvalMetricType] + streamMetrics := provider.tokenMetrics.StreamMetrics[flowControlEvalMetricType] + for _, wc := range []admissionpb.WorkClass{ + admissionpb.RegularWorkClass, + admissionpb.ElasticWorkClass, + } { + fmt.Fprintf(&buf, "%-48v: %v\n", streamMetrics.Count[wc].GetName(), streamMetrics.Count[wc].Value()) + fmt.Fprintf(&buf, "%-48v: %v\n", streamMetrics.BlockedCount[wc].GetName(), streamMetrics.BlockedCount[wc].Value()) + fmt.Fprintf(&buf, "%-48v: %v\n", streamMetrics.TokensAvailable[wc].GetName(), streamMetrics.TokensAvailable[wc].Value()) + fmt.Fprintf(&buf, "%-48v: %v\n", counterMetrics.Deducted[wc].GetName(), counterMetrics.Deducted[wc].Count()) + fmt.Fprintf(&buf, "%-48v: %v\n", counterMetrics.Returned[wc].GetName(), counterMetrics.Returned[wc].Count()) + fmt.Fprintf(&buf, "%-48v: %v\n", counterMetrics.Unaccounted[wc].GetName(), counterMetrics.Unaccounted[wc].Count()) + } + return buf.String() + default: return fmt.Sprintf("unknown command: %s", d.Cmd) } @@ -169,7 +199,11 @@ func TestTokenCounter(t *testing.T) { settings := cluster.MakeTestingClusterSettings() kvflowcontrol.ElasticTokensPerStream.Override(ctx, &settings.SV, int64(limits.elastic)) kvflowcontrol.RegularTokensPerStream.Override(ctx, &settings.SV, int64(limits.regular)) - counter := newTokenCounter(settings) + counter := newTokenCounter( + settings, + hlc.NewClockForTesting(nil), + newTokenCounterMetrics(flowControlEvalMetricType), + ) assertStateReset := func(t *testing.T) { available, handle := counter.TokensAvailable(admissionpb.ElasticWorkClass) @@ -292,3 +326,257 @@ func TestTokenCounter(t *testing.T) { assertStateReset(t) }) } + +func (t *tokenCounter) testingHandle() waitHandle { + return waitHandle{wc: admissionpb.RegularWorkClass, b: t} +} + +type namedTokenCounter struct { + *tokenCounter + parent *evalTestState + stream string +} + +type evalTestState struct { + settings *cluster.Settings + mu struct { + syncutil.Mutex + counters map[string]*namedTokenCounter + evals map[string]*testEval + } +} + +type testEval struct { + state WaitEndState + handles []tokenWaitingHandleInfo + quorum int + cancel context.CancelFunc + refreshCh chan struct{} +} + +func newTestState() *evalTestState { + ts := &evalTestState{ + settings: cluster.MakeTestingClusterSettings(), + } + // We will only use at most one token per stream, as we only require positive + // / non-positive token counts. + kvflowcontrol.RegularTokensPerStream.Override(context.Background(), &ts.settings.SV, 1) + ts.mu.counters = make(map[string]*namedTokenCounter) + ts.mu.evals = make(map[string]*testEval) + return ts +} + +func (ts *evalTestState) getOrCreateTC(stream string) *namedTokenCounter { + ts.mu.Lock() + defer ts.mu.Unlock() + + tc, exists := ts.mu.counters[stream] + if !exists { + tc = &namedTokenCounter{ + parent: ts, + tokenCounter: newTokenCounter( + ts.settings, + hlc.NewClockForTesting(nil), + newTokenCounterMetrics(flowControlEvalMetricType), + ), + stream: stream, + } + // Ensure the token counter starts with no tokens initially. 
+		tc.adjust(context.Background(),
+			admissionpb.RegularWorkClass,
+			-kvflowcontrol.Tokens(kvflowcontrol.RegularTokensPerStream.Get(&ts.settings.SV)),
+		)
+		ts.mu.counters[stream] = tc
+	}
+	return tc
+}
+
+func (ts *evalTestState) startWaitForEval(
+	name string, handles []tokenWaitingHandleInfo, quorum int,
+) {
+	ts.mu.Lock()
+	defer ts.mu.Unlock()
+
+	ctx, cancel := context.WithCancel(context.Background())
+	refreshCh := make(chan struct{})
+	ts.mu.evals[name] = &testEval{
+		state: -1,
+		handles: handles,
+		quorum: quorum,
+		cancel: cancel,
+		refreshCh: refreshCh,
+	}
+
+	go func() {
+		state, _ := WaitForEval(ctx, refreshCh, handles, quorum, nil)
+		ts.mu.Lock()
+		defer ts.mu.Unlock()
+
+		ts.mu.evals[name].state = state
+	}()
+}
+
+func (ts *evalTestState) setCounterTokens(stream string, positive bool) {
+	ts.mu.Lock()
+	defer ts.mu.Unlock()
+
+	tc, exists := ts.mu.counters[stream]
+	if !exists {
+		panic(fmt.Sprintf("no token counter found for stream: %s", stream))
+	}
+
+	wasPositive := tc.tokens(admissionpb.RegularWorkClass) > 0
+	if !wasPositive && positive {
+		tc.tokenCounter.adjust(context.Background(), admissionpb.RegularWorkClass, +1)
+	} else if wasPositive && !positive {
+		tc.tokenCounter.adjust(context.Background(), admissionpb.RegularWorkClass, -1)
+	}
+}
+
+func (ts *evalTestState) tokenCountsString() string {
+	ts.mu.Lock()
+	defer ts.mu.Unlock()
+
+	var streams []string
+	for stream := range ts.mu.counters {
+		streams = append(streams, stream)
+	}
+	sort.Strings(streams)
+
+	var b strings.Builder
+	for _, stream := range streams {
+		tc := ts.mu.counters[stream]
+		posString := "non-positive"
+		if tc.tokens(admissionpb.RegularWorkClass) > 0 {
+			posString = "positive"
+		}
+		b.WriteString(fmt.Sprintf("%s: %s\n", stream, posString))
+	}
+	return strings.TrimSpace(b.String())
+}
+
+func (ts *evalTestState) evalStatesString() string {
+	// Sleep briefly to give any evaluations that have just completed a chance
+	// to update their state before we lock the mutex.
+	time.Sleep(100 * time.Millisecond)
+	ts.mu.Lock()
+	defer ts.mu.Unlock()
+
+	var states []string
+	for name, op := range ts.mu.evals {
+		switch op.state {
+		case -1:
+			states = append(states, fmt.Sprintf("%s: waiting", name))
+		default:
+			states = append(states, fmt.Sprintf("%s: %s", name, op.state))
+		}
+	}
+	sort.Strings(states)
+	return strings.Join(states, "\n")
+}
+
+// TestWaitForEval is a datadriven test that exercises the WaitForEval function.
+//
+// - wait_for_eval name=<name> quorum=<quorum> [handle: stream=<stream> required=<true|false>]
+// name: the name of the WaitForEval operation.
+// quorum: the number of handles that must be unblocked for the operation to
+// succeed.
+// stream: the stream name.
+// required: whether the handle is required for the operation to succeed.
+//
+// - set_tokens <stream>=<positive|non-positive>
+// stream: the stream name.
+// positive: whether the stream should have positive tokens.
+//
+// - check_state
+// Prints the current state of all WaitForEval operations.
+//
+// - cancel name=<name>
+// name: the name of the WaitForEval operation to cancel.
+//
+// - refresh name=<name>
+// name: the name of the WaitForEval operation to refresh.
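For reference, a hypothetical input in the syntax documented above, mirroring the testdata/wait_for_eval file earlier in this diff: a two-handle wait that completes once its required stream has tokens.

```
wait_for_eval name=x quorum=1
handle: stream=s1 required=true
handle: stream=s2 required=false
----
x: waiting

set_tokens s1=positive
----
s1: positive
s2: non-positive

check_state
----
x: wait_success
```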
+func TestWaitForEval(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + ts := newTestState() + datadriven.RunTest(t, "testdata/wait_for_eval", func(t *testing.T, d *datadriven.TestData) string { + switch d.Cmd { + case "wait_for_eval": + var name string + var quorum int + var handles []tokenWaitingHandleInfo + + d.ScanArgs(t, "name", &name) + d.ScanArgs(t, "quorum", &quorum) + for _, line := range strings.Split(d.Input, "\n") { + require.True(t, strings.HasPrefix(line, "handle:")) + line = strings.TrimPrefix(line, "handle:") + line = strings.TrimSpace(line) + parts := strings.Split(line, " ") + require.Len(t, parts, 2) + + parts[0] = strings.TrimSpace(parts[0]) + require.True(t, strings.HasPrefix(parts[0], "stream=")) + parts[0] = strings.TrimPrefix(strings.TrimSpace(parts[0]), "stream=") + stream := parts[0] + + parts[1] = strings.TrimSpace(parts[1]) + require.True(t, strings.HasPrefix(parts[1], "required=")) + parts[1] = strings.TrimPrefix(strings.TrimSpace(parts[1]), "required=") + required := parts[1] == "true" + + handleInfo := tokenWaitingHandleInfo{ + handle: ts.getOrCreateTC(stream).testingHandle(), + requiredWait: required, + } + handles = append(handles, handleInfo) + } + + ts.startWaitForEval(name, handles, quorum) + return ts.evalStatesString() + + case "set_tokens": + for _, arg := range d.CmdArgs { + require.Equal(t, 1, len(arg.Vals)) + ts.setCounterTokens(arg.Key, arg.Vals[0] == "positive") + } + return ts.tokenCountsString() + + case "check_state": + return ts.evalStatesString() + + case "cancel": + var name string + d.ScanArgs(t, "name", &name) + func() { + ts.mu.Lock() + defer ts.mu.Unlock() + if op, exists := ts.mu.evals[name]; exists { + op.cancel() + } else { + panic(fmt.Sprintf("no WaitForEval operation with name: %s", name)) + } + }() + return ts.evalStatesString() + + case "refresh": + var name string + d.ScanArgs(t, "name", &name) + func() { + ts.mu.Lock() + defer ts.mu.Unlock() + if op, exists := ts.mu.evals[name]; exists { + op.refreshCh <- struct{}{} + } else { + panic(fmt.Sprintf("no WaitForEval operation with name: %s", name)) + } + }() + return ts.evalStatesString() + + default: + panic(fmt.Sprintf("unknown command: %s", d.Cmd)) + } + }) +} diff --git a/pkg/kv/kvserver/kvflowcontrol/rac2/token_tracker.go b/pkg/kv/kvserver/kvflowcontrol/rac2/token_tracker.go new file mode 100644 index 000000000000..dd7f855a036b --- /dev/null +++ b/pkg/kv/kvserver/kvflowcontrol/rac2/token_tracker.go @@ -0,0 +1,129 @@ +// Copyright 2024 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package rac2 + +import ( + "context" + + "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvflowcontrol" + "github.com/cockroachdb/cockroach/pkg/raft/raftpb" + "github.com/cockroachdb/cockroach/pkg/util/log" +) + +// Tracker tracks flow token deductions for a replicaSendStream. Tokens are +// deducted for an in-flight log entry identified by its raft log index and +// term with a given RaftPriority. +type Tracker struct { + // tracked entries are stored in increasing order of (term, index), per + // priority. 
+	tracked [raftpb.NumPriorities][]tracked
+
+	stream kvflowcontrol.Stream // used for logging only
+}
+
+// tracked represents tracked flow tokens; they're tracked with respect to a
+// raft log index and term.
+type tracked struct {
+	tokens kvflowcontrol.Tokens
+	index, term uint64
+}
+
+func (dt *Tracker) Init(stream kvflowcontrol.Stream) {
+	*dt = Tracker{
+		tracked: [raftpb.NumPriorities][]tracked{},
+		stream: stream,
+	}
+}
+
+// Track records a token deduction of the given priority, identified by the
+// given raft log index and term.
+func (t *Tracker) Track(
+	ctx context.Context, term uint64, index uint64, pri raftpb.Priority, tokens kvflowcontrol.Tokens,
+) bool {
+	if len(t.tracked[pri]) >= 1 {
+		last := t.tracked[pri][len(t.tracked[pri])-1]
+		// Tracker exists in the context of a single replicaSendStream, which cannot
+		// span the leader losing leadership and regaining it. So the indices must
+		// advance.
+		if last.index >= index {
+			log.Fatalf(ctx, "expected in order tracked log indexes (%d < %d)",
+				last.index, index)
+			return false
+		}
+		if last.term > term {
+			log.Fatalf(ctx, "expected in order tracked leader terms (%d < %d)",
+				last.term, term)
+			return false
+		}
+	}
+
+	t.tracked[pri] = append(t.tracked[pri], tracked{
+		tokens: tokens,
+		index: index,
+		term: term,
+	})
+
+	return true
+}
+
+// Untrack removes, per priority, all token deductions that have indexes less
+// than or equal to the admitted index provided for that priority and terms
+// less than or equal to the given leader term, returning the sum of the
+// removed tokens.
+func (t *Tracker) Untrack(
+	term uint64, admitted [raftpb.NumPriorities]uint64,
+) (returned [raftpb.NumPriorities]kvflowcontrol.Tokens) {
+	for pri := range admitted {
+		uptoIndex := admitted[pri]
+		var untracked int
+		for n := len(t.tracked[pri]); untracked < n; untracked++ {
+			deduction := t.tracked[pri][untracked]
+			if deduction.term > term || (deduction.term == term && deduction.index > uptoIndex) {
+				break
+			}
+			returned[pri] += deduction.tokens
+		}
+		t.tracked[pri] = t.tracked[pri][untracked:]
+	}
+
+	return returned
+}
+
+// UntrackGE untracks, across all priorities, token deductions that have
+// indexes greater than or equal to the one provided.
+func (t *Tracker) UntrackGE(index uint64) (returned [raftpb.NumPriorities]kvflowcontrol.Tokens) {
+	for pri := range t.tracked {
+		j := len(t.tracked[pri]) - 1
+		for j >= 0 {
+			tr := t.tracked[pri][j]
+			if tr.index >= index {
+				returned[pri] += tr.tokens
+				j--
+			} else {
+				break
+			}
+		}
+		t.tracked[pri] = t.tracked[pri][:j+1]
+	}
+
+	return returned
+}
+
+// UntrackAll iterates through all tracked token deductions, untracking all of
+// them and returning the sum of tokens for each priority.
+func (t *Tracker) UntrackAll() (returned [raftpb.NumPriorities]kvflowcontrol.Tokens) {
+	for pri, deductions := range t.tracked {
+		for _, deduction := range deductions {
+			returned[pri] += deduction.tokens
+		}
+	}
+	t.tracked = [raftpb.NumPriorities][]tracked{}
+
+	return returned
+}
diff --git a/pkg/kv/kvserver/kvflowcontrol/rac2/token_tracker_test.go b/pkg/kv/kvserver/kvflowcontrol/rac2/token_tracker_test.go
new file mode 100644
index 000000000000..915098ec99cc
--- /dev/null
+++ b/pkg/kv/kvserver/kvflowcontrol/rac2/token_tracker_test.go
@@ -0,0 +1,132 @@
+// Copyright 2024 The Cockroach Authors.
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package rac2 + +import ( + "context" + "fmt" + "strconv" + "strings" + "testing" + + "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvflowcontrol" + "github.com/cockroachdb/cockroach/pkg/raft/raftpb" + "github.com/cockroachdb/cockroach/pkg/util/leaktest" + "github.com/cockroachdb/cockroach/pkg/util/log" + "github.com/cockroachdb/datadriven" + "github.com/stretchr/testify/require" +) + +func formatTrackerState(t *Tracker) string { + var result strings.Builder + for pri, tracked := range t.tracked { + if len(tracked) > 0 { + result.WriteString(fmt.Sprintf("%v:\n", raftpb.Priority(pri))) + for _, tr := range tracked { + result.WriteString(fmt.Sprintf(" term=%d index=%-2d tokens=%-3d\n", + tr.term, tr.index, tr.tokens)) + } + } + } + return result.String() +} + +func formatUntracked(untracked [raftpb.NumPriorities]kvflowcontrol.Tokens) string { + var buf strings.Builder + for pri, tokens := range untracked { + if tokens > 0 { + buf.WriteString(fmt.Sprintf("returned: tokens=%-4d pri=%v\n", tokens, raftpb.Priority(pri))) + } + } + return buf.String() +} + +func TestTokenTracker(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + ctx := context.Background() + tracker := &Tracker{} + tracker.Init(kvflowcontrol.Stream{}) + datadriven.RunTest(t, "testdata/token_tracker", func(t *testing.T, d *datadriven.TestData) string { + switch d.Cmd { + case "track": + var buf strings.Builder + for _, line := range strings.Split(d.Input, "\n") { + line = strings.TrimSpace(line) + parts := strings.Split(line, " ") + require.Len(t, parts, 4) + + parts[0] = strings.TrimSpace(parts[0]) + require.True(t, strings.HasPrefix(parts[0], "term=")) + parts[0] = strings.TrimPrefix(parts[0], "term=") + term, err := strconv.ParseUint(parts[0], 10, 64) + require.NoError(t, err) + + parts[1] = strings.TrimSpace(parts[1]) + require.True(t, strings.HasPrefix(parts[1], "index=")) + parts[1] = strings.TrimPrefix(parts[1], "index=") + index, err := strconv.ParseUint(parts[1], 10, 64) + require.NoError(t, err) + + parts[2] = strings.TrimSpace(parts[2]) + require.True(t, strings.HasPrefix(parts[2], "tokens=")) + parts[2] = strings.TrimPrefix(parts[2], "tokens=") + tokens, err := strconv.ParseInt(parts[2], 10, 64) + require.NoError(t, err) + + parts[3] = strings.TrimSpace(parts[3]) + require.True(t, strings.HasPrefix(parts[3], "pri=")) + parts[3] = strings.TrimPrefix(parts[3], "pri=") + pri := AdmissionToRaftPriority(parsePriority(t, parts[3])) + + tracker.Track(ctx, term, index, pri, kvflowcontrol.Tokens(tokens)) + buf.WriteString(fmt.Sprintf("tracked: term=%d index=%-2d tokens=%-3d pri=%v\n", + term, index, tokens, pri)) + } + return buf.String() + + case "untrack": + var term uint64 + d.ScanArgs(t, "term", &term) + var admitted [raftpb.NumPriorities]uint64 + for _, line := range strings.Split(d.Input, "\n") { + line = strings.TrimSpace(line) + if line == "" { + continue + } + parts := strings.Split(line, "=") + require.Len(t, parts, 2) + priStr := strings.TrimSpace(parts[0]) + indexStr := strings.TrimSpace(parts[1]) + pri := AdmissionToRaftPriority(parsePriority(t, priStr)) + index, err := strconv.ParseUint(indexStr, 10, 64) + require.NoError(t, err) + admitted[pri] = index + } + return formatUntracked(tracker.Untrack(term, 
admitted)) + + case "untrack_ge": + var index uint64 + d.ScanArgs(t, "index", &index) + return formatUntracked(tracker.UntrackGE(index)) + + case "untrack_all": + return formatUntracked(tracker.UntrackAll()) + + case "state": + return formatTrackerState(tracker) + + default: + return fmt.Sprintf("unknown command: %s", d.Cmd) + } + }) +} diff --git a/pkg/kv/kvserver/kvflowcontrol/replica_rac2/BUILD.bazel b/pkg/kv/kvserver/kvflowcontrol/replica_rac2/BUILD.bazel index 66e78d11f313..15d56c79fe33 100644 --- a/pkg/kv/kvserver/kvflowcontrol/replica_rac2/BUILD.bazel +++ b/pkg/kv/kvserver/kvflowcontrol/replica_rac2/BUILD.bazel @@ -4,20 +4,27 @@ go_library( name = "replica_rac2", srcs = [ "admission.go", + "close_scheduler.go", "doc.go", "processor.go", ], importpath = "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvflowcontrol/replica_rac2", visibility = ["//visibility:public"], deps = [ + "//pkg/kv/kvserver/kvflowcontrol", + "//pkg/kv/kvserver/kvflowcontrol/kvflowcontrolpb", "//pkg/kv/kvserver/kvflowcontrol/rac2", "//pkg/kv/kvserver/raftlog", "//pkg/raft/raftpb", "//pkg/roachpb", + "//pkg/settings/cluster", "//pkg/util/admission/admissionpb", "//pkg/util/buildutil", + "//pkg/util/hlc", "//pkg/util/log", + "//pkg/util/stop", "//pkg/util/syncutil", + "//pkg/util/timeutil", "@com_github_cockroachdb_errors//:errors", ], ) @@ -26,11 +33,13 @@ go_test( name = "replica_rac2_test", srcs = [ "admission_test.go", + "close_scheduler_test.go", "processor_test.go", ], data = glob(["testdata/**"]), embed = [":replica_rac2"], deps = [ + "//pkg/kv/kvserver/kvflowcontrol", "//pkg/kv/kvserver/kvflowcontrol/kvflowcontrolpb", "//pkg/kv/kvserver/kvflowcontrol/rac2", "//pkg/kv/kvserver/kvserverbase", @@ -38,13 +47,17 @@ go_test( "//pkg/kv/kvserver/raftlog", "//pkg/raft/raftpb", "//pkg/roachpb", + "//pkg/settings/cluster", "//pkg/testutils/datapathutils", "//pkg/util/admission/admissionpb", "//pkg/util/leaktest", "//pkg/util/log", "//pkg/util/protoutil", + "//pkg/util/stop", "//pkg/util/syncutil", + "//pkg/util/timeutil", "@com_github_cockroachdb_datadriven//:datadriven", + "@com_github_cockroachdb_errors//:errors", "@com_github_stretchr_testify//require", ], ) diff --git a/pkg/kv/kvserver/kvflowcontrol/replica_rac2/close_scheduler.go b/pkg/kv/kvserver/kvflowcontrol/replica_rac2/close_scheduler.go new file mode 100644 index 000000000000..3b0350a1d7da --- /dev/null +++ b/pkg/kv/kvserver/kvflowcontrol/replica_rac2/close_scheduler.go @@ -0,0 +1,201 @@ +// Copyright 2024 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package replica_rac2 + +import ( + "container/heap" + "context" + "time" + + "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvflowcontrol/rac2" + "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/util/stop" + "github.com/cockroachdb/cockroach/pkg/util/syncutil" + "github.com/cockroachdb/cockroach/pkg/util/timeutil" +) + +type streamCloseScheduler struct { + stopper *stop.Stopper + clock timeutil.TimeSource + scheduler RaftScheduler + // nonEmptyCh is used to signal the scheduler that there are events to + // process. 
When the heap is empty, the scheduler will wait for the next
+	// event to be added before processing, by waiting on this channel.
+	nonEmptyCh chan struct{}
+
+	mu struct {
+		syncutil.Mutex
+		scheduled scheduledQueue
+	}
+}
+
+type scheduledCloseEvent struct {
+	rangeID roachpb.RangeID
+	at time.Time
+}
+
+// scheduledQueue implements the heap.Interface.
+type scheduledQueue struct {
+	items []scheduledCloseEvent
+}
+
+func NewStreamCloseScheduler(
+	stopper *stop.Stopper, clock timeutil.TimeSource, scheduler RaftScheduler,
+) *streamCloseScheduler {
+	return &streamCloseScheduler{stopper: stopper, scheduler: scheduler, clock: clock}
+}
+
+func (s *streamCloseScheduler) Start(ctx context.Context) error {
+	s.nonEmptyCh = make(chan struct{}, 1)
+	return s.stopper.RunAsyncTask(ctx,
+		"flow-control-stream-close-scheduler", s.run)
+}
+
+// streamCloseScheduler implements the rac2.ProbeToCloseTimerScheduler
+// interface.
+var _ rac2.ProbeToCloseTimerScheduler = &streamCloseScheduler{}
+
+// ScheduleSendStreamCloseRaftMuLocked schedules a callback with a raft event
+// after the given delay.
+//
+// Requires raftMu to be held.
+func (s *streamCloseScheduler) ScheduleSendStreamCloseRaftMuLocked(
+	ctx context.Context, rangeID roachpb.RangeID, delay time.Duration,
+) {
+	now := s.clock.Now()
+	event := scheduledCloseEvent{
+		rangeID: rangeID,
+		at: now.Add(delay),
+	}
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	curLen := s.mu.scheduled.Len()
+	recalcDelay := (curLen > 0 && s.mu.scheduled.items[0].at.After(event.at)) || curLen == 0
+	heap.Push(&s.mu.scheduled, event)
+	if recalcDelay {
+		// This is the first item in the queue, or this item is scheduled ahead of
+		// the first currently scheduled item, so signal the scheduler to
+		// recalculate the delay.
+		select {
+		case s.nonEmptyCh <- struct{}{}:
+		default:
+		}
+	}
+}
+
+// maxStreamCloserDelay is the maximum time the stream closer will wait before
+// checking for the next event. When there are no events to process, this
+// constant is used to prevent the timer from firing needlessly.
+const maxStreamCloserDelay = 24 * time.Hour
+
+func (s *streamCloseScheduler) run(_ context.Context) {
+	timer := s.clock.NewTimer()
+	timer.Reset(s.nextDelay(s.clock.Now()))
+	defer timer.Stop()
+
+	for {
+		// When there are no more events to wait for, the timer is set to the
+		// maxStreamCloserDelay. When an event is added, the nonEmptyCh will be
+		// signaled and the timer will be reset to the next event's delay.
+		select {
+		case <-s.stopper.ShouldQuiesce():
+			return
+		case <-s.nonEmptyCh:
+		case <-timer.Ch():
+			timer.MarkRead()
+		}
+
+		now := s.clock.Now()
+		for _, event := range s.readyEvents(now) {
+			s.scheduler.EnqueueRaftReady(event.rangeID)
+		}
+		now = s.clock.Now()
+		nextDelay := s.nextDelay(now)
+		timer.Reset(nextDelay)
+	}
+}
+
+// nextDelay returns the time to wait until the next event is ready to be
+// processed, or if there are no events, returns a long duration.
+func (s *streamCloseScheduler) nextDelay(now time.Time) (delay time.Duration) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	delay = maxStreamCloserDelay
+	if s.mu.scheduled.Len() > 0 {
+		next := heap.Pop(&s.mu.scheduled).(scheduledCloseEvent)
+		if delay = next.at.Sub(now); delay <= 0 {
+			// A non-positive delay will cause the timer to error, so we set it to a
+			// small value instead, which fires immediately.
+			delay = time.Nanosecond
+		}
+		heap.Push(&s.mu.scheduled, next)
+	}
+
+	return delay
+}
+
+// readyEvents returns a slice of scheduled events which are ready.
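scheduledQueue is a textbook container/heap min-heap keyed on (time, rangeID), and nextDelay peeks at the minimum element to size the timer. A condensed, standalone sketch of the same pattern, with plain ints standing in for range IDs:

```go
package main

import (
	"container/heap"
	"fmt"
	"time"
)

type event struct {
	rangeID int
	at      time.Time
}

// eventHeap orders events by (at, rangeID), soonest first.
type eventHeap []event

func (h eventHeap) Len() int      { return len(h) }
func (h eventHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
func (h eventHeap) Less(i, j int) bool {
	if h[i].at.Equal(h[j].at) {
		return h[i].rangeID < h[j].rangeID
	}
	return h[i].at.Before(h[j].at)
}
func (h *eventHeap) Push(x interface{}) { *h = append(*h, x.(event)) }
func (h *eventHeap) Pop() interface{} {
	old := *h
	n := len(old)
	item := old[n-1]
	*h = old[:n-1]
	return item
}

// nextDelay sizes a timer from the soonest event, clamping non-positive
// delays to a nanosecond so the timer always fires.
func nextDelay(h *eventHeap, now time.Time) time.Duration {
	if h.Len() == 0 {
		return 24 * time.Hour // idle: park the timer far in the future
	}
	if d := (*h)[0].at.Sub(now); d > 0 {
		return d
	}
	return time.Nanosecond
}

func main() {
	now := time.Now()
	h := &eventHeap{}
	heap.Push(h, event{rangeID: 2, at: now.Add(5 * time.Second)})
	heap.Push(h, event{rangeID: 1, at: now.Add(2 * time.Second)})
	fmt.Println(nextDelay(h, now)) // 2s: the soonest event wins
}
```

One design note: peeking at index 0 suffices for a min-heap; the diff instead pops and re-pushes the minimum, which is equivalent but also keeps the heap invariant explicit.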
+func (s *streamCloseScheduler) readyEvents(now time.Time) []scheduledCloseEvent { + s.mu.Lock() + defer s.mu.Unlock() + + var events []scheduledCloseEvent + for s.mu.scheduled.Len() > 0 { + next := s.mu.scheduled.items[0] + if next.at.After(now) { + break + } + events = append(events, heap.Pop(&s.mu.scheduled).(scheduledCloseEvent)) + } + + return events +} + +func (s scheduledCloseEvent) Less(other scheduledCloseEvent) bool { + if s.at.Equal(other.at) { + return s.rangeID < other.rangeID + } + return s.at.Before(other.at) +} + +// Len returns the number of items in the priority queue. +func (pq *scheduledQueue) Len() int { + return len(pq.items) +} + +// Less reports whether the element with index i should sort before the element +// with index j. +func (pq *scheduledQueue) Less(i, j int) bool { + return pq.items[i].Less(pq.items[j]) +} + +// Swap swaps the elements with indexes i and j. +func (pq *scheduledQueue) Swap(i, j int) { + pq.items[i], pq.items[j] = pq.items[j], pq.items[i] +} + +// Push adds x as an element to the priority queue. +func (pq *scheduledQueue) Push(x interface{}) { + item := x.(scheduledCloseEvent) + pq.items = append(pq.items, item) +} + +// Pop removes and returns the minimum element (according to Less) from the +// priority queue. +func (pq *scheduledQueue) Pop() interface{} { + old := pq.items + n := len(old) + item := old[n-1] + pq.items = old[0 : n-1] + return item +} diff --git a/pkg/kv/kvserver/kvflowcontrol/replica_rac2/close_scheduler_test.go b/pkg/kv/kvserver/kvflowcontrol/replica_rac2/close_scheduler_test.go new file mode 100644 index 000000000000..4b4d01ed031c --- /dev/null +++ b/pkg/kv/kvserver/kvflowcontrol/replica_rac2/close_scheduler_test.go @@ -0,0 +1,145 @@ +// Copyright 2024 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package replica_rac2 + +import ( + "context" + "fmt" + "sort" + "strconv" + "strings" + "testing" + "time" + + "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/util/leaktest" + "github.com/cockroachdb/cockroach/pkg/util/log" + "github.com/cockroachdb/cockroach/pkg/util/stop" + "github.com/cockroachdb/cockroach/pkg/util/timeutil" + "github.com/cockroachdb/datadriven" + "github.com/stretchr/testify/require" +) + +type testingRaftScheduler struct { + clock timeutil.TimeSource + history []scheduledCloseEvent +} + +func (t *testingRaftScheduler) EnqueueRaftReady(id roachpb.RangeID) { + t.history = append(t.history, scheduledCloseEvent{rangeID: id, at: t.clock.Now()}) +} + +func (t *testingRaftScheduler) String() string { + var buf strings.Builder + buf.WriteString("complete:\n") + for _, e := range t.history { + // The history is already sorted by completion time, so we don't need to + // sort it again here for deterministic output. 
+ buf.WriteString(fmt.Sprintf(" %vs: range_id=%v\n", e.at.Unix(), e.rangeID)) + } + return buf.String() +} + +func TestStreamCloseScheduler(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + ctx := context.Background() + + var ( + closeScheduler *streamCloseScheduler + raftScheduler *testingRaftScheduler + clock *timeutil.ManualTime + stopper *stop.Stopper + ) + + formatCloseScheduler := func() string { + closeScheduler.mu.Lock() + defer closeScheduler.mu.Unlock() + + var buf strings.Builder + // Sort the items for deterministic output. + items := []scheduledCloseEvent{} + items = append(items, closeScheduler.mu.scheduled.items...) + sort.Slice(items, func(i, j int) bool { + return items[i].Less(items[j]) + }) + + buf.WriteString("waiting=[") + for i, e := range items { + if i > 0 { + buf.WriteString(",") + } + fmt.Fprintf(&buf, "(r%v,t%vs)", e.rangeID, e.at.Unix()) + } + buf.WriteString("]") + return buf.String() + } + + datadriven.RunTest(t, "testdata/close_scheduler", func(t *testing.T, d *datadriven.TestData) string { + switch d.Cmd { + case "init": + stopper = stop.NewStopper() + clock = timeutil.NewManualTime(timeutil.UnixEpoch) + raftScheduler = &testingRaftScheduler{clock: clock} + closeScheduler = NewStreamCloseScheduler(stopper, clock, raftScheduler) + require.NoError(t, closeScheduler.Start(ctx)) + return fmt.Sprintf("now=%vs", clock.Now().Unix()) + + case "schedule": + var buf strings.Builder + now := clock.Now() + + fmt.Fprintf(&buf, "submitted now=%vs\n", now.Unix()) + for _, line := range strings.Split(d.Input, "\n") { + var rangeID int + + parts := strings.Fields(line) + parts[0] = strings.TrimSpace(parts[0]) + require.True(t, strings.HasPrefix(parts[0], "range_id=")) + parts[0] = strings.TrimPrefix(parts[0], "range_id=") + rangeID, err := strconv.Atoi(parts[0]) + require.NoError(t, err) + + parts[1] = strings.TrimSpace(parts[1]) + require.True(t, strings.HasPrefix(parts[1], "delay=")) + parts[1] = strings.TrimPrefix(parts[1], "delay=") + delay, err := time.ParseDuration(parts[1]) + require.NoError(t, err) + + // Schedule the event and record the time it was scheduled at and for. + closeScheduler.ScheduleSendStreamCloseRaftMuLocked( + ctx, roachpb.RangeID(rangeID), delay) + time.Sleep(20 * time.Millisecond) + fmt.Fprintf(&buf, " range_id=%v @ %vs (%vs+%vs)\n", rangeID, + now.Add(delay).Unix(), now.Unix(), delay.Seconds()) + } + return buf.String() + + case "tick": + var durationStr string + d.ScanArgs(t, "duration", &durationStr) + duration, err := time.ParseDuration(durationStr) + require.NoError(t, err) + clock.Advance(duration) + // Delay to allow the channel selects to fire. 
+ time.Sleep(20 * time.Millisecond) + return fmt.Sprintf("now=%vs %v\n%v", + clock.Now().Unix(), formatCloseScheduler(), raftScheduler.String()) + + case "stop": + stopper.Stop(ctx) + return "" + + default: + panic(fmt.Sprintf("unknown command: %s", d.Cmd)) + } + }) +} diff --git a/pkg/kv/kvserver/kvflowcontrol/replica_rac2/processor.go b/pkg/kv/kvserver/kvflowcontrol/replica_rac2/processor.go index 764441484f2d..a34c2cb41afe 100644 --- a/pkg/kv/kvserver/kvflowcontrol/replica_rac2/processor.go +++ b/pkg/kv/kvserver/kvflowcontrol/replica_rac2/processor.go @@ -15,12 +15,16 @@ import ( "sync/atomic" "time" + "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvflowcontrol" + "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvflowcontrol/kvflowcontrolpb" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvflowcontrol/rac2" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/raftlog" "github.com/cockroachdb/cockroach/pkg/raft/raftpb" "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/util/admission/admissionpb" "github.com/cockroachdb/cockroach/pkg/util/buildutil" + "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/syncutil" "github.com/cockroachdb/errors" @@ -99,10 +103,16 @@ type RaftNode interface { // Read-only methods. + // rac2.RaftInterface is an interface that abstracts the raft.RawNode for use + // in the RangeController. + rac2.RaftInterface + // TermLocked returns the current term of this replica. + TermLocked() uint64 // LeaderLocked returns the current known leader. This state can advance // past the group membership state, so the leader returned here may not be // known as a current group member. LeaderLocked() roachpb.ReplicaID + // StableIndexLocked is the (inclusive) highest index that is known to be // successfully persisted in local storage. StableIndexLocked() uint64 @@ -115,9 +125,6 @@ type RaftNode interface { NextUnstableIndexLocked() uint64 // GetAdmittedLocked returns the current value of the admitted array. GetAdmittedLocked() [raftpb.NumPriorities]uint64 - // MyLeaderTermLocked returns the term, if this replica is the leader, else - // 0. - MyLeaderTermLocked() uint64 // Mutating methods. @@ -130,21 +137,21 @@ type RaftNode interface { StepMsgAppRespForAdmittedLocked(raftpb.Message) error } -// AdmittedPiggybacker is used to enqueue MsgAppResp messages whose purpose is -// to advance Admitted. For efficiency, these need to be piggybacked on other -// messages being sent to the given leader node. The StoreID and RangeID are -// provided so that the leader node can route the incoming message to the -// relevant range. +// AdmittedPiggybacker is used to enqueue admitted vector messages addressed to +// replicas on a particular node. For efficiency, these need to be piggybacked +// on other messages being sent to the given leader node. The store / range / +// replica IDs are provided so that the leader node can route the incoming +// message to the relevant range. type AdmittedPiggybacker interface { - AddMsgAppRespForLeader(roachpb.NodeID, roachpb.StoreID, roachpb.RangeID, raftpb.Message) + Add(roachpb.NodeID, kvflowcontrolpb.PiggybackedAdmittedState) } // EntryForAdmission is the information provided to the admission control (AC) // system, when requesting admission. 
type EntryForAdmission struct { - // Information needed by the AC system, for deciding when to admit, and - // for maintaining its accounting of how much work has been - // requested/admitted. + // Information needed by the AC system, for deciding when to admit, and for + // maintaining its accounting of how much work has been requested/admitted. + StoreID roachpb.StoreID TenantID roachpb.TenantID Priority admissionpb.WorkPriority CreateTime int64 @@ -155,6 +162,10 @@ type EntryForAdmission struct { // ingested into Pebble. Ingested bool + // Routing info to get to the Processor, in addition to StoreID. + RangeID roachpb.RangeID + ReplicaID roachpb.ReplicaID + // CallbackState is information that is needed by the callback when the // entry is admitted. CallbackState EntryForAdmissionCallbackState @@ -163,34 +174,35 @@ type EntryForAdmission struct { // EntryForAdmissionCallbackState is passed to the callback when the entry is // admitted. type EntryForAdmissionCallbackState struct { - // Routing state to get to the Processor. - StoreID roachpb.StoreID - RangeID roachpb.RangeID - - // State needed by the Processor. - ReplicaID roachpb.ReplicaID - LeaderTerm uint64 - Index uint64 - Priority raftpb.Priority + Mark rac2.LogMark + Priority raftpb.Priority } // ACWorkQueue abstracts the behavior needed from admission.WorkQueue. type ACWorkQueue interface { - Admit(ctx context.Context, entry EntryForAdmission) + // Admit returns false if the entry was not submitted for admission for + // some reason. + Admit(ctx context.Context, entry EntryForAdmission) bool } -// TODO(sumeer): temporary placeholder, until RangeController is more fully -// fleshed out. type rangeControllerInitState struct { replicaSet rac2.ReplicaSet leaseholder roachpb.ReplicaID nextRaftIndex uint64 + // These fields are required options for the RangeController specific to the + // replica and range, rather than the store or node, so we pass them as part + // of the range controller init state. + rangeID roachpb.RangeID + tenantID roachpb.TenantID + localReplicaID roachpb.ReplicaID + raftInterface rac2.RaftInterface + admittedTracker rac2.AdmittedTracker } // RangeControllerFactory abstracts RangeController creation for testing. type RangeControllerFactory interface { // New creates a new RangeController. - New(state rangeControllerInitState) rac2.RangeController + New(ctx context.Context, state rangeControllerInitState) rac2.RangeController } // EnabledWhenLeaderLevel captures the level at which RACv2 is enabled when @@ -216,7 +228,6 @@ type ProcessorOptions struct { NodeID roachpb.NodeID StoreID roachpb.StoreID RangeID roachpb.RangeID - TenantID roachpb.TenantID ReplicaID roachpb.ReplicaID Replica Replica @@ -224,6 +235,8 @@ type ProcessorOptions struct { AdmittedPiggybacker AdmittedPiggybacker ACWorkQueue ACWorkQueue RangeControllerFactory RangeControllerFactory + Settings *cluster.Settings + EvalWaitMetrics *rac2.EvalWaitMetrics EnabledWhenLeaderLevel EnabledWhenLeaderLevel } @@ -308,7 +321,7 @@ type Processor interface { // This may be a noop if the level has already been reached. // // raftMu is held. - SetEnabledWhenLeaderRaftMuLocked(level EnabledWhenLeaderLevel) + SetEnabledWhenLeaderRaftMuLocked(ctx context.Context, level EnabledWhenLeaderLevel) // GetEnabledWhenLeader returns the current level. It may be used in // highly concurrent settings at the leaseholder, when waiting for eval, // and when encoding a proposal. 
Note that if the leaseholder is not the @@ -317,7 +330,8 @@ type Processor interface { // the v1 entry encoding. GetEnabledWhenLeader() EnabledWhenLeaderLevel - // OnDescChangedLocked provides a possibly updated RangeDescriptor. + // OnDescChangedLocked provides a possibly updated RangeDescriptor. The + // tenantID passed in all calls must be the same. // // Both Replica mu and raftMu are held. // @@ -328,35 +342,38 @@ type Processor interface { // OnDescChangedRaftMuLocked, or (b) add a method in RangeController that // only updates the voting replicas used in WaitForEval, and call that // from OnDescChangedLocked, and do the rest of the updating later. - OnDescChangedLocked(ctx context.Context, desc *roachpb.RangeDescriptor) + OnDescChangedLocked( + ctx context.Context, desc *roachpb.RangeDescriptor, tenantID roachpb.TenantID) // HandleRaftReadyRaftMuLocked corresponds to processing that happens when // Replica.handleRaftReadyRaftMuLocked is called. It must be called even // if there was no Ready, since it can be used to advance Admitted, and do // other processing. // - // The entries slice is Ready.Entries, i.e., represent MsgStorageAppend on - // all replicas. To stay consistent with the structure of - // Replica.handleRaftReadyRaftMuLocked, this method only does leader - // specific processing of entries. - // AdmitRaftEntriesFromMsgStorageAppendRaftMuLocked does the general - // replica processing for MsgStorageAppend. + // The RaftEvent represents MsgStorageAppend on all replicas. To stay + // consistent with the structure of Replica.handleRaftReadyRaftMuLocked, this + // method only does leader specific processing of entries. + // AdmitRaftEntriesFromMsgStorageAppendRaftMuLocked does the general replica + // processing for MsgStorageAppend. // // raftMu is held. - HandleRaftReadyRaftMuLocked(ctx context.Context, entries []raftpb.Entry) - // AdmitRaftEntriesFromMsgStorageAppendRaftMuLocked subjects entries to - // admission control on a replica (leader or follower). Like - // HandleRaftReadyRaftMuLocked, this is called from - // Replica.handleRaftReadyRaftMuLocked. It is split off from that function - // since it is natural to position the admission control processing when we - // are writing to the store in Replica.handleRaftReadyRaftMuLocked. This is - // a noop if the leader is not using the RACv2 protocol. Returns false if - // the leader is using RACv1, in which the caller should follow the RACv1 - // admission pathway. + HandleRaftReadyRaftMuLocked(context.Context, rac2.RaftEvent) + // AdmitRaftEntriesRaftMuLocked subjects entries to admission control on a + // replica (leader or follower). Like HandleRaftReadyRaftMuLocked, this is + // called from Replica.handleRaftReadyRaftMuLocked. + // + // It is split off from that function since it is natural to position the + // admission control processing when we are writing to the store in + // Replica.handleRaftReadyRaftMuLocked. This is mostly a noop if the leader is + // not using the RACv2 protocol. + // + // Returns false if the leader is using RACv1 and the replica is not + // destroyed, in which case the caller should follow the RACv1 admission + // pathway. // // raftMu is held. 
- AdmitRaftEntriesFromMsgStorageAppendRaftMuLocked( - ctx context.Context, leaderTerm uint64, entries []raftpb.Entry) bool + AdmitRaftEntriesRaftMuLocked( + ctx context.Context, event rac2.RaftEvent) bool // EnqueuePiggybackedAdmittedAtLeader is called at the leader when // receiving a piggybacked MsgAppResp that can advance a follower's @@ -387,6 +404,15 @@ type Processor interface { AdmittedLogEntry( ctx context.Context, state EntryForAdmissionCallbackState, ) + + // AdmitForEval is called to admit work that wants to evaluate at the + // leaseholder. + // + // If the callee decided not to admit because replication admission + // control is disabled, or for any other reason, admitted will be false + // and error will be nil. + AdmitForEval( + ctx context.Context, pri admissionpb.WorkPriority, ct time.Time) (admitted bool, err error) } type processorImpl struct { @@ -420,7 +446,12 @@ type processorImpl struct { // protocol is enabled. leader struct { enqueuedPiggybackedResponses map[roachpb.ReplicaID]raftpb.Message - rc rac2.RangeController + // Updating the rc reference requires both the enclosing mu and + // rcReferenceUpdateMu. Code paths that want to access this + // reference only need one of these mutexes. rcReferenceUpdateMu + // is ordered after the enclosing mu. + rcReferenceUpdateMu syncutil.RWMutex + rc rac2.RangeController // Term is used to notice transitions out of leadership and back, // to recreate rc. It is set when rc is created, and is not // up-to-date if there is no rc (which can happen when using the @@ -440,6 +471,8 @@ type processorImpl struct { // the state in replicas. replicas rac2.ReplicaSet replicasChanged bool + // Set once, in the first call to OnDescChanged. + tenantID roachpb.TenantID } // Atomic value, for serving GetEnabledWhenLeader. Mirrors // mu.enabledWhenLeader. @@ -450,6 +483,8 @@ type processorImpl struct { var _ Processor = &processorImpl{} +var _ rac2.AdmittedTracker = &processorImpl{} + func NewProcessor(opts ProcessorOptions) Processor { p := &processorImpl{opts: opts} p.mu.enabledWhenLeader = opts.EnabledWhenLeaderLevel @@ -458,6 +493,16 @@ func NewProcessor(opts ProcessorOptions) Processor { return p } +// isLeaderUsingV2ProcLocked returns true if the current leader uses the V2 +// protocol. +// +// NB: the result of this method does not change while raftMu is held. +func (p *processorImpl) isLeaderUsingV2ProcLocked() bool { + // We are the leader using V2, or a follower who learned that the leader is + // using the V2 protocol. + return p.mu.leader.rc != nil || p.mu.follower.isLeaderUsingV2Protocol +} + // OnDestroyRaftMuLocked implements Processor. func (p *processorImpl) OnDestroyRaftMuLocked(ctx context.Context) { p.opts.Replica.RaftMuAssertHeld() @@ -473,7 +518,9 @@ func (p *processorImpl) OnDestroyRaftMuLocked(ctx context.Context) { } // SetEnabledWhenLeaderRaftMuLocked implements Processor. -func (p *processorImpl) SetEnabledWhenLeaderRaftMuLocked(level EnabledWhenLeaderLevel) { +func (p *processorImpl) SetEnabledWhenLeaderRaftMuLocked( + ctx context.Context, level EnabledWhenLeaderLevel, +) { p.opts.Replica.RaftMuAssertHeld() p.mu.Lock() defer p.mu.Unlock() @@ -487,19 +534,19 @@ func (p *processorImpl) SetEnabledWhenLeaderRaftMuLocked(level EnabledWhenLeader } // May need to create RangeController. 
var leaderID roachpb.ReplicaID - var myLeaderTerm uint64 + var term uint64 var nextUnstableIndex uint64 func() { p.opts.Replica.MuLock() defer p.opts.Replica.MuUnlock() leaderID = p.raftMu.raftNode.LeaderLocked() if leaderID == p.opts.ReplicaID { - myLeaderTerm = p.raftMu.raftNode.MyLeaderTermLocked() + term = p.raftMu.raftNode.TermLocked() nextUnstableIndex = p.raftMu.raftNode.NextUnstableIndexLocked() } }() if leaderID == p.opts.ReplicaID { - p.createLeaderStateRaftMuLockedProcLocked(myLeaderTerm, nextUnstableIndex) + p.createLeaderStateRaftMuLockedProcLocked(ctx, term, nextUnstableIndex) } } @@ -517,16 +564,31 @@ func descToReplicaSet(desc *roachpb.RangeDescriptor) rac2.ReplicaSet { } // OnDescChangedLocked implements Processor. -func (p *processorImpl) OnDescChangedLocked(ctx context.Context, desc *roachpb.RangeDescriptor) { +func (p *processorImpl) OnDescChangedLocked( + ctx context.Context, desc *roachpb.RangeDescriptor, tenantID roachpb.TenantID, +) { p.opts.Replica.RaftMuAssertHeld() p.opts.Replica.MuAssertHeld() if p.raftMu.replicas == nil { // Replica is initialized, in that we have a descriptor. Get the // RaftNode. p.raftMu.raftNode = p.opts.Replica.RaftNodeMuLocked() + p.raftMu.tenantID = tenantID + } else { + if p.raftMu.tenantID != tenantID { + panic(errors.AssertionFailedf("tenantID was changed from %s to %s", + p.raftMu.tenantID, tenantID)) + } } + initialization := p.raftMu.replicas == nil p.raftMu.replicas = descToReplicaSet(desc) p.raftMu.replicasChanged = true + // We need to promptly return tokens if some replicas have been removed, + // since those tokens could be used by other ranges with replicas on the + // same store. Ensure promptness by scheduling a Ready. + if !initialization { + p.opts.RaftScheduler.EnqueueRaftReady(p.opts.RangeID) + } } // makeStateConsistentRaftMuLockedProcLocked uses the union of the latest @@ -601,7 +663,7 @@ func (p *processorImpl) makeStateConsistentRaftMuLockedProcLocked( p.closeLeaderStateRaftMuLockedProcLocked(ctx) } if p.mu.leader.rc == nil { - p.createLeaderStateRaftMuLockedProcLocked(myLeaderTerm, nextUnstableIndex) + p.createLeaderStateRaftMuLockedProcLocked(ctx, myLeaderTerm, nextUnstableIndex) return } // Existing RangeController.
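// A minimal sketch of the reader-side discipline for the rc reference that
// the two functions below uphold on the write side (both p.mu and
// rcReferenceUpdateMu are held when rc is updated, so a reader may hold
// either). A reader outside p.mu, such as AdmitForEval, takes only the
// narrower mutex:
//
//	p.mu.leader.rcReferenceUpdateMu.RLock()
//	rc := p.mu.leader.rc
//	p.mu.leader.rcReferenceUpdateMu.RUnlock()
//	if rc != nil {
//		// Use the snapshotted rc; a concurrent close surfaces as an error
//		// from its methods rather than as a data race.
//	}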
@@ -618,28 +680,41 @@ func (p *processorImpl) closeLeaderStateRaftMuLockedProcLocked(ctx context.Conte return } p.mu.leader.rc.CloseRaftMuLocked(ctx) - p.mu.leader.rc = nil + func() { + p.mu.leader.rcReferenceUpdateMu.Lock() + defer p.mu.leader.rcReferenceUpdateMu.Unlock() + p.mu.leader.rc = nil + }() p.mu.leader.enqueuedPiggybackedResponses = nil p.mu.leader.term = 0 } func (p *processorImpl) createLeaderStateRaftMuLockedProcLocked( - term uint64, nextUnstableIndex uint64, + ctx context.Context, term uint64, nextUnstableIndex uint64, ) { if p.mu.leader.rc != nil { panic("RangeController already exists") } - p.mu.leader.rc = p.opts.RangeControllerFactory.New(rangeControllerInitState{ - replicaSet: p.raftMu.replicas, - leaseholder: p.mu.leaseholderID, - nextRaftIndex: nextUnstableIndex, - }) + func() { + p.mu.leader.rcReferenceUpdateMu.Lock() + defer p.mu.leader.rcReferenceUpdateMu.Unlock() + p.mu.leader.rc = p.opts.RangeControllerFactory.New(ctx, rangeControllerInitState{ + replicaSet: p.raftMu.replicas, + leaseholder: p.mu.leaseholderID, + nextRaftIndex: nextUnstableIndex, + rangeID: p.opts.RangeID, + tenantID: p.raftMu.tenantID, + localReplicaID: p.opts.ReplicaID, + raftInterface: p.raftMu.raftNode, + admittedTracker: p, + }) + }() p.mu.leader.term = term p.mu.leader.enqueuedPiggybackedResponses = map[roachpb.ReplicaID]raftpb.Message{} } // HandleRaftReadyRaftMuLocked implements Processor. -func (p *processorImpl) HandleRaftReadyRaftMuLocked(ctx context.Context, entries []raftpb.Entry) { +func (p *processorImpl) HandleRaftReadyRaftMuLocked(ctx context.Context, e rac2.RaftEvent) { p.opts.Replica.RaftMuAssertHeld() p.mu.Lock() defer p.mu.Unlock() @@ -648,7 +723,7 @@ func (p *processorImpl) HandleRaftReadyRaftMuLocked(ctx context.Context, entries } if p.raftMu.raftNode == nil { if buildutil.CrdbTestBuild { - if len(entries) > 0 { + if len(e.Entries) > 0 { panic(errors.AssertionFailedf("entries provided without raft node")) } } @@ -672,19 +747,18 @@ func (p *processorImpl) HandleRaftReadyRaftMuLocked(ctx context.Context, entries leaseholderID = p.opts.Replica.LeaseholderMuLocked() admitted = p.raftMu.raftNode.GetAdmittedLocked() if leaderID == p.opts.ReplicaID { - myLeaderTerm = p.raftMu.raftNode.MyLeaderTermLocked() + myLeaderTerm = p.raftMu.raftNode.TermLocked() } }() - if len(entries) > 0 { - nextUnstableIndex = entries[0].Index + if len(e.Entries) > 0 { + nextUnstableIndex = e.Entries[0].Index } p.mu.lastObservedStableIndex = stableIndex p.mu.scheduledAdmittedProcessing = false p.makeStateConsistentRaftMuLockedProcLocked( ctx, nextUnstableIndex, leaderID, leaseholderID, myLeaderTerm) - isLeaderUsingV2 := p.mu.leader.rc != nil || p.mu.follower.isLeaderUsingV2Protocol - if !isLeaderUsingV2 { + if !p.isLeaderUsingV2ProcLocked() { return } // If there was a recent MsgStoreAppendResp that triggered this Ready @@ -697,39 +771,38 @@ func (p *processorImpl) HandleRaftReadyRaftMuLocked(ctx context.Context, entries p.opts.Replica.MuUnlock() if p.mu.leader.rc == nil && p.mu.leaderNodeID != 0 { // Follower, and know leaderNodeID, leaderStoreID. - p.opts.AdmittedPiggybacker.AddMsgAppRespForLeader( - p.mu.leaderNodeID, p.mu.leaderStoreID, p.opts.RangeID, msgResp) + // TODO(pav-kv): populate the message correctly. 
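			// Until the TODO above is resolved, the admitted vector below is
			// left zero-valued. A populated message would presumably carry
			// the freshly computed admitted indices, along the (hypothetical)
			// lines of:
			//
			//   Admitted: kvflowcontrolpb.AdmittedState{Admitted: admitted[:]},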
+ p.opts.AdmittedPiggybacker.Add(p.mu.leaderNodeID, kvflowcontrolpb.PiggybackedAdmittedState{ + RangeID: p.opts.RangeID, + ToStoreID: p.mu.leaderStoreID, + FromReplicaID: p.opts.ReplicaID, + ToReplicaID: roachpb.ReplicaID(msgResp.To), + Admitted: kvflowcontrolpb.AdmittedState{}, + }) } // Else if the local replica is the leader, we have already told it // about the update by calling SetAdmittedLocked. If the leader is not // known, we simply drop the message. } if p.mu.leader.rc != nil { - if err := p.mu.leader.rc.HandleRaftEventRaftMuLocked(ctx, rac2.RaftEvent{ - Entries: entries, - }); err != nil { + if err := p.mu.leader.rc.HandleRaftEventRaftMuLocked(ctx, e); err != nil { log.Errorf(ctx, "error handling raft event: %v", err) } } } -// AdmitRaftEntriesFromMsgStorageAppendRaftMuLocked implements Processor. -func (p *processorImpl) AdmitRaftEntriesFromMsgStorageAppendRaftMuLocked( - ctx context.Context, leaderTerm uint64, entries []raftpb.Entry, -) bool { - // NB: the state being read here is only modified under raftMu, so it will - // not become stale during this method. - var isLeaderUsingV2Protocol bool - func() { +// AdmitRaftEntriesRaftMuLocked implements Processor. +func (p *processorImpl) AdmitRaftEntriesRaftMuLocked(ctx context.Context, e rac2.RaftEvent) bool { + // Return false only if we're not destroyed and not using V2. + if destroyed, usingV2 := func() (bool, bool) { p.mu.Lock() defer p.mu.Unlock() - isLeaderUsingV2Protocol = !p.mu.destroyed && - (p.mu.leader.rc != nil || p.mu.follower.isLeaderUsingV2Protocol) - }() - if !isLeaderUsingV2Protocol { - return false + return p.mu.destroyed, p.isLeaderUsingV2ProcLocked() + }(); destroyed || !usingV2 { + return destroyed } - for _, entry := range entries { + + for _, entry := range e.Entries { typ, priBits, err := raftlog.EncodingOf(entry) if err != nil { panic(errors.Wrap(err, "unable to determine raft command encoding")) @@ -743,6 +816,7 @@ func (p *processorImpl) AdmitRaftEntriesFromMsgStorageAppendRaftMuLocked( if err != nil { panic(errors.Wrap(err, "unable to decode raft command admission data: %v")) } + mark := rac2.LogMark{Term: e.Term, Index: entry.Index} var raftPri raftpb.Priority if isV2Encoding { raftPri = raftpb.Priority(meta.AdmissionPriority) @@ -753,7 +827,7 @@ func (p *processorImpl) AdmitRaftEntriesFromMsgStorageAppendRaftMuLocked( p.mu.Lock() defer p.mu.Unlock() raftPri = p.mu.follower.lowPriOverrideState.getEffectivePriority(entry.Index, raftPri) - p.mu.waitingForAdmissionState.add(leaderTerm, entry.Index, raftPri) + p.mu.waitingForAdmissionState.add(mark.Term, mark.Index, raftPri) }() } else { raftPri = raftpb.LowPri @@ -766,27 +840,34 @@ func (p *processorImpl) AdmitRaftEntriesFromMsgStorageAppendRaftMuLocked( func() { p.mu.Lock() defer p.mu.Unlock() - p.mu.waitingForAdmissionState.add(leaderTerm, entry.Index, raftPri) + p.mu.waitingForAdmissionState.add(mark.Term, mark.Index, raftPri) }() } admissionPri := rac2.RaftToAdmissionPriority(raftPri) // NB: cannot hold mu when calling Admit since the callback may // execute from inside Admit, when the entry is immediately admitted. 
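		// Spelling out the hazard: the admission callback (AdmittedLogEntry)
		// takes p.mu, so an immediate admission from inside Admit would
		// self-deadlock if p.mu were held here. Hence the entry is registered
		// in waitingForAdmissionState first, Admit is called without the
		// lock, and the registration is rolled back below when the work was
		// not submitted.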
- p.opts.ACWorkQueue.Admit(ctx, EntryForAdmission{ - TenantID: p.opts.TenantID, + submitted := p.opts.ACWorkQueue.Admit(ctx, EntryForAdmission{ + StoreID: p.opts.StoreID, + TenantID: p.raftMu.tenantID, Priority: admissionPri, CreateTime: meta.AdmissionCreateTime, RequestedCount: int64(len(entry.Data)), Ingested: typ.IsSideloaded(), + RangeID: p.opts.RangeID, + ReplicaID: p.opts.ReplicaID, CallbackState: EntryForAdmissionCallbackState{ - StoreID: p.opts.StoreID, - RangeID: p.opts.RangeID, - ReplicaID: p.opts.ReplicaID, - LeaderTerm: leaderTerm, - Index: entry.Index, - Priority: raftPri, + Mark: mark, + Priority: raftPri, }, }) + if !submitted { + // Very rare, e.g. the store was not found. + func() { + p.mu.Lock() + defer p.mu.Unlock() + p.mu.waitingForAdmissionState.remove(mark.Term, mark.Index, raftPri) + }() + } } return true } @@ -860,13 +941,13 @@ func (p *processorImpl) AdmittedLogEntry( ) { p.mu.Lock() defer p.mu.Unlock() - if p.mu.destroyed || state.ReplicaID != p.opts.ReplicaID { + if p.mu.destroyed { return } admittedMayAdvance := - p.mu.waitingForAdmissionState.remove(state.LeaderTerm, state.Index, state.Priority) - if !admittedMayAdvance || state.Index > p.mu.lastObservedStableIndex || - (p.mu.leader.rc == nil && !p.mu.follower.isLeaderUsingV2Protocol) { + p.mu.waitingForAdmissionState.remove(state.Mark.Term, state.Mark.Index, state.Priority) + if !admittedMayAdvance || state.Mark.Index > p.mu.lastObservedStableIndex || + !p.isLeaderUsingV2ProcLocked() { return } // The lastObservedStableIndex has moved at or ahead of state.Index. This @@ -878,6 +959,32 @@ func (p *processorImpl) AdmittedLogEntry( } } +// AdmitForEval implements Processor. +func (p *processorImpl) AdmitForEval( + ctx context.Context, pri admissionpb.WorkPriority, ct time.Time, +) (admitted bool, err error) { + workClass := admissionpb.WorkClassFromPri(pri) + mode := kvflowcontrol.Mode.Get(&p.opts.Settings.SV) + bypass := mode == kvflowcontrol.ApplyToElastic && workClass == admissionpb.RegularWorkClass + if bypass { + p.opts.EvalWaitMetrics.OnWaiting(workClass) + p.opts.EvalWaitMetrics.OnBypassed(workClass, 0 /* duration */) + return false, nil + } + var rc rac2.RangeController + func() { + p.mu.leader.rcReferenceUpdateMu.RLock() + defer p.mu.leader.rcReferenceUpdateMu.RUnlock() + rc = p.mu.leader.rc + }() + if rc == nil { + p.opts.EvalWaitMetrics.OnWaiting(workClass) + p.opts.EvalWaitMetrics.OnBypassed(workClass, 0 /* duration */) + return false, nil + } + return rc.WaitForEval(ctx, pri) +} + func admittedIncreased(prev, next [raftpb.NumPriorities]uint64) bool { for i := range prev { if prev[i] < next[i] { @@ -886,3 +993,60 @@ func admittedIncreased(prev, next [raftpb.NumPriorities]uint64) bool { } return false } + +// GetAdmitted implements rac2.AdmittedTracker. +func (p *processorImpl) GetAdmitted(replicaID roachpb.ReplicaID) rac2.AdmittedVector { + // TODO(pav-kv): implement + return rac2.AdmittedVector{} +} + +// RangeControllerFactoryImpl implements the RangeControllerFactory interface. +var _ RangeControllerFactory = RangeControllerFactoryImpl{} + +// RangeControllerFactoryImpl is a factory to create RangeControllers. There +// should be one per store. When a new RangeController is created, the caller +// provides the range-specific information as part of rangeControllerInitState.
+type RangeControllerFactoryImpl struct { + clock *hlc.Clock + evalWaitMetrics *rac2.EvalWaitMetrics + streamTokenCounterProvider *rac2.StreamTokenCounterProvider + closeTimerScheduler rac2.ProbeToCloseTimerScheduler +} + +func NewRangeControllerFactoryImpl( + clock *hlc.Clock, + evalWaitMetrics *rac2.EvalWaitMetrics, + streamTokenCounterProvider *rac2.StreamTokenCounterProvider, + closeTimerScheduler rac2.ProbeToCloseTimerScheduler, +) RangeControllerFactoryImpl { + return RangeControllerFactoryImpl{ + clock: clock, + evalWaitMetrics: evalWaitMetrics, + streamTokenCounterProvider: streamTokenCounterProvider, + closeTimerScheduler: closeTimerScheduler, + } +} + +// New creates a new RangeController. +func (f RangeControllerFactoryImpl) New( + ctx context.Context, state rangeControllerInitState, +) rac2.RangeController { + return rac2.NewRangeController( + ctx, + rac2.RangeControllerOptions{ + RangeID: state.rangeID, + TenantID: state.tenantID, + LocalReplicaID: state.localReplicaID, + SSTokenCounter: f.streamTokenCounterProvider, + RaftInterface: state.raftInterface, + Clock: f.clock, + CloseTimerScheduler: f.closeTimerScheduler, + AdmittedTracker: state.admittedTracker, + EvalWaitMetrics: f.evalWaitMetrics, + }, + rac2.RangeControllerInitState{ + ReplicaSet: state.replicaSet, + Leaseholder: state.leaseholder, + }, + ) +} diff --git a/pkg/kv/kvserver/kvflowcontrol/replica_rac2/processor_test.go b/pkg/kv/kvserver/kvflowcontrol/replica_rac2/processor_test.go index 2cf01dd0a9fe..ad83e496263c 100644 --- a/pkg/kv/kvserver/kvflowcontrol/replica_rac2/processor_test.go +++ b/pkg/kv/kvserver/kvflowcontrol/replica_rac2/processor_test.go @@ -16,7 +16,9 @@ import ( "strconv" "strings" "testing" + "time" + "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvflowcontrol" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvflowcontrol/kvflowcontrolpb" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvflowcontrol/rac2" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverbase" @@ -24,11 +26,15 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv/kvserver/raftlog" "github.com/cockroachdb/cockroach/pkg/raft/raftpb" "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/testutils/datapathutils" "github.com/cockroachdb/cockroach/pkg/util/admission/admissionpb" + "github.com/cockroachdb/cockroach/pkg/util/leaktest" + "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/protoutil" "github.com/cockroachdb/cockroach/pkg/util/syncutil" "github.com/cockroachdb/datadriven" + "github.com/cockroachdb/errors" "github.com/stretchr/testify/require" ) @@ -98,7 +104,7 @@ type testRaftNode struct { leader roachpb.ReplicaID stableIndex uint64 nextUnstableIndex uint64 - myLeaderTerm uint64 + term uint64 } func (rn *testRaftNode) EnablePingForAdmittedLaggingLocked() { @@ -106,6 +112,12 @@ func (rn *testRaftNode) EnablePingForAdmittedLaggingLocked() { fmt.Fprintf(rn.b, " RaftNode.EnablePingForAdmittedLaggingLocked\n") } +func (rn *testRaftNode) TermLocked() uint64 { + rn.r.mu.AssertHeld() + fmt.Fprintf(rn.b, " RaftNode.TermLocked() = %d\n", rn.term) + return rn.term +} + func (rn *testRaftNode) LeaderLocked() roachpb.ReplicaID { rn.r.mu.AssertHeld() fmt.Fprintf(rn.b, " RaftNode.LeaderLocked() = %s\n", rn.leader) @@ -130,12 +142,6 @@ func (rn *testRaftNode) 
GetAdmittedLocked() [raftpb.NumPriorities]uint64 { return rn.admitted } -func (rn *testRaftNode) MyLeaderTermLocked() uint64 { - rn.r.mu.AssertHeld() - fmt.Fprintf(rn.b, " RaftNode.MyLeaderTermLocked() = %d\n", rn.myLeaderTerm) - return rn.myLeaderTerm -} - func (rn *testRaftNode) SetAdmittedLocked(admitted [raftpb.NumPriorities]uint64) raftpb.Message { rn.r.mu.AssertHeld() // TODO(sumeer): set more fields. @@ -154,6 +160,15 @@ func (rn *testRaftNode) StepMsgAppRespForAdmittedLocked(msg raftpb.Message) erro return nil } +func (rn *testRaftNode) FollowerStateRaftMuLocked( + replicaID roachpb.ReplicaID, +) rac2.FollowerStateInfo { + rn.r.mu.AssertHeld() + fmt.Fprintf(rn.b, " RaftNode.FollowerStateRaftMuLocked(%v)\n", replicaID) + // TODO(kvoli,sumeerbhola): implement. + return rac2.FollowerStateInfo{} +} + func admittedString(admitted [raftpb.NumPriorities]uint64) string { return fmt.Sprintf("[%d, %d, %d, %d]", admitted[0], admitted[1], admitted[2], admitted[3]) } @@ -166,37 +181,54 @@ type testAdmittedPiggybacker struct { b *strings.Builder } -func (p *testAdmittedPiggybacker) AddMsgAppRespForLeader( - n roachpb.NodeID, s roachpb.StoreID, r roachpb.RangeID, msg raftpb.Message, +func (p *testAdmittedPiggybacker) Add( + n roachpb.NodeID, m kvflowcontrolpb.PiggybackedAdmittedState, ) { - fmt.Fprintf(p.b, " Piggybacker.AddMsgAppRespForLeader(leader=(n%s,s%s,r%s), msg=%s)\n", - n, s, r, msgString(msg)) + fmt.Fprintf(p.b, " Piggybacker.Add(n%s, %s)\n", n, m) } type testACWorkQueue struct { b *strings.Builder + // TODO(sumeer): test case that sets this to true. + returnFalse bool } -func (q *testACWorkQueue) Admit(ctx context.Context, entry EntryForAdmission) { - fmt.Fprintf(q.b, " ACWorkQueue.Admit(%+v)\n", entry) +func (q *testACWorkQueue) Admit(ctx context.Context, entry EntryForAdmission) bool { + fmt.Fprintf(q.b, " ACWorkQueue.Admit(%+v) = %t\n", entry, !q.returnFalse) + return !q.returnFalse } type testRangeControllerFactory struct { - b *strings.Builder + b *strings.Builder + rcs []*testRangeController } -func (f *testRangeControllerFactory) New(state rangeControllerInitState) rac2.RangeController { +func (f *testRangeControllerFactory) New( + ctx context.Context, state rangeControllerInitState, +) rac2.RangeController { fmt.Fprintf(f.b, " RangeControllerFactory.New(replicaSet=%s, leaseholder=%s, nextRaftIndex=%d)\n", state.replicaSet, state.leaseholder, state.nextRaftIndex) - return &testRangeController{b: f.b} + rc := &testRangeController{b: f.b, waited: true} + f.rcs = append(f.rcs, rc) + return rc } type testRangeController struct { - b *strings.Builder + b *strings.Builder + waited bool + waitForEvalErr error } -func (c *testRangeController) WaitForEval(ctx context.Context, pri admissionpb.WorkPriority) error { - panic("WaitForEval should not be called") +func (c *testRangeController) WaitForEval( + ctx context.Context, pri admissionpb.WorkPriority, +) (bool, error) { + errStr := "" + if c.waitForEvalErr != nil { + errStr = c.waitForEvalErr.Error() + } + fmt.Fprintf(c.b, " RangeController.WaitForEval(pri=%s) = (waited=%t err=%s)\n", + pri.String(), c.waited, errStr) + return c.waited, c.waitForEvalErr } func raftEventString(e rac2.RaftEvent) string { @@ -242,13 +274,19 @@ func (c *testRangeController) CloseRaftMuLocked(ctx context.Context) { } func TestProcessorBasic(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + ctx := context.Background() var b strings.Builder var r *testReplica var sched testRaftScheduler var piggybacker testAdmittedPiggybacker var 
q testACWorkQueue var rcFactory testRangeControllerFactory + var st *cluster.Settings var p *processorImpl + tenantID := roachpb.MustMakeTenantID(4) reset := func(enabled EnabledWhenLeaderLevel) { b.Reset() r = newTestReplica(&b) @@ -256,21 +294,24 @@ func TestProcessorBasic(t *testing.T) { piggybacker = testAdmittedPiggybacker{b: &b} q = testACWorkQueue{b: &b} rcFactory = testRangeControllerFactory{b: &b} + st = cluster.MakeTestingClusterSettings() + kvflowcontrol.Mode.Override(ctx, &st.SV, kvflowcontrol.ApplyToElastic) p = NewProcessor(ProcessorOptions{ NodeID: 1, StoreID: 2, RangeID: 3, - TenantID: roachpb.MustMakeTenantID(4), ReplicaID: 5, Replica: r, RaftScheduler: &sched, AdmittedPiggybacker: &piggybacker, ACWorkQueue: &q, RangeControllerFactory: &rcFactory, + Settings: st, EnabledWhenLeaderLevel: enabled, + EvalWaitMetrics: rac2.NewEvalWaitMetrics(), }).(*processorImpl) fmt.Fprintf(&b, "n%s,s%s,r%s: replica=%s, tenant=%s, enabled-level=%s\n", - p.opts.NodeID, p.opts.StoreID, p.opts.RangeID, p.opts.ReplicaID, p.opts.TenantID, + p.opts.NodeID, p.opts.StoreID, p.opts.RangeID, p.opts.ReplicaID, tenantID, enabledLevelString(p.mu.enabledWhenLeader)) } builderStr := func() string { @@ -279,11 +320,10 @@ func TestProcessorBasic(t *testing.T) { return str } printRaftState := func() { - fmt.Fprintf(&b, "Raft: leader: %d leaseholder: %d stable: %d next-unstable: %d my-term: %d admitted: %s", + fmt.Fprintf(&b, "Raft: leader: %d leaseholder: %d stable: %d next-unstable: %d term: %d admitted: %s", r.raftNode.leader, r.leaseholder, r.raftNode.stableIndex, r.raftNode.nextUnstableIndex, - r.raftNode.myLeaderTerm, admittedString(r.raftNode.admitted)) + r.raftNode.term, admittedString(r.raftNode.admitted)) } - ctx := context.Background() datadriven.RunTest(t, datapathutils.TestDataPath(t, "processor"), func(t *testing.T, d *datadriven.TestData) string { switch d.Cmd { @@ -317,7 +357,7 @@ func TestProcessorBasic(t *testing.T) { if d.HasArg("my-leader-term") { var myLeaderTerm uint64 d.ScanArgs(t, "my-leader-term", &myLeaderTerm) - r.raftNode.myLeaderTerm = myLeaderTerm + r.raftNode.term = myLeaderTerm } if d.HasArg("leaseholder") { var leaseholder int @@ -333,7 +373,7 @@ func TestProcessorBasic(t *testing.T) { case "set-enabled-level": enabledLevel := parseEnabledLevel(t, d) - p.SetEnabledWhenLeaderRaftMuLocked(enabledLevel) + p.SetEnabledWhenLeaderRaftMuLocked(ctx, enabledLevel) return builderStr() case "get-enabled-level": @@ -343,25 +383,26 @@ func TestProcessorBasic(t *testing.T) { case "on-desc-changed": desc := parseRangeDescriptor(t, d) - p.OnDescChangedLocked(ctx, &desc) + p.OnDescChangedLocked(ctx, &desc, tenantID) return builderStr() case "handle-raft-ready-and-admit": - var entries []raftpb.Entry + var event rac2.RaftEvent if d.HasArg("entries") { var arg string d.ScanArgs(t, "entries", &arg) - entries = createEntries(t, parseEntryInfos(t, arg)) + event.Entries = createEntries(t, parseEntryInfos(t, arg)) + } + if len(event.Entries) > 0 { + d.ScanArgs(t, "leader-term", &event.Term) } fmt.Fprintf(&b, "HandleRaftReady:\n") - p.HandleRaftReadyRaftMuLocked(ctx, entries) + p.HandleRaftReadyRaftMuLocked(ctx, event) fmt.Fprintf(&b, ".....\n") - if len(entries) > 0 { - var leaderTerm uint64 - d.ScanArgs(t, "leader-term", &leaderTerm) + if len(event.Entries) > 0 { fmt.Fprintf(&b, "AdmitRaftEntries:\n") - isV2 := p.AdmitRaftEntriesFromMsgStorageAppendRaftMuLocked(ctx, leaderTerm, entries) - fmt.Fprintf(&b, "leader-using-v2: %t\n", isV2) + destroyedOrV2 := p.AdmitRaftEntriesRaftMuLocked(ctx, event) + 
fmt.Fprintf(&b, "destroyed-or-leader-using-v2: %t\n", destroyedOrV2) } return builderStr() @@ -406,31 +447,71 @@ func TestProcessorBasic(t *testing.T) { return builderStr() case "admitted-log-entry": - var replicaID int - d.ScanArgs(t, "replica-id", &replicaID) - var leaderTerm uint64 - d.ScanArgs(t, "leader-term", &leaderTerm) - var index uint64 - d.ScanArgs(t, "index", &index) + var cb EntryForAdmissionCallbackState + d.ScanArgs(t, "leader-term", &cb.Mark.Term) + d.ScanArgs(t, "index", &cb.Mark.Index) var pri int d.ScanArgs(t, "pri", &pri) - cb := EntryForAdmissionCallbackState{ - StoreID: 2, - RangeID: 3, - ReplicaID: roachpb.ReplicaID(replicaID), - LeaderTerm: leaderTerm, - Index: index, - Priority: raftpb.Priority(pri), - } + cb.Priority = raftpb.Priority(pri) p.AdmittedLogEntry(ctx, cb) return builderStr() + case "set-flow-control-mode": + var mode string + d.ScanArgs(t, "mode", &mode) + var modeVal kvflowcontrol.ModeT + switch mode { + case "apply-to-all": + modeVal = kvflowcontrol.ApplyToAll + case "apply-to-elastic": + modeVal = kvflowcontrol.ApplyToElastic + default: + t.Fatalf("unknown mode: %s", mode) + } + kvflowcontrol.Mode.Override(ctx, &st.SV, modeVal) + return builderStr() + + case "admit-for-eval": + pri := parseAdmissionPriority(t, d) + // The callee ignores the create time. + admitted, err := p.AdmitForEval(ctx, pri, time.Time{}) + fmt.Fprintf(&b, "admitted: %t err: ", admitted) + if err == nil { + fmt.Fprintf(&b, "\n") + } else { + fmt.Fprintf(&b, "%s\n", err.Error()) + } + return builderStr() + + case "set-wait-for-eval-return-values": + rc := rcFactory.rcs[len(rcFactory.rcs)-1] + d.ScanArgs(t, "waited", &rc.waited) + rc.waitForEvalErr = nil + if d.HasArg("err") { + var errStr string + d.ScanArgs(t, "err", &errStr) + rc.waitForEvalErr = errors.Errorf("%s", errStr) + } + return builderStr() + default: return fmt.Sprintf("unknown command: %s", d.Cmd) } }) } +func parseAdmissionPriority(t *testing.T, td *datadriven.TestData) admissionpb.WorkPriority { + var priStr string + td.ScanArgs(t, "pri", &priStr) + for k, v := range admissionpb.WorkPriorityDict { + if v == priStr { + return k + } + } + t.Fatalf("unknown priority %s", priStr) + return admissionpb.NormalPri +} + func parseEnabledLevel(t *testing.T, td *datadriven.TestData) EnabledWhenLeaderLevel { if td.HasArg("enabled-level") { var str string diff --git a/pkg/kv/kvserver/kvflowcontrol/replica_rac2/testdata/close_scheduler b/pkg/kv/kvserver/kvflowcontrol/replica_rac2/testdata/close_scheduler new file mode 100644 index 000000000000..f76866016866 --- /dev/null +++ b/pkg/kv/kvserver/kvflowcontrol/replica_rac2/testdata/close_scheduler @@ -0,0 +1,148 @@ +init +---- +now=0s + +tick duration=1s +---- +now=1s waiting=[] +complete: + +schedule +range_id=1 delay=1s +---- +submitted now=1s + range_id=1 @ 2s (1s+1s) + +# There shouldn't be any scheduled events completed yet, as 1s hasn't passed +# since scheduling the first one. +tick duration=0 +---- +now=1s waiting=[(r1,t2s)] +complete: + +# The first scheduled event for range_id=1 should be completed as exactly 1s has passed. +tick duration=1s +---- +now=2s waiting=[] +complete: + 2s: range_id=1 + +# Next, try out scheduling multiple events, with some delays being identical. 
+schedule +range_id=1 delay=1s +range_id=1 delay=1s +range_id=2 delay=2s +range_id=3 delay=3s +range_id=4 delay=4s +---- +submitted now=2s + range_id=1 @ 3s (2s+1s) + range_id=1 @ 3s (2s+1s) + range_id=2 @ 4s (2s+2s) + range_id=3 @ 5s (2s+3s) + range_id=4 @ 6s (2s+4s) + +# None of these newly scheduled events should have completed yet. +tick duration=0 +---- +now=2s waiting=[(r1,t3s),(r1,t3s),(r2,t4s),(r3,t5s),(r4,t6s)] +complete: + 2s: range_id=1 + +# The events for range_id=1 should have all completed, expect three events, one +# on the first second and the other two a second after. +tick duration=1s +---- +now=3s waiting=[(r2,t4s),(r3,t5s),(r4,t6s)] +complete: + 2s: range_id=1 + 3s: range_id=1 + 3s: range_id=1 + +tick duration=1s +---- +now=4s waiting=[(r3,t5s),(r4,t6s)] +complete: + 2s: range_id=1 + 3s: range_id=1 + 3s: range_id=1 + 4s: range_id=2 + +tick duration=1s +---- +now=5s waiting=[(r4,t6s)] +complete: + 2s: range_id=1 + 3s: range_id=1 + 3s: range_id=1 + 4s: range_id=2 + 5s: range_id=3 + + +# All scheduled events should have now been removed. +tick duration=1s +---- +now=6s waiting=[] +complete: + 2s: range_id=1 + 3s: range_id=1 + 3s: range_id=1 + 4s: range_id=2 + 5s: range_id=3 + 6s: range_id=4 + +# Schedule an event with a longer delay, then schedule another event after 1s +# with a shorter delay. The newer event should still complete before the +# earlier one. +schedule +range_id=1 delay=3s +---- +submitted now=6s + range_id=1 @ 9s (6s+3s) + +tick duration=1s +---- +now=7s waiting=[(r1,t9s)] +complete: + 2s: range_id=1 + 3s: range_id=1 + 3s: range_id=1 + 4s: range_id=2 + 5s: range_id=3 + 6s: range_id=4 + +schedule +range_id=2 delay=1s +---- +submitted now=7s + range_id=2 @ 8s (7s+1s) + +# The second scheduled event should now be complete for range_id=2. +tick duration=1s +---- +now=8s waiting=[(r1,t9s)] +complete: + 2s: range_id=1 + 3s: range_id=1 + 3s: range_id=1 + 4s: range_id=2 + 5s: range_id=3 + 6s: range_id=4 + 8s: range_id=2 + +# The earlier event should now also complete. +tick duration=1s +---- +now=9s waiting=[] +complete: + 2s: range_id=1 + 3s: range_id=1 + 3s: range_id=1 + 4s: range_id=2 + 5s: range_id=3 + 6s: range_id=4 + 8s: range_id=2 + 9s: range_id=1 + +stop +---- diff --git a/pkg/kv/kvserver/kvflowcontrol/replica_rac2/testdata/processor b/pkg/kv/kvserver/kvflowcontrol/replica_rac2/testdata/processor index 63be0c803487..3622bc89aa1b 100644 --- a/pkg/kv/kvserver/kvflowcontrol/replica_rac2/testdata/processor +++ b/pkg/kv/kvserver/kvflowcontrol/replica_rac2/testdata/processor @@ -14,6 +14,16 @@ on-destroy ---- Replica.RaftMuAssertHeld +# AdmitForEval returns false since there is no RangeController. +admit-for-eval pri=low-pri +---- +admitted: false err: + +# AdmitForEval returns false since there is no RangeController. +admit-for-eval pri=normal-pri +---- +admitted: false err: + reset ---- n1,s2,r3: replica=5, tenant=4, enabled-level=not-enabled @@ -35,7 +45,7 @@ enabled-level: v1-encoding # leaseholder are both on replica-id 10. set-raft-state leader=10 stable-index=20 next-unstable-index=25 leaseholder=10 admitted=[15,20,15,20] ---- -Raft: leader: 10 leaseholder: 10 stable: 20 next-unstable: 25 my-term: 0 admitted: [15, 20, 15, 20] +Raft: leader: 10 leaseholder: 10 stable: 20 next-unstable: 25 term: 0 admitted: [15, 20, 15, 20] # The processor has never been given a range-descriptor, so it will do nothing. handle-raft-ready-and-admit @@ -67,7 +77,7 @@ HandleRaftReady: Replica.MuUnlock ..... 
AdmitRaftEntries: -leader-using-v2: false +destroyed-or-leader-using-v2: false # Told that the leader is using v2. And that [25,25] has no low-pri override. side-channel v2 leader-term=50 first=25 last=25 @@ -76,7 +86,7 @@ side-channel v2 leader-term=50 first=25 last=25 set-raft-state next-unstable-index=26 ---- -Raft: leader: 10 leaseholder: 10 stable: 20 next-unstable: 26 my-term: 0 admitted: [15, 20, 15, 20] +Raft: leader: 10 leaseholder: 10 stable: 20 next-unstable: 26 term: 0 admitted: [15, 20, 15, 20] # The index 25 entry is v1 encoded, so by default it is low-pri. Admitted is # advanced to the stable-index, and this entry is submitted for admission. @@ -96,13 +106,13 @@ HandleRaftReady: Replica.MuUnlock ..... AdmitRaftEntries: - ACWorkQueue.Admit({TenantID:4 Priority:low-pri CreateTime:2 RequestedCount:100 Ingested:false CallbackState:{StoreID:2 RangeID:3 ReplicaID:5 LeaderTerm:50 Index:25 Priority:LowPri}}) -leader-using-v2: true + ACWorkQueue.Admit({StoreID:2 TenantID:4 Priority:low-pri CreateTime:2 RequestedCount:100 Ingested:false RangeID:3 ReplicaID:5 CallbackState:{Mark:{Term:50 Index:25} Priority:LowPri}}) = true +destroyed-or-leader-using-v2: true # Stable index is advanced to 25. set-raft-state stable-index=25 leader=11 ---- -Raft: leader: 11 leaseholder: 10 stable: 25 next-unstable: 26 my-term: 0 admitted: [20, 20, 20, 20] +Raft: leader: 11 leaseholder: 10 stable: 25 next-unstable: 26 term: 0 admitted: [20, 20, 20, 20] # handleRaftReady with no entries. Since the leader is using v2, admitted is # advanced. admitted[low-pri] is 24 since there is an entry at 25 that is not @@ -121,7 +131,7 @@ HandleRaftReady: Replica.MuLock RaftNode.SetAdmittedLocked([24, 25, 25, 25]) = type: MsgAppResp from: 0 to: 0 Replica.MuUnlock - Piggybacker.AddMsgAppRespForLeader(leader=(n11,s11,r3), msg=type: MsgAppResp from: 0 to: 0) + Piggybacker.Add(n11, [r3,s11,5->0] admitted=t0/[]) ..... # Side channel for entries [26, 26] with no low-pri override. @@ -131,7 +141,7 @@ side-channel v2 leader-term=50 first=26 last=26 set-raft-state next-unstable-index=27 ---- -Raft: leader: 11 leaseholder: 10 stable: 25 next-unstable: 27 my-term: 0 admitted: [24, 25, 25, 25] +Raft: leader: 11 leaseholder: 10 stable: 25 next-unstable: 27 term: 0 admitted: [24, 25, 25, 25] # The index 26 entry uses v2 and is using pri=2, which is AboveNormalPri. handle-raft-ready-and-admit entries=v2/i26/t45/pri2/time2/len100 leader-term=50 @@ -147,8 +157,8 @@ HandleRaftReady: Replica.MuUnlock ..... AdmitRaftEntries: - ACWorkQueue.Admit({TenantID:4 Priority:user-high-pri CreateTime:2 RequestedCount:100 Ingested:false CallbackState:{StoreID:2 RangeID:3 ReplicaID:5 LeaderTerm:50 Index:26 Priority:AboveNormalPri}}) -leader-using-v2: true + ACWorkQueue.Admit({StoreID:2 TenantID:4 Priority:user-high-pri CreateTime:2 RequestedCount:100 Ingested:false RangeID:3 ReplicaID:5 CallbackState:{Mark:{Term:50 Index:26} Priority:AboveNormalPri}}) = true +destroyed-or-leader-using-v2: true # handleRaftReady is a noop. handle-raft-ready-and-admit @@ -168,12 +178,7 @@ HandleRaftReady: # admitted, which will happen in the next handleRaftReady. set-raft-state stable-index=26 ---- -Raft: leader: 11 leaseholder: 10 stable: 26 next-unstable: 27 my-term: 0 admitted: [24, 25, 25, 25] - -# Bogus callback which does not match the replica-id, so ignored. index 25 -# (LowPri) is still not admitted. 
-admitted-log-entry replica-id=51 leader-term=50 index=25 pri=0 ----- +Raft: leader: 11 leaseholder: 10 stable: 26 next-unstable: 27 term: 0 admitted: [24, 25, 25, 25] # Some admitted indices are advanced, but LowPri and AboveNormalPri cannot # advance past the index 25 and index 26 entries respectively, that are @@ -192,11 +197,11 @@ HandleRaftReady: Replica.MuLock RaftNode.SetAdmittedLocked([24, 26, 25, 26]) = type: MsgAppResp from: 0 to: 0 Replica.MuUnlock - Piggybacker.AddMsgAppRespForLeader(leader=(n11,s11,r3), msg=type: MsgAppResp from: 0 to: 0) + Piggybacker.Add(n11, [r3,s11,5->0] admitted=t0/[]) ..... # Callback is accurate and index 25 is admitted. -admitted-log-entry replica-id=5 leader-term=50 index=25 pri=0 +admitted-log-entry leader-term=50 index=25 pri=0 ---- RaftScheduler.EnqueueRaftReady(rangeID=3) @@ -215,7 +220,7 @@ HandleRaftReady: Replica.MuLock RaftNode.SetAdmittedLocked([26, 26, 25, 26]) = type: MsgAppResp from: 0 to: 0 Replica.MuUnlock - Piggybacker.AddMsgAppRespForLeader(leader=(n11,s11,r3), msg=type: MsgAppResp from: 0 to: 0) + Piggybacker.Add(n11, [r3,s11,5->0] admitted=t0/[]) ..... # Side channel for entries [27,27] indicate a low-pri override. @@ -225,7 +230,7 @@ side-channel v2 leader-term=50 first=27 last=27 low-pri set-raft-state next-unstable-index=28 ---- -Raft: leader: 11 leaseholder: 10 stable: 26 next-unstable: 28 my-term: 0 admitted: [26, 26, 25, 26] +Raft: leader: 11 leaseholder: 10 stable: 26 next-unstable: 28 term: 0 admitted: [26, 26, 25, 26] # The index 27 entry is marked AboveNormalPri, but will be treated as LowPri. handle-raft-ready-and-admit entries=v2/i27/t45/pri2/time2/len100 leader-term=50 @@ -241,13 +246,13 @@ HandleRaftReady: Replica.MuUnlock ..... AdmitRaftEntries: - ACWorkQueue.Admit({TenantID:4 Priority:low-pri CreateTime:2 RequestedCount:100 Ingested:false CallbackState:{StoreID:2 RangeID:3 ReplicaID:5 LeaderTerm:50 Index:27 Priority:LowPri}}) -leader-using-v2: true + ACWorkQueue.Admit({StoreID:2 TenantID:4 Priority:low-pri CreateTime:2 RequestedCount:100 Ingested:false RangeID:3 ReplicaID:5 CallbackState:{Mark:{Term:50 Index:27} Priority:LowPri}}) = true +destroyed-or-leader-using-v2: true -admitted-log-entry replica-id=5 leader-term=50 index=27 pri=3 +admitted-log-entry leader-term=50 index=27 pri=3 ---- -admitted-log-entry replica-id=5 leader-term=50 index=26 pri=2 +admitted-log-entry leader-term=50 index=26 pri=2 ---- RaftScheduler.EnqueueRaftReady(rangeID=3) @@ -265,7 +270,7 @@ HandleRaftReady: Replica.MuLock RaftNode.SetAdmittedLocked([26, 26, 26, 26]) = type: MsgAppResp from: 0 to: 0 Replica.MuUnlock - Piggybacker.AddMsgAppRespForLeader(leader=(n11,s11,r3), msg=type: MsgAppResp from: 0 to: 0) + Piggybacker.Add(n11, [r3,s11,5->0] admitted=t0/[]) ..... # index 27 is still waiting for admission, but we switch to a new leader that @@ -279,7 +284,7 @@ side-channel v1 leader-term=51 first=27 last=27 # Stable index advanced to 27 set-raft-state stable-index=27 ---- -Raft: leader: 11 leaseholder: 10 stable: 27 next-unstable: 28 my-term: 0 admitted: [26, 26, 26, 26] +Raft: leader: 11 leaseholder: 10 stable: 27 next-unstable: 28 term: 0 admitted: [26, 26, 26, 26] # Noop. handle-raft-ready-and-admit @@ -309,10 +314,10 @@ HandleRaftReady: Replica.MuUnlock ..... AdmitRaftEntries: -leader-using-v2: false +destroyed-or-leader-using-v2: false # Noop. -admitted-log-entry replica-id=5 leader-term=50 index=27 pri=0 +admitted-log-entry leader-term=50 index=27 pri=0 ---- # Same leader switches to v2. 
@@ -335,7 +340,7 @@ HandleRaftReady: Replica.MuLock RaftNode.SetAdmittedLocked([27, 27, 27, 27]) = type: MsgAppResp from: 0 to: 0 Replica.MuUnlock - Piggybacker.AddMsgAppRespForLeader(leader=(n11,s11,r3), msg=type: MsgAppResp from: 0 to: 0) + Piggybacker.Add(n11, [r3,s11,5->0] admitted=t0/[]) ..... # Noop, since not the leader. @@ -350,16 +355,17 @@ process-piggybacked-admitted # Local replica is becoming the leader. set-raft-state leader=5 my-leader-term=52 ---- -Raft: leader: 5 leaseholder: 10 stable: 27 next-unstable: 28 my-term: 52 admitted: [27, 27, 27, 27] +Raft: leader: 5 leaseholder: 10 stable: 27 next-unstable: 28 term: 52 admitted: [27, 27, 27, 27] on-desc-changed replicas=n11/s11/11,n1/s2/5 ---- Replica.RaftMuAssertHeld Replica.MuAssertHeld + RaftScheduler.EnqueueRaftReady(rangeID=3) set-raft-state next-unstable-index=29 ---- -Raft: leader: 5 leaseholder: 10 stable: 27 next-unstable: 29 my-term: 52 admitted: [27, 27, 27, 27] +Raft: leader: 5 leaseholder: 10 stable: 27 next-unstable: 29 term: 52 admitted: [27, 27, 27, 27] # RangeController is created. handle-raft-ready-and-admit entries=v1/i28/t45/pri0/time2/len100 leader-term=52 @@ -372,17 +378,60 @@ HandleRaftReady: RaftNode.LeaderLocked() = 5 Replica.LeaseholderMuLocked RaftNode.GetAdmittedLocked = [27, 27, 27, 27] - RaftNode.MyLeaderTermLocked() = 52 + RaftNode.TermLocked() = 52 Replica.MuUnlock RangeControllerFactory.New(replicaSet=[(n1,s2):5,(n11,s11):11], leaseholder=10, nextRaftIndex=28) RangeController.HandleRaftEventRaftMuLocked([28]) ..... AdmitRaftEntries: - ACWorkQueue.Admit({TenantID:4 Priority:low-pri CreateTime:2 RequestedCount:100 Ingested:false CallbackState:{StoreID:2 RangeID:3 ReplicaID:5 LeaderTerm:52 Index:28 Priority:LowPri}}) -leader-using-v2: true + ACWorkQueue.Admit({StoreID:2 TenantID:4 Priority:low-pri CreateTime:2 RequestedCount:100 Ingested:false RangeID:3 ReplicaID:5 CallbackState:{Mark:{Term:52 Index:28} Priority:LowPri}}) = true +destroyed-or-leader-using-v2: true + +# AdmitForEval returns true since there is a RangeController which admitted. +admit-for-eval pri=low-pri +---- + RangeController.WaitForEval(pri=low-pri) = (waited=true err=) +admitted: true err: + +# AdmitForEval returns false despite there being a RangeController since +# normal-pri is not subject to replication AC. RangeController.WaitForEval was +# not called. +admit-for-eval pri=normal-pri +---- +admitted: false err: + +# Subject normal-pri to replication AC. +set-flow-control-mode mode=apply-to-all +---- + +# AdmitForEval for normal-pri returns true. +admit-for-eval pri=normal-pri +---- + RangeController.WaitForEval(pri=normal-pri) = (waited=true err=) +admitted: true err: + +# Change the return value from WaitForEval. +set-wait-for-eval-return-values waited=false +---- + +# Plumbing to WaitForEval is correct. +admit-for-eval pri=normal-pri +---- + RangeController.WaitForEval(pri=normal-pri) = (waited=false err=) +admitted: false err: + +# Set WaitForEval to return an error. +set-wait-for-eval-return-values waited=false err=rc-was-closed +---- + +# Plumbing to WaitForEval is correct. +admit-for-eval pri=normal-pri +---- + RangeController.WaitForEval(pri=normal-pri) = (waited=false err=rc-was-closed) +admitted: false err: rc-was-closed # Entry at index 28 is admitted, but stable index is 27. -admitted-log-entry replica-id=5 leader-term=52 index=28 pri=0 +admitted-log-entry leader-term=52 index=28 pri=0 ---- # Noop. 
@@ -396,7 +445,7 @@ HandleRaftReady: RaftNode.LeaderLocked() = 5 Replica.LeaseholderMuLocked RaftNode.GetAdmittedLocked = [27, 27, 27, 27] - RaftNode.MyLeaderTermLocked() = 52 + RaftNode.TermLocked() = 52 Replica.MuUnlock RangeController.HandleRaftEventRaftMuLocked([]) ..... @@ -404,7 +453,7 @@ HandleRaftReady: # Stable index advances to 28. set-raft-state stable-index=28 ---- -Raft: leader: 5 leaseholder: 10 stable: 28 next-unstable: 29 my-term: 52 admitted: [27, 27, 27, 27] +Raft: leader: 5 leaseholder: 10 stable: 28 next-unstable: 29 term: 52 admitted: [27, 27, 27, 27] # Admitted is advanced. Since the leader is local, no need to piggyback MsgAppResp. handle-raft-ready-and-admit @@ -417,7 +466,7 @@ HandleRaftReady: RaftNode.LeaderLocked() = 5 Replica.LeaseholderMuLocked RaftNode.GetAdmittedLocked = [27, 27, 27, 27] - RaftNode.MyLeaderTermLocked() = 52 + RaftNode.TermLocked() = 52 Replica.MuUnlock Replica.MuLock RaftNode.SetAdmittedLocked([28, 28, 28, 28]) = type: MsgAppResp from: 0 to: 0 @@ -454,7 +503,7 @@ process-piggybacked-admitted # My leader-term advances. set-raft-state my-leader-term=53 ---- -Raft: leader: 5 leaseholder: 10 stable: 28 next-unstable: 29 my-term: 53 admitted: [28, 28, 28, 28] +Raft: leader: 5 leaseholder: 10 stable: 28 next-unstable: 29 term: 53 admitted: [28, 28, 28, 28] # RangeController is recreated. handle-raft-ready-and-admit @@ -467,7 +516,7 @@ HandleRaftReady: RaftNode.LeaderLocked() = 5 Replica.LeaseholderMuLocked RaftNode.GetAdmittedLocked = [28, 28, 28, 28] - RaftNode.MyLeaderTermLocked() = 53 + RaftNode.TermLocked() = 53 Replica.MuUnlock RangeController.CloseRaftMuLocked RangeControllerFactory.New(replicaSet=[(n1,s2):5,(n11,s11):11], leaseholder=10, nextRaftIndex=29) @@ -478,6 +527,7 @@ on-desc-changed replicas=n11/s11/11,n1/s2/5,n13/s13/13 ---- Replica.RaftMuAssertHeld Replica.MuAssertHeld + RaftScheduler.EnqueueRaftReady(rangeID=3) handle-raft-ready-and-admit ---- @@ -489,7 +539,7 @@ HandleRaftReady: RaftNode.LeaderLocked() = 5 Replica.LeaseholderMuLocked RaftNode.GetAdmittedLocked = [28, 28, 28, 28] - RaftNode.MyLeaderTermLocked() = 53 + RaftNode.TermLocked() = 53 Replica.MuUnlock RangeController.SetReplicasRaftMuLocked([(n1,s2):5,(n11,s11):11,(n13,s13):13]) RangeController.SetLeaseholderRaftMuLocked(10) @@ -539,10 +589,10 @@ HandleRaftReady: Replica.RaftMuAssertHeld ..... AdmitRaftEntries: -leader-using-v2: false +destroyed-or-leader-using-v2: true # Noop. -admitted-log-entry replica-id=5 leader-term=52 index=28 pri=0 +admitted-log-entry leader-term=52 index=28 pri=0 ---- # Test transitioning to v2 protocol after becoming leader. @@ -556,7 +606,7 @@ enabled-level: not-enabled set-raft-state leader=5 my-leader-term=50 leaseholder=5 stable-index=20 next-unstable-index=25 admitted=[15,20,15,20] ---- -Raft: leader: 5 leaseholder: 5 stable: 20 next-unstable: 25 my-term: 50 admitted: [15, 20, 15, 20] +Raft: leader: 5 leaseholder: 5 stable: 20 next-unstable: 25 term: 50 admitted: [15, 20, 15, 20] # Noop, since don't know the descriptor. handle-raft-ready-and-admit @@ -582,13 +632,13 @@ HandleRaftReady: RaftNode.LeaderLocked() = 5 Replica.LeaseholderMuLocked RaftNode.GetAdmittedLocked = [15, 20, 15, 20] - RaftNode.MyLeaderTermLocked() = 50 + RaftNode.TermLocked() = 50 Replica.MuUnlock ..... 
set-raft-state next-unstable-index=26 ---- -Raft: leader: 5 leaseholder: 5 stable: 20 next-unstable: 26 my-term: 50 admitted: [15, 20, 15, 20] +Raft: leader: 5 leaseholder: 5 stable: 20 next-unstable: 26 term: 50 admitted: [15, 20, 15, 20] # v1 protocol, so does not do anything. handle-raft-ready-and-admit entries=v1/i25/t45/pri0/time2/len100 leader-term=50 @@ -601,11 +651,11 @@ HandleRaftReady: RaftNode.LeaderLocked() = 5 Replica.LeaseholderMuLocked RaftNode.GetAdmittedLocked = [15, 20, 15, 20] - RaftNode.MyLeaderTermLocked() = 50 + RaftNode.TermLocked() = 50 Replica.MuUnlock ..... AdmitRaftEntries: -leader-using-v2: false +destroyed-or-leader-using-v2: false # RangeController is created. set-enabled-level enabled-level=v1-encoding @@ -613,14 +663,14 @@ set-enabled-level enabled-level=v1-encoding Replica.RaftMuAssertHeld Replica.MuLock RaftNode.LeaderLocked() = 5 - RaftNode.MyLeaderTermLocked() = 50 + RaftNode.TermLocked() = 50 RaftNode.NextUnstableIndexLocked() = 26 Replica.MuUnlock RangeControllerFactory.New(replicaSet=[(n11,s11):11,(n13,s13):13], leaseholder=5, nextRaftIndex=26) set-raft-state next-unstable-index=27 ---- -Raft: leader: 5 leaseholder: 5 stable: 20 next-unstable: 27 my-term: 50 admitted: [15, 20, 15, 20] +Raft: leader: 5 leaseholder: 5 stable: 20 next-unstable: 27 term: 50 admitted: [15, 20, 15, 20] # Index 26 entry is sent to AC. handle-raft-ready-and-admit entries=v1/i26/t45/pri0/time2/len100 leader-term=50 @@ -633,7 +683,7 @@ HandleRaftReady: RaftNode.LeaderLocked() = 5 Replica.LeaseholderMuLocked RaftNode.GetAdmittedLocked = [15, 20, 15, 20] - RaftNode.MyLeaderTermLocked() = 50 + RaftNode.TermLocked() = 50 Replica.MuUnlock Replica.MuLock RaftNode.SetAdmittedLocked([20, 20, 20, 20]) = type: MsgAppResp from: 0 to: 0 @@ -641,11 +691,11 @@ HandleRaftReady: RangeController.HandleRaftEventRaftMuLocked([26]) ..... AdmitRaftEntries: - ACWorkQueue.Admit({TenantID:4 Priority:low-pri CreateTime:2 RequestedCount:100 Ingested:false CallbackState:{StoreID:2 RangeID:3 ReplicaID:5 LeaderTerm:50 Index:26 Priority:LowPri}}) -leader-using-v2: true + ACWorkQueue.Admit({StoreID:2 TenantID:4 Priority:low-pri CreateTime:2 RequestedCount:100 Ingested:false RangeID:3 ReplicaID:5 CallbackState:{Mark:{Term:50 Index:26} Priority:LowPri}}) = true +destroyed-or-leader-using-v2: true # Entry is admitted. -admitted-log-entry replica-id=5 leader-term=50 index=26 pri=0 +admitted-log-entry leader-term=50 index=26 pri=0 ---- # Noop, since stable index is still 20. @@ -659,14 +709,14 @@ HandleRaftReady: RaftNode.LeaderLocked() = 5 Replica.LeaseholderMuLocked RaftNode.GetAdmittedLocked = [20, 20, 20, 20] - RaftNode.MyLeaderTermLocked() = 50 + RaftNode.TermLocked() = 50 Replica.MuUnlock RangeController.HandleRaftEventRaftMuLocked([]) ..... set-raft-state stable-index=26 ---- -Raft: leader: 5 leaseholder: 5 stable: 26 next-unstable: 27 my-term: 50 admitted: [20, 20, 20, 20] +Raft: leader: 5 leaseholder: 5 stable: 26 next-unstable: 27 term: 50 admitted: [20, 20, 20, 20] # Everything up to 26 is admitted. 
handle-raft-ready-and-admit @@ -679,7 +729,7 @@ HandleRaftReady: RaftNode.LeaderLocked() = 5 Replica.LeaseholderMuLocked RaftNode.GetAdmittedLocked = [20, 20, 20, 20] - RaftNode.MyLeaderTermLocked() = 50 + RaftNode.TermLocked() = 50 Replica.MuUnlock Replica.MuLock RaftNode.SetAdmittedLocked([26, 26, 26, 26]) = type: MsgAppResp from: 0 to: 0 @@ -689,7 +739,7 @@ HandleRaftReady: set-raft-state next-unstable-index=28 ---- -Raft: leader: 5 leaseholder: 5 stable: 26 next-unstable: 28 my-term: 50 admitted: [26, 26, 26, 26] +Raft: leader: 5 leaseholder: 5 stable: 26 next-unstable: 28 term: 50 admitted: [26, 26, 26, 26] # Index 27 entry is not subject to AC. handle-raft-ready-and-admit entries=none/i27/t45/pri0/time2/len100 leader-term=50 @@ -702,16 +752,16 @@ HandleRaftReady: RaftNode.LeaderLocked() = 5 Replica.LeaseholderMuLocked RaftNode.GetAdmittedLocked = [26, 26, 26, 26] - RaftNode.MyLeaderTermLocked() = 50 + RaftNode.TermLocked() = 50 Replica.MuUnlock RangeController.HandleRaftEventRaftMuLocked([27]) ..... AdmitRaftEntries: -leader-using-v2: true +destroyed-or-leader-using-v2: true set-raft-state stable-index=27 ---- -Raft: leader: 5 leaseholder: 5 stable: 27 next-unstable: 28 my-term: 50 admitted: [26, 26, 26, 26] +Raft: leader: 5 leaseholder: 5 stable: 27 next-unstable: 28 term: 50 admitted: [26, 26, 26, 26] # Everything up to 27 is admitted. handle-raft-ready-and-admit @@ -724,7 +774,7 @@ HandleRaftReady: RaftNode.LeaderLocked() = 5 Replica.LeaseholderMuLocked RaftNode.GetAdmittedLocked = [26, 26, 26, 26] - RaftNode.MyLeaderTermLocked() = 50 + RaftNode.TermLocked() = 50 Replica.MuUnlock Replica.MuLock RaftNode.SetAdmittedLocked([27, 27, 27, 27]) = type: MsgAppResp from: 0 to: 0 @@ -735,7 +785,7 @@ HandleRaftReady: # Transition to follower. In this case, the leader is not even known. set-raft-state leader=0 ---- -Raft: leader: 0 leaseholder: 5 stable: 27 next-unstable: 28 my-term: 50 admitted: [27, 27, 27, 27] +Raft: leader: 0 leaseholder: 5 stable: 27 next-unstable: 28 term: 50 admitted: [27, 27, 27, 27] handle-raft-ready-and-admit ---- diff --git a/pkg/kv/kvserver/kvserverpb/raft.proto b/pkg/kv/kvserver/kvserverpb/raft.proto index 761821b6bc38..23e9d262c1b0 100644 --- a/pkg/kv/kvserver/kvserverpb/raft.proto +++ b/pkg/kv/kvserver/kvserverpb/raft.proto @@ -104,10 +104,15 @@ message RaftMessageRequest { // priority of the Entries in the Message are overridden to be // raftpb.LowPri. bool low_priority_override = 13; + // AdmittedState annotates a MsgAppResp message with a vector of admitted log + // indices. Used only with RACv2. + kv.kvserver.kvflowcontrol.kvflowcontrolpb.AdmittedState admitted_state = 14 [(gogoproto.nullable) = false]; // AdmittedResponse is used in RACv2, for piggybacking MsgAppResp messages // from a follower to a leader, that advance admitted for a follower. - repeated kv.kvserver.kvflowcontrol.kvflowcontrolpb.AdmittedResponseForRange admitted_response = 14 [(gogoproto.nullable) = false]; + // + // TODO(pav-kv): remove. + repeated kv.kvserver.kvflowcontrol.kvflowcontrolpb.AdmittedResponseForRange admitted_response = 15 [(gogoproto.nullable) = false]; reserved 10; } @@ -153,6 +158,10 @@ message RaftMessageRequestBatch { // provide a disjointness guarantee to leader leases. util.hlc.Timestamp now = 3 [(gogoproto.nullable) = false, (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/util/hlc.ClockTimestamp"]; + + // AdmittedStates contains admitted vector messages addressed to replicas + // located on the receiver node of this batch. 
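  // (An inference from the Processor interface rather than something stated
  // here: on the receiver node these are expected to be routed to the
  // corresponding ranges' Processors, e.g. via
  // EnqueuePiggybackedAdmittedAtLeader.)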
+ repeated kv.kvserver.kvflowcontrol.kvflowcontrolpb.PiggybackedAdmittedState admitted_states = 4 [(gogoproto.nullable) = false]; } message RaftMessageResponseUnion { diff --git a/pkg/kv/kvserver/liveness/client_test.go b/pkg/kv/kvserver/liveness/client_test.go index 114de6bb2bc2..c4084d44208c 100644 --- a/pkg/kv/kvserver/liveness/client_test.go +++ b/pkg/kv/kvserver/liveness/client_test.go @@ -373,3 +373,52 @@ func TestGetActiveNodes(t *testing.T) { }) require.Equal(t, []roachpb.NodeID{1, 2, 3, 4}, getActiveNodes(nl1)) } + +// TestLivenessRangeGetsPeriodicallyCompacted tests that the liveness range +// gets compacted when we set the liveness range compaction interval. +func TestLivenessRangeGetsPeriodicallyCompacted(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + ctx := context.Background() + + tc := testcluster.StartTestCluster(t, 1, base.TestClusterArgs{}) + defer tc.Stopper().Stop(ctx) + + // Enable the liveness range compaction and set the interval to 1s to speed + // up the test. + c := tc.Server(0).SystemLayer().SQLConn(t) + _, err := c.ExecContext(ctx, "set cluster setting kv.liveness_range_compact.interval='1s'") + require.NoError(t, err) + + // Get the original file number of the sstable for the liveness range. We + // expect to see this file number change as the liveness range gets compacted. + livenessFileNumberQuery := "WITH replicas(n) AS (SELECT unnest(replicas) FROM " + "crdb_internal.ranges_no_leases WHERE range_id = 2), sstables AS (SELECT " + "(crdb_internal.sstable_metrics(n, n, start_key, end_key)).* " + "FROM crdb_internal.ranges_no_leases, replicas WHERE range_id = 2) " + "SELECT file_num FROM sstables" + + sqlDB := tc.ApplicationLayer(0).SQLConn(t) + var original_file_num string + testutils.SucceedsSoon(t, func() error { + rows := sqlDB.QueryRow(livenessFileNumberQuery) + if err := rows.Scan(&original_file_num); err != nil { + return err + } + return nil + }) + + // Expect that the liveness file number changes. + testutils.SucceedsSoon(t, func() error { + var current_file_num string + rows := sqlDB.QueryRow(livenessFileNumberQuery) + if err := rows.Scan(&current_file_num); err != nil { + return err + } + if current_file_num == original_file_num { + return errors.Errorf("liveness compaction hasn't happened yet") + } + return nil + }) +} diff --git a/pkg/kv/kvserver/logstore/logstore.go b/pkg/kv/kvserver/logstore/logstore.go index ca97df675965..a5d2c016ab13 100644 --- a/pkg/kv/kvserver/logstore/logstore.go +++ b/pkg/kv/kvserver/logstore/logstore.go @@ -73,6 +73,54 @@ func MakeMsgStorageAppend(m raftpb.Message) MsgStorageAppend { return MsgStorageAppend(m) } +// HardState returns the hard state assembled from the message. +func (m *MsgStorageAppend) HardState() raftpb.HardState { + return raftpb.HardState{ + Term: m.Term, + Vote: m.Vote, + Commit: m.Commit, + Lead: m.Lead, + LeadEpoch: m.LeadEpoch, + } +} + +// MustSync returns true if this storage write must be synced. +func (m *MsgStorageAppend) MustSync() bool { + return len(m.Responses) != 0 +} + +// OnDone returns the storage write post-processing information. +func (m *MsgStorageAppend) OnDone() MsgStorageAppendDone { return m.Responses } + +// MsgStorageAppendDone encapsulates the actions to do after MsgStorageAppend is +// done, such as sending messages back to raft node and its peers. +type MsgStorageAppendDone []raftpb.Message + +// Responses returns the messages to send after the write/sync is completed.
+func (m MsgStorageAppendDone) Responses() []raftpb.Message { return m } + +// Mark returns the LogMark of the raft log in storage after the write/sync is +// completed. Returns zero value if the write does not update the log mark. +func (m MsgStorageAppendDone) Mark() raft.LogMark { + if len(m) == 0 { + return raft.LogMark{} + } + // Optimization: the MsgStorageAppendResp message, if any, is always the last + // one in the list. + // TODO(pav-kv): this is an undocumented API quirk. Refactor the raft write + // API to be more digestible outside the package. + msg := m[len(m)-1] + if msg.Type != raftpb.MsgStorageAppendResp { + return raft.LogMark{} + } + if len(msg.Entries) != 0 { + return raft.LogMark{Term: msg.LogTerm, Index: msg.Index} + } else if msg.Snapshot != nil { + return raft.LogMark{Term: msg.LogTerm, Index: msg.Snapshot.Metadata.Index} + } + return raft.LogMark{} +} + // RaftState stores information about the last entry and the size of the log. type RaftState struct { LastIndex kvpb.RaftIndex @@ -126,7 +174,7 @@ type LogStore struct { // are associated with the MsgStorageAppend that triggered the fsync. // commitStats is populated iff this was a non-blocking sync. type SyncCallback interface { - OnLogSync(context.Context, []raftpb.Message, storage.BatchCommitStats) + OnLogSync(context.Context, MsgStorageAppendDone, storage.BatchCommitStats) } func newStoreEntriesBatch(eng storage.Engine) storage.Batch { @@ -198,14 +246,7 @@ func (s *LogStore) storeEntriesAndCommitBatch( stats.End = timeutil.Now() } - hs := raftpb.HardState{ - Term: m.Term, - Vote: m.Vote, - Commit: m.Commit, - Lead: m.Lead, - LeadEpoch: m.LeadEpoch, - } - if !raft.IsEmptyHardState(hs) { + if hs := m.HardState(); !raft.IsEmptyHardState(hs) { // NB: Note that without additional safeguards, it's incorrect to write // the HardState before appending m.Entries. When catching up, a follower // will receive Entries that are immediately Committed in the same @@ -239,7 +280,7 @@ func (s *LogStore) storeEntriesAndCommitBatch( // (Replica), so this comment might need to move. stats.PebbleBegin = timeutil.Now() stats.PebbleBytes = int64(batch.Len()) - wantsSync := len(m.Responses) > 0 + wantsSync := m.MustSync() willSync := wantsSync && !DisableSyncRaftLog.Get(&s.Settings.SV) // Use the non-blocking log sync path if we are performing a log sync ... nonBlockingSync := willSync && @@ -271,7 +312,7 @@ func (s *LogStore) storeEntriesAndCommitBatch( *waiterCallback = nonBlockingSyncWaiterCallback{ ctx: ctx, cb: cb, - msgs: m.Responses, + onDone: m.OnDone(), batch: batch, metrics: s.Metrics, logCommitBegin: stats.PebbleBegin, @@ -289,7 +330,7 @@ func (s *LogStore) storeEntriesAndCommitBatch( if wantsSync { logCommitEnd := stats.PebbleEnd s.Metrics.RaftLogCommitLatency.RecordValue(logCommitEnd.Sub(stats.PebbleBegin).Nanoseconds()) - cb.OnLogSync(ctx, m.Responses, storage.BatchCommitStats{}) + cb.OnLogSync(ctx, m.OnDone(), storage.BatchCommitStats{}) } } stats.Sync = wantsSync @@ -339,9 +380,9 @@ func (s *LogStore) storeEntriesAndCommitBatch( // callback. type nonBlockingSyncWaiterCallback struct { // Used to run SyncCallback. - ctx context.Context - cb SyncCallback - msgs []raftpb.Message + ctx context.Context + cb SyncCallback + onDone MsgStorageAppendDone // Used to extract stats. This is the batch that has been synced. batch storage.WriteBatch // Used to record Metrics. 
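// A minimal sketch of a SyncCallback consumer using the new accessors. The
// consumer type and its wiring are assumptions for illustration; only
// HardState, MustSync, OnDone, Responses, and Mark come from this change.
//
//	func (c *replicaSyncCallback) OnLogSync(
//		ctx context.Context, done logstore.MsgStorageAppendDone, _ storage.BatchCommitStats,
//	) {
//		// A zero Mark means this write did not advance the durable log mark.
//		if mark := done.Mark(); mark.Term != 0 {
//			_ = mark // e.g. feed the durable mark into RACv2 admitted tracking.
//		}
//		for _, msg := range done.Responses() {
//			_ = msg // deliver MsgStorageAppendResp etc. back to the raft node.
//		}
//	}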
@@ -354,7 +395,7 @@ func (cb *nonBlockingSyncWaiterCallback) run() { dur := timeutil.Since(cb.logCommitBegin).Nanoseconds() cb.metrics.RaftLogCommitLatency.RecordValue(dur) commitStats := cb.batch.CommitStats() - cb.cb.OnLogSync(cb.ctx, cb.msgs, commitStats) + cb.cb.OnLogSync(cb.ctx, cb.onDone, commitStats) cb.release() } diff --git a/pkg/kv/kvserver/logstore/logstore_bench_test.go b/pkg/kv/kvserver/logstore/logstore_bench_test.go index 88722d5f401c..d9809f2ea71c 100644 --- a/pkg/kv/kvserver/logstore/logstore_bench_test.go +++ b/pkg/kv/kvserver/logstore/logstore_bench_test.go @@ -38,7 +38,7 @@ func (b *discardBatch) Commit(bool) error { type noopSyncCallback struct{} -func (noopSyncCallback) OnLogSync(context.Context, []raftpb.Message, storage.BatchCommitStats) {} +func (noopSyncCallback) OnLogSync(context.Context, MsgStorageAppendDone, storage.BatchCommitStats) {} func BenchmarkLogStore_StoreEntries(b *testing.B) { defer log.Scope(b).Close(b) diff --git a/pkg/kv/kvserver/logstore/sideload_test.go b/pkg/kv/kvserver/logstore/sideload_test.go index b6ad6214fe89..eb07ac6712aa 100644 --- a/pkg/kv/kvserver/logstore/sideload_test.go +++ b/pkg/kv/kvserver/logstore/sideload_test.go @@ -607,7 +607,7 @@ func TestSideloadStorageSync(t *testing.T) { // Create a sideloaded storage with an in-memory FS. Use strict MemFS to be // able to emulate crash restart by rolling it back to last synced state. ctx := context.Background() - memFS := vfs.NewStrictMem() + memFS := vfs.NewCrashableMem() env, err := fs.InitEnv(ctx, memFS, "", fs.EnvConfig{}, nil /* statsCollector */) require.NoError(t, err) eng, err := storage.Open(ctx, env, cluster.MakeTestingClusterSettings(), storage.ForTesting) @@ -622,15 +622,13 @@ func TestSideloadStorageSync(t *testing.T) { require.NoError(t, ss.Sync()) } // Cut off all syncs from this point, to emulate a crash. - memFS.SetIgnoreSyncs(true) + crashFS := memFS.CrashClone(vfs.CrashCloneCfg{}) ss = nil eng.Close() // Reset filesystem to the last synced state. - memFS.ResetToSyncedState() - memFS.SetIgnoreSyncs(false) // Emulate process restart. Load from the last synced state. - env, err = fs.InitEnv(ctx, memFS, "", fs.EnvConfig{}, nil /* statsCollector */) + env, err = fs.InitEnv(ctx, crashFS, "", fs.EnvConfig{}, nil /* statsCollector */) require.NoError(t, err) eng, err = storage.Open(ctx, env, cluster.MakeTestingClusterSettings(), storage.ForTesting) require.NoError(t, err) @@ -739,7 +737,7 @@ func TestMkdirAllAndSyncParents(t *testing.T) { wantGone: []string{"../a/b/c"}, }} { t.Run("", func(t *testing.T) { - fs := vfs.NewStrictMem() + fs := vfs.NewCrashableMem() require.NoError(t, fs.MkdirAll(tc.path, os.ModePerm)) for _, dir := range tc.sync { handle, err := fs.OpenDir(dir) @@ -767,7 +765,7 @@ func TestMkdirAllAndSyncParents(t *testing.T) { assertExistence(t, tc.wantGone, true) // After crash and resetting to the synced state, wantExist directories // must exist, and wantGone are lost. 
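			// The crash-restart idiom with the newer vfs API, as a sketch
			// (CrashCloneCfg left zero-valued, as in this test):
			//
			//   fs := vfs.NewCrashableMem()
			//   // ... write and sync some state ...
			//   fs = fs.CrashClone(vfs.CrashCloneCfg{}) // only synced state survives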
- fs.ResetToSyncedState() + fs = fs.CrashClone(vfs.CrashCloneCfg{}) assertExistence(t, tc.wantExist, true) assertExistence(t, tc.wantGone, false) }) diff --git a/pkg/kv/kvserver/metrics.go b/pkg/kv/kvserver/metrics.go index ad7d95630cd7..428686cd01b9 100644 --- a/pkg/kv/kvserver/metrics.go +++ b/pkg/kv/kvserver/metrics.go @@ -120,6 +120,12 @@ var ( Measurement: "Ranges", Unit: metric.Unit_COUNT, } + metaDecommissioningRangeCount = metric.Metadata{ + Name: "ranges.decommissioning", + Help: "Number of ranges with at least one replica on a decommissioning node", + Measurement: "Ranges", + Unit: metric.Unit_COUNT, + } // Lease request metrics. metaLeaseRequestSuccessCount = metric.Metadata{ @@ -2566,6 +2572,7 @@ type StoreMetrics struct { UnavailableRangeCount *metric.Gauge UnderReplicatedRangeCount *metric.Gauge OverReplicatedRangeCount *metric.Gauge + DecommissioningRangeCount *metric.Gauge // Lease request metrics for successful and failed lease requests. These // count proposals (i.e. it does not matter how many replicas apply the @@ -3263,6 +3270,7 @@ func newStoreMetrics(histogramWindow time.Duration) *StoreMetrics { UnavailableRangeCount: metric.NewGauge(metaUnavailableRangeCount), UnderReplicatedRangeCount: metric.NewGauge(metaUnderReplicatedRangeCount), OverReplicatedRangeCount: metric.NewGauge(metaOverReplicatedRangeCount), + DecommissioningRangeCount: metric.NewGauge(metaDecommissioningRangeCount), // Lease request metrics. LeaseRequestSuccessCount: metric.NewCounter(metaLeaseRequestSuccessCount), diff --git a/pkg/kv/kvserver/metrics_test.go b/pkg/kv/kvserver/metrics_test.go index 5f2156e0b71f..6276c0f60b2c 100644 --- a/pkg/kv/kvserver/metrics_test.go +++ b/pkg/kv/kvserver/metrics_test.go @@ -12,13 +12,17 @@ package kvserver import ( "context" + "fmt" "math/rand" "sync" "testing" "time" + "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/storage/enginepb" + "github.com/cockroachdb/cockroach/pkg/testutils" + "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/stretchr/testify/require" @@ -93,3 +97,36 @@ func TestTenantsStorageMetricsConcurrency(t *testing.T) { } wg.Wait() } + +// TestPebbleDiskWriteMetrics tests the categorized disk write metrics in Pebble. +func TestPebbleDiskWriteMetrics(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + tmpDir, cleanup := testutils.TempDir(t) + defer cleanup() + + ctx := context.Background() + ts, _, kvDB := serverutils.StartServer(t, base.TestServerArgs{ + DefaultTestTenant: base.TestControlsTenantsExplicitly, + StoreSpecs: []base.StoreSpec{ + {Size: base.SizeSpec{InBytes: base.MinimumStoreSize}, Path: tmpDir}, + }, + }) + defer ts.Stopper().Stop(ctx) + + // Force a WAL write.
+ require.NoError(t, kvDB.Put(ctx, "kev", "value")) + + if err := ts.GetStores().(*Stores).VisitStores(func(s *Store) error { + testutils.SucceedsSoon(t, func() error { + if ok := s.Registry().Contains("storage.category-pebble-wal.bytes-written"); !ok { + return fmt.Errorf("missing pebble WAL writes metric") + } + return nil + }) + return nil + }); err != nil { + t.Fatal(err) + } +} diff --git a/pkg/kv/kvserver/mvcc_gc_queue.go b/pkg/kv/kvserver/mvcc_gc_queue.go index 9f24b7c4456b..91a8babdab54 100644 --- a/pkg/kv/kvserver/mvcc_gc_queue.go +++ b/pkg/kv/kvserver/mvcc_gc_queue.go @@ -598,7 +598,7 @@ func (r *replicaGCer) template() kvpb.GCRequest { func (r *replicaGCer) send(ctx context.Context, req kvpb.GCRequest) error { n := atomic.AddInt32(&r.count, 1) - log.Eventf(ctx, "sending batch %d (%d keys)", n, len(req.Keys)) + log.Eventf(ctx, "sending batch %d (%d keys, %d rangekeys)", n, len(req.Keys), len(req.RangeKeys)) ba := &kvpb.BatchRequest{} // Technically not needed since we're talking directly to the Replica. diff --git a/pkg/kv/kvserver/queue.go b/pkg/kv/kvserver/queue.go index db7c619fa74d..0e4d251296d8 100644 --- a/pkg/kv/kvserver/queue.go +++ b/pkg/kv/kvserver/queue.go @@ -915,7 +915,6 @@ func (bq *baseQueue) processOneAsyncAndReleaseSem( // it is no longer processable, return immediately. if _, err := bq.replicaCanBeProcessed(ctx, repl, false /*acquireLeaseIfNeeded */); err != nil { bq.finishProcessingReplica(ctx, stopper, repl, err) - log.Infof(ctx, "%s: skipping %d since replica can't be processed %v", taskName, repl.ReplicaID(), err) <-bq.processSem return } @@ -1080,7 +1079,9 @@ func (bq *baseQueue) replicaCanBeProcessed( st := repl.CurrentLeaseStatus(ctx) if st.IsValid() && !st.OwnedBy(repl.StoreID()) { log.VEventf(ctx, 1, "needs lease; not adding: %v", st.Lease) - return nil, errors.Newf("needs lease, not adding: %v", st.Lease) + // NB: this is an expected error, so make sure it doesn't get + // logged loudly. + return nil, benignerror.New(errors.Newf("needs lease, not adding: %v", st.Lease)) } } } diff --git a/pkg/kv/kvserver/raft.go b/pkg/kv/kvserver/raft.go index 785f75f726c4..244b7b750e61 100644 --- a/pkg/kv/kvserver/raft.go +++ b/pkg/kv/kvserver/raft.go @@ -181,40 +181,11 @@ func logRaftReady(ctx context.Context, ready raft.Ready) { } for i, m := range ready.Messages { fmt.Fprintf(&buf, " Outgoing Message[%d]: %.200s\n", - i, raftDescribeMessage(m, raftEntryFormatter)) + i, raft.DescribeMessage(m, raftEntryFormatter)) } log.Infof(ctx, "raft ready (must-sync=%t)\n%s", ready.MustSync, buf.String()) } -// This is a fork of raft.DescribeMessage with a tweak to avoid logging -// snapshot data. 
-func raftDescribeMessage(m raftpb.Message, f raft.EntryFormatter) string { - var buf bytes.Buffer - fmt.Fprintf(&buf, "%x->%x %v Term:%d Log:%d/%d", m.From, m.To, m.Type, m.Term, m.LogTerm, m.Index) - if m.Reject { - fmt.Fprintf(&buf, " Rejected (Hint: %d)", m.RejectHint) - } - if m.Commit != 0 { - fmt.Fprintf(&buf, " Commit:%d", m.Commit) - } - if len(m.Entries) > 0 { - fmt.Fprintf(&buf, " Entries:[") - for i, e := range m.Entries { - if i != 0 { - buf.WriteString(", ") - } - buf.WriteString(raft.DescribeEntry(e, f)) - } - fmt.Fprintf(&buf, "]") - } - if m.Snapshot != nil { - snap := *m.Snapshot - snap.Data = nil - fmt.Fprintf(&buf, " Snapshot:%v", snap) - } - return buf.String() -} - func raftEntryFormatter(data []byte) string { if len(data) == 0 { return "[empty]" diff --git a/pkg/kv/kvserver/raft_transport.go b/pkg/kv/kvserver/raft_transport.go index 11b9ad9aaad5..5155aa8f8152 100644 --- a/pkg/kv/kvserver/raft_transport.go +++ b/pkg/kv/kvserver/raft_transport.go @@ -271,7 +271,8 @@ type RaftTransport struct { } // kvflowcontrol2 is used for replication admission control v2. kvflowcontrol2 struct { - piggybackReader node_rac2.PiggybackMsgReader + piggybackReader node_rac2.PiggybackMsgReader + piggybackedResponseScheduler PiggybackedAdmittedResponseScheduler } knobs *RaftTransportTestingKnobs @@ -297,7 +298,7 @@ func NewDummyRaftTransport( } return NewRaftTransport(ambient, st, nil, clock, nodedialer.New(nil, resolver), nil, kvflowdispatch.NewDummyDispatch(), NoopStoresFlowControlIntegration{}, - NoopRaftTransportDisconnectListener{}, nil, nil, + NoopRaftTransportDisconnectListener{}, nil, nil, nil, ) } @@ -313,6 +314,7 @@ func NewRaftTransport( kvflowHandles kvflowcontrol.Handles, disconnectListener RaftTransportDisconnectListener, piggybackReader node_rac2.PiggybackMsgReader, + piggybackedResponseScheduler PiggybackedAdmittedResponseScheduler, knobs *RaftTransportTestingKnobs, ) *RaftTransport { if knobs == nil { @@ -331,6 +333,7 @@ func NewRaftTransport( t.kvflowControl.disconnectListener = disconnectListener t.kvflowControl.mu.connectionTracker = newConnectionTrackerForFlowControl() t.kvflowcontrol2.piggybackReader = piggybackReader + t.kvflowcontrol2.piggybackedResponseScheduler = piggybackedResponseScheduler t.initMetrics() if grpcServer != nil { @@ -530,6 +533,16 @@ func (t *RaftTransport) RaftMessageBatch(stream MultiRaft_RaftMessageBatchServer t.kvflowControl.mu.Lock() t.kvflowControl.mu.connectionTracker.markStoresConnected(storeIDs) t.kvflowControl.mu.Unlock() + if len(batch.AdmittedStates) != 0 { + // TODO(pav-kv): dispatch admitted vectors to RACv2. + // NB: we do this via this special path instead of using the + // handleRaftRequest path since we don't have a full-fledged + // RaftMessageRequest for each range (each of these responses could + // be for a different range), and because what we need to do w.r.t. + // queueing is much simpler (we don't need to worry about queue size + // since we only keep the latest message from each replica). + _ = t.kvflowcontrol2.piggybackedResponseScheduler.ScheduleAdmittedResponseForRangeRACv2 + } if len(batch.Requests) == 0 { continue } @@ -732,9 +745,14 @@ func (t *RaftTransport) processQueue( } // For replication admission control v2. - maybeAnnotateWithAdmittedResponses := func( - req *kvserverpb.RaftMessageRequest, admitted []kvflowcontrolpb.AdmittedResponseForRange) { - req.AdmittedResponse = append(req.AdmittedResponse, admitted...) 
+ maybeAnnotateWithAdmittedStates := func( + batch *kvserverpb.RaftMessageRequestBatch, admitted []kvflowcontrolpb.PiggybackedAdmittedState, + ) { + // TODO(pav-kv): send these protos once they are populated correctly. + if true { + return + } + batch.AdmittedStates = append(batch.AdmittedStates, admitted...) } annotateWithClockTimestamp := func(batch *kvserverpb.RaftMessageRequestBatch) { @@ -750,6 +768,10 @@ func (t *RaftTransport) processQueue( batch.Requests = batch.Requests[:0] batch.StoreIDs = nil batch.Now = hlc.ClockTimestamp{} + for i := range batch.AdmittedStates { + batch.AdmittedStates[i] = kvflowcontrolpb.PiggybackedAdmittedState{} + } + batch.AdmittedStates = batch.AdmittedStates[:0] } var raftIdleTimer timeutil.Timer @@ -789,7 +811,7 @@ func (t *RaftTransport) processQueue( budget := targetRaftOutgoingBatchSize.Get(&t.st.SV) - size var pendingDispatches []kvflowcontrolpb.AdmittedRaftLogEntries - var admittedResponses []kvflowcontrolpb.AdmittedResponseForRange + var admittedStates []kvflowcontrolpb.PiggybackedAdmittedState if disableFn := t.knobs.DisablePiggyBackedFlowTokenDispatch; disableFn == nil || !disableFn() { // RACv1. // @@ -813,9 +835,9 @@ func (t *RaftTransport) processQueue( maybeAnnotateWithAdmittedRaftLogEntries(req, pendingDispatches) // RACv2. - admittedResponses, _ = t.kvflowcontrol2.piggybackReader.PopMsgsForNode( + admittedStates, _ = t.kvflowcontrol2.piggybackReader.PopMsgsForNode( timeutil.Now(), q.nodeID, kvadmission.FlowTokenDispatchMaxBytes.Get(&t.st.SV)) - maybeAnnotateWithAdmittedResponses(req, admittedResponses) + maybeAnnotateWithAdmittedStates(batch, admittedStates) } batch.Requests = append(batch.Requests, *req) @@ -837,9 +859,10 @@ func (t *RaftTransport) processQueue( maybeAnnotateWithStoreIDs(batch) annotateWithClockTimestamp(batch) + if err := stream.Send(batch); err != nil { t.metrics.FlowTokenDispatchesDropped.Inc(int64( - len(pendingDispatches) + len(admittedResponses))) + len(pendingDispatches) + len(admittedStates))) return err } t.metrics.MessagesSent.Inc(int64(len(batch.Requests))) @@ -859,9 +882,9 @@ func (t *RaftTransport) processQueue( kvadmission.FlowTokenDispatchMaxBytes.Get(&t.st.SV), ) // RACv2. 
- admittedResponses, remainingAdmittedResponses := t.kvflowcontrol2.piggybackReader.PopMsgsForNode( + admittedStates, remainingAdmittedResponses := t.kvflowcontrol2.piggybackReader.PopMsgsForNode( timeutil.Now(), q.nodeID, kvadmission.FlowTokenDispatchMaxBytes.Get(&t.st.SV)) - if len(pendingDispatches) == 0 && len(admittedResponses) == 0 { + if len(pendingDispatches) == 0 && len(admittedStates) == 0 { continue // nothing to do } // If there are remaining dispatches/responses, schedule them @@ -872,15 +895,16 @@ func (t *RaftTransport) processQueue( req := newRaftMessageRequest() maybeAnnotateWithAdmittedRaftLogEntries(req, pendingDispatches) - maybeAnnotateWithAdmittedResponses(req, admittedResponses) batch.Requests = append(batch.Requests, *req) releaseRaftMessageRequest(req) maybeAnnotateWithStoreIDs(batch) annotateWithClockTimestamp(batch) + maybeAnnotateWithAdmittedStates(batch, admittedStates) + if err := stream.Send(batch); err != nil { t.metrics.FlowTokenDispatchesDropped.Inc(int64( - len(pendingDispatches) + len(admittedResponses))) + len(pendingDispatches) + len(admittedStates))) return err } t.metrics.MessagesSent.Inc(int64(len(batch.Requests))) diff --git a/pkg/kv/kvserver/raft_transport_test.go b/pkg/kv/kvserver/raft_transport_test.go index 8a902128dec6..e19c52f99ed5 100644 --- a/pkg/kv/kvserver/raft_transport_test.go +++ b/pkg/kv/kvserver/raft_transport_test.go @@ -166,7 +166,7 @@ func (rttc *raftTransportTestContext) AddNode(nodeID roachpb.NodeID) *kvserver.R kvflowdispatch.NewDummyDispatch(), kvserver.NoopStoresFlowControlIntegration{}, kvserver.NoopRaftTransportDisconnectListener{}, (*node_rac2.AdmittedPiggybacker)(nil), - nil, + nil, nil, ) rttc.GossipNode(nodeID, addr) return transport @@ -184,6 +184,7 @@ func (rttc *raftTransportTestContext) AddNodeWithoutGossip( kvflowHandles kvflowcontrol.Handles, disconnectListener kvserver.RaftTransportDisconnectListener, piggybacker node_rac2.PiggybackMsgReader, + piggybackedResponseScheduler kvserver.PiggybackedAdmittedResponseScheduler, knobs *kvserver.RaftTransportTestingKnobs, ) (*kvserver.RaftTransport, net.Addr) { manual := hlc.NewHybridManualClock() @@ -202,6 +203,7 @@ func (rttc *raftTransportTestContext) AddNodeWithoutGossip( kvflowHandles, disconnectListener, piggybacker, + piggybackedResponseScheduler, knobs, ) rttc.transports[nodeID] = transport @@ -474,7 +476,7 @@ func TestRaftTransportCircuitBreaker(t *testing.T) { kvserver.NoopStoresFlowControlIntegration{}, kvserver.NoopRaftTransportDisconnectListener{}, (*node_rac2.AdmittedPiggybacker)(nil), - nil, + nil, nil, ) serverChannel := rttc.ListenStore(serverReplica.NodeID, serverReplica.StoreID) @@ -589,7 +591,7 @@ func TestReopenConnection(t *testing.T) { kvserver.NoopStoresFlowControlIntegration{}, kvserver.NoopRaftTransportDisconnectListener{}, (*node_rac2.AdmittedPiggybacker)(nil), - nil, + nil, nil, ) rttc.GossipNode(serverReplica.NodeID, serverAddr) rttc.ListenStore(serverReplica.NodeID, serverReplica.StoreID) @@ -629,7 +631,7 @@ func TestReopenConnection(t *testing.T) { kvserver.NoopStoresFlowControlIntegration{}, kvserver.NoopRaftTransportDisconnectListener{}, (*node_rac2.AdmittedPiggybacker)(nil), - nil, + nil, nil, ) replacementChannel := rttc.ListenStore(replacementReplica.NodeID, replacementReplica.StoreID) diff --git a/pkg/kv/kvserver/raft_transport_unit_test.go b/pkg/kv/kvserver/raft_transport_unit_test.go index 3d735a50d105..8d0cb09cb2f1 100644 --- a/pkg/kv/kvserver/raft_transport_unit_test.go +++ b/pkg/kv/kvserver/raft_transport_unit_test.go @@ -80,6 +80,7 
@@ func TestRaftTransportStartNewQueue(t *testing.T) { NoopStoresFlowControlIntegration{}, NoopRaftTransportDisconnectListener{}, (*node_rac2.AdmittedPiggybacker)(nil), + nil, /* PiggybackedAdmittedResponseScheduler */ nil, /* knobs */ ) diff --git a/pkg/kv/kvserver/rangefeed/BUILD.bazel b/pkg/kv/kvserver/rangefeed/BUILD.bazel index 5c4f6e2f4e16..cdb82453c0b5 100644 --- a/pkg/kv/kvserver/rangefeed/BUILD.bazel +++ b/pkg/kv/kvserver/rangefeed/BUILD.bazel @@ -5,6 +5,8 @@ go_library( srcs = [ "budget.go", "buffered_registration.go", + "buffered_sender.go", + "buffered_stream.go", "catchup_scan.go", "event_size.go", "filter.go", @@ -14,10 +16,11 @@ go_library( "resolved_timestamp.go", "scheduled_processor.go", "scheduler.go", - "stream_muxer.go", - "stream_muxer_test_helper.go", + "stream.go", "task.go", + "test_helpers.go", "testutil.go", + "unbuffered_sender.go", ], importpath = "github.com/cockroachdb/cockroach/pkg/kv/kvserver/rangefeed", visibility = ["//visibility:public"], @@ -29,6 +32,8 @@ go_library( "//pkg/roachpb", "//pkg/settings", "//pkg/settings/cluster", + "//pkg/sql/rowenc/keyside", + "//pkg/sql/sem/tree", "//pkg/storage", "//pkg/storage/enginepb", "//pkg/storage/fs", @@ -36,6 +41,7 @@ go_library( "//pkg/util/bufalloc", "//pkg/util/buildutil", "//pkg/util/container/heap", + "//pkg/util/encoding", "//pkg/util/envutil", "//pkg/util/hlc", "//pkg/util/interval", @@ -54,19 +60,20 @@ go_library( go_test( name = "rangefeed_test", - size = "large", srcs = [ "bench_test.go", "budget_test.go", "catchup_scan_bench_test.go", "catchup_scan_test.go", "event_size_test.go", + "processor_helpers_test.go", "processor_test.go", "registry_test.go", "resolved_timestamp_test.go", "scheduler_test.go", - "stream_muxer_test.go", + "sender_helper_test.go", "task_test.go", + "unbuffered_sender_test.go", ], embed = [":rangefeed"], deps = [ @@ -79,7 +86,6 @@ go_test( "//pkg/settings/cluster", "//pkg/sql/randgen", "//pkg/sql/rowenc/keyside", - "//pkg/sql/sem/tree", "//pkg/sql/types", "//pkg/storage", "//pkg/storage/enginepb", diff --git a/pkg/kv/kvserver/rangefeed/bench_test.go b/pkg/kv/kvserver/rangefeed/bench_test.go index 1c40250bd734..0f981ccad827 100644 --- a/pkg/kv/kvserver/rangefeed/bench_test.go +++ b/pkg/kv/kvserver/rangefeed/bench_test.go @@ -201,14 +201,15 @@ func (s *noopStream) Context() context.Context { return s.ctx } -func (s *noopStream) Send(*kvpb.RangeFeedEvent) error { +func (s *noopStream) SendUnbuffered(*kvpb.RangeFeedEvent) error { s.events++ return nil } -// Note that Send itself is not thread-safe, but it is written to be used only -// in a single threaded environment in this test, ensuring thread-safety. -func (s *noopStream) SendIsThreadSafe() {} +// Note that SendUnbuffered itself is not thread-safe, but it is written to be +// used only in a single threaded environment in this test, ensuring +// thread-safety. +func (s *noopStream) SendUnbufferedIsThreadSafe() {} // Disconnect implements the Stream interface. It mocks the disconnect behavior // by sending the error to the done channel. 
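Because the Send-to-SendUnbuffered rename in this patch touches every Stream implementation, a minimal sketch of an updated test double may be useful; the countingStream type below is hypothetical, and only the method names and signatures are taken from the patch.

package rangefeedexample

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/kv/kvpb"
)

// countingStream is a toy Stream implementation written against the renamed
// API: Send became SendUnbuffered, and SendIsThreadSafe became
// SendUnbufferedIsThreadSafe.
type countingStream struct {
	ctx    context.Context
	events int
	done   chan *kvpb.Error
}

func (s *countingStream) Context() context.Context { return s.ctx }

// SendUnbuffered counts delivered events. Like noopStream above, it is only
// safe when called from a single goroutine.
func (s *countingStream) SendUnbuffered(*kvpb.RangeFeedEvent) error {
	s.events++
	return nil
}

// SendUnbufferedIsThreadSafe is the renamed no-op marker method.
func (s *countingStream) SendUnbufferedIsThreadSafe() {}

// Disconnect mocks stream teardown by handing the terminal error to the test.
func (s *countingStream) Disconnect(err *kvpb.Error) {
	s.done <- err
}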
diff --git a/pkg/kv/kvserver/rangefeed/buffered_registration.go b/pkg/kv/kvserver/rangefeed/buffered_registration.go index 5f6b09eec2c9..4f0683705a04 100644 --- a/pkg/kv/kvserver/rangefeed/buffered_registration.go +++ b/pkg/kv/kvserver/rangefeed/buffered_registration.go @@ -209,7 +209,7 @@ func (br *bufferedRegistration) outputLoop(ctx context.Context) error { firstIteration = false select { case nextEvent := <-br.buf: - err := br.stream.Send(nextEvent.event) + err := br.stream.SendUnbuffered(nextEvent.event) nextEvent.alloc.Release(ctx) putPooledSharedEvent(nextEvent) if err != nil { @@ -271,7 +271,7 @@ func (br *bufferedRegistration) maybeRunCatchUpScan(ctx context.Context) error { br.metrics.RangeFeedCatchUpScanNanos.Inc(timeutil.Since(start).Nanoseconds()) }() - return catchUpIter.CatchUpScan(ctx, br.stream.Send, br.withDiff, br.withFiltering, br.withOmitRemote) + return catchUpIter.CatchUpScan(ctx, br.stream.SendUnbuffered, br.withDiff, br.withFiltering, br.withOmitRemote) } // Wait for this registration to completely process its internal buffer. diff --git a/pkg/kv/kvserver/rangefeed/buffered_sender.go b/pkg/kv/kvserver/rangefeed/buffered_sender.go new file mode 100644 index 000000000000..987bf3c3bffa --- /dev/null +++ b/pkg/kv/kvserver/rangefeed/buffered_sender.go @@ -0,0 +1,109 @@ +// Copyright 2024 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package rangefeed + +import ( + "context" + + "github.com/cockroachdb/cockroach/pkg/kv/kvpb" + "github.com/cockroachdb/cockroach/pkg/util/stop" +) + +// ┌─────────────────────────────────────────┐ MuxRangefeedEvent +// │ Node.MuxRangeFeed │◄──────────────────────────────────────────────────┐ +// └─────────────────┬───▲───────────────────┘ ▲ │ +// Sender.AddStream │ │LockedMuxStream.Send │ │ +// ┌────────────▼───┴──────────┐ │ │ +// │ Buffered/Unbuffered Sender├───────────┐ │ │ +// └────────────┬──────────────┘ │ │ │ +// │ │ │ │ +// ┌────────▼─────────┐ │ │ │ +// │ Stores.Rangefeed │ │ │ │ +// └────────┬─────────┘ │ │ │ +// │ │ │ │ +// ┌───────▼─────────┐ BufferedSender BufferedSender │ +// │ Store.Rangefeed │ SendUnbuffered/SendBuffered SendBufferedError ─────► BufferedSender.run +// └───────┬─────────┘ (catch-up scan)(live raft) ▲ +// │ ▲ │ +// ┌────────▼──────────┐ │ │ +// │ Replica.Rangefeed │ │ │ +// └────────┬──────────┘ │ │ +// │ │ │ +// ┌───────▼──────┐ │ │ +// │ Registration │ │ │ +// └──────┬───────┘ │ │ +// │ │ │ +// │ │ │ +// └─────────────────────────┘─────────────────┘ +// BufferedPerRangeEventSink.Send BufferedPerRangeEventSink.Disconnect +// +// BufferedSender is embedded in every rangefeed.BufferedPerRangeEventSink, +// serving as a helper which buffers events before forwarding events to the +// underlying gRPC stream. +// +// Refer to the comments above UnbufferedSender for more details on the role of +// senders in the entire rangefeed architecture. +type BufferedSender struct { + // Note that lockedMuxStream wraps the underlying grpc server stream, ensuring + // thread safety. + sender ServerStreamSender + + // metrics is used to record rangefeed metrics for the node. 
+ metrics RangefeedMetricsRecorder +} + +func NewBufferedSender( + sender ServerStreamSender, metrics RangefeedMetricsRecorder, +) *BufferedSender { + return &BufferedSender{ + sender: sender, + metrics: metrics, + } +} + +// SendBuffered buffers the event before sending it to the underlying +// ServerStreamSender. +func (bs *BufferedSender) SendBuffered( + event *kvpb.MuxRangeFeedEvent, alloc *SharedBudgetAllocation, +) error { + panic("unimplemented: buffered sender for rangefeed #126560") +} + +// SendUnbuffered bypasses the buffer and sends the event to the underlying +// ServerStreamSender directly. Note that this can cause event re-ordering. The +// caller is responsible for ensuring that events are sent in order. +func (bs *BufferedSender) SendUnbuffered( + event *kvpb.MuxRangeFeedEvent, alloc *SharedBudgetAllocation, +) error { + panic("unimplemented: buffered sender for rangefeed #126560") +} + +func (bs *BufferedSender) SendBufferedError(ev *kvpb.MuxRangeFeedEvent) { + // Disconnect stream and cancel context. Then call SendBuffered with the error + // event. + panic("unimplemented: buffered sender for rangefeed #126560") +} + +func (bs *BufferedSender) AddStream(streamID int64, cancel context.CancelFunc) { + panic("unimplemented: buffered sender for rangefeed #126560") +} + +func (bs *BufferedSender) Start(ctx context.Context, stopper *stop.Stopper) error { + panic("unimplemented: buffered sender for rangefeed #126560") +} + +func (bs *BufferedSender) Stop() { + panic("unimplemented: buffered sender for rangefeed #126560") +} + +func (bs *BufferedSender) Error() chan error { + panic("unimplemented: buffered sender for rangefeed #126560") +} diff --git a/pkg/kv/kvserver/rangefeed/buffered_stream.go b/pkg/kv/kvserver/rangefeed/buffered_stream.go new file mode 100644 index 000000000000..37895506f6f8 --- /dev/null +++ b/pkg/kv/kvserver/rangefeed/buffered_stream.go @@ -0,0 +1,110 @@ +// Copyright 2024 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package rangefeed + +import ( + "context" + + "github.com/cockroachdb/cockroach/pkg/kv/kvpb" + "github.com/cockroachdb/cockroach/pkg/roachpb" +) + +// BufferedStream is a Stream that can buffer events before sending them to the +// underlying Stream. Note that the caller may still choose to bypass the buffer +// and send to the underlying Stream by calling SendUnbuffered directly. Doing so +// can cause event re-ordering. The caller is responsible for ensuring that +// events are sent in order. +type BufferedStream interface { + Stream + // SendBuffered buffers the event before sending it to the underlying Stream. + SendBuffered(*kvpb.RangeFeedEvent, *SharedBudgetAllocation) error +} + +// BufferedPerRangeEventSink is an implementation of BufferedStream which is +// similar to PerRangeEventSink but buffers events in BufferedSender before +// forwarding them to the underlying gRPC stream.
+type BufferedPerRangeEventSink struct { + ctx context.Context + rangeID roachpb.RangeID + streamID int64 + wrapped *BufferedSender +} + +func NewBufferedPerRangeEventSink( + ctx context.Context, rangeID roachpb.RangeID, streamID int64, wrapped *BufferedSender, +) *BufferedPerRangeEventSink { + return &BufferedPerRangeEventSink{ + ctx: ctx, + rangeID: rangeID, + streamID: streamID, + wrapped: wrapped, + } +} + +var _ kvpb.RangeFeedEventSink = (*BufferedPerRangeEventSink)(nil) +var _ Stream = (*BufferedPerRangeEventSink)(nil) +var _ BufferedStream = (*BufferedPerRangeEventSink)(nil) + +func (s *BufferedPerRangeEventSink) Context() context.Context { + return s.ctx +} + +// SendUnbufferedIsThreadSafe is a no-op declaration method. It is a contract +// that the SendUnbuffered method is thread-safe. Note that +// BufferedSender.SendBuffered and BufferedSender.SendUnbuffered are both +// thread-safe. +func (s *BufferedPerRangeEventSink) SendUnbufferedIsThreadSafe() {} + +// SendBuffered buffers the event in BufferedSender and transfers the ownership +// of SharedBudgetAllocation to BufferedSender. BufferedSender is responsible +// for properly using and releasing it when an error occurs or when the event is +// sent. The event is guaranteed to be sent unless BufferedSender terminates +// before sending (such as due to a broken gRPC stream). +// +// If the function returns an error, it is safe to disconnect the stream and +// assume that all future SendBuffered calls on this stream will return an error. +func (s *BufferedPerRangeEventSink) SendBuffered( + event *kvpb.RangeFeedEvent, alloc *SharedBudgetAllocation, +) error { + response := &kvpb.MuxRangeFeedEvent{ + RangeFeedEvent: *event, + RangeID: s.rangeID, + StreamID: s.streamID, + } + return s.wrapped.SendBuffered(response, alloc) +} + +// SendUnbuffered bypasses the buffer and sends the event to the underlying gRPC +// stream directly. It blocks until the event is sent or an error occurs. +func (s *BufferedPerRangeEventSink) SendUnbuffered(event *kvpb.RangeFeedEvent) error { + response := &kvpb.MuxRangeFeedEvent{ + RangeFeedEvent: *event, + RangeID: s.rangeID, + StreamID: s.streamID, + } + return s.wrapped.SendUnbuffered(response, nil) +} + +// Disconnect implements the Stream interface. BufferedSender is then +// responsible for canceling the context of the stream. The actual rangefeed +// disconnection from the processor happens later, when the error event is +// popped from the queue and is about to be sent to the gRPC stream, so the +// caller should not rely on immediate disconnection; cleanup takes place +// asynchronously.
+func (s *BufferedPerRangeEventSink) Disconnect(err *kvpb.Error) { + ev := &kvpb.MuxRangeFeedEvent{ + StreamID: s.streamID, + RangeID: s.rangeID, + } + ev.MustSetValue(&kvpb.RangeFeedError{ + Error: *transformRangefeedErrToClientError(err), + }) + s.wrapped.SendBufferedError(ev) +} diff --git a/pkg/kv/kvserver/rangefeed/event_size_test.go b/pkg/kv/kvserver/rangefeed/event_size_test.go index a0e4a8f18ad0..8bcaf24f1a03 100644 --- a/pkg/kv/kvserver/rangefeed/event_size_test.go +++ b/pkg/kv/kvserver/rangefeed/event_size_test.go @@ -20,7 +20,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/randgen" "github.com/cockroachdb/cockroach/pkg/sql/rowenc/keyside" - "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/storage/enginepb" "github.com/cockroachdb/cockroach/pkg/testutils/storageutils" @@ -37,6 +36,7 @@ var ( pointKV = storageutils.PointKV rangeKV = storageutils.RangeKV ) + var ( testKey = roachpb.Key("/db1") testTxnID = uuid.MakeV4() @@ -251,11 +251,6 @@ func TestEventSizeCalculation(t *testing.T) { } } -func generateRandomizedTs(rand *rand.Rand) hlc.Timestamp { - // Avoid generating zero timestamp which will equal to an empty event. - return hlc.Timestamp{WallTime: int64(rand.Intn(100)) + 1} -} - func generateRandomizedBytes(rand *rand.Rand) []byte { const tableID = 42 dataTypes := []*types.T{types.String, types.Int, types.Decimal, types.Bytes, types.Bool, types.Date, types.Timestamp, types.Float} @@ -272,33 +267,6 @@ func generateRandomizedBytes(rand *rand.Rand) []byte { return key } -func generateStartAndEndKey(rand *rand.Rand) (roachpb.Key, roachpb.Key) { - start := rand.Intn(2 << 20) - end := start + rand.Intn(2<<20) - startDatum := tree.NewDInt(tree.DInt(start)) - endDatum := tree.NewDInt(tree.DInt(end)) - const tableID = 42 - - startKey, err := keyside.Encode( - keys.SystemSQLCodec.TablePrefix(tableID), - startDatum, - encoding.Ascending, - ) - if err != nil { - panic(err) - } - - endKey, err := keyside.Encode( - keys.SystemSQLCodec.TablePrefix(tableID), - endDatum, - encoding.Ascending, - ) - if err != nil { - panic(err) - } - return startKey, endKey -} - func generateRandomizedTxnId(rand *rand.Rand) uuid.UUID { var txnID uuid.UUID n := rand.Intn(100) @@ -326,14 +294,14 @@ func generateRandomTestData(rand *rand.Rand) testData { kvs: testSSTKVs, span: generateRandomizedSpan(rand).AsRawSpanWithNoLocals(), key: generateRandomizedBytes(rand), - timestamp: generateRandomizedTs(rand), + timestamp: GenerateRandomizedTs(rand, 100 /* maxTime */), value: generateRandomizedBytes(rand), startKey: startKey, endKey: endkey, txnID: generateRandomizedTxnId(rand), txnKey: generateRandomizedBytes(rand), txnIsoLevel: isolation.Levels()[rand.Intn(len(isolation.Levels()))], - txnMinTimestamp: generateRandomizedTs(rand), + txnMinTimestamp: GenerateRandomizedTs(rand, 100 /* maxTime */), omitInRangefeeds: rand.Intn(2) == 1, } } diff --git a/pkg/kv/kvserver/rangefeed/processor_helpers_test.go b/pkg/kv/kvserver/rangefeed/processor_helpers_test.go new file mode 100644 index 000000000000..55daa41fb6ed --- /dev/null +++ b/pkg/kv/kvserver/rangefeed/processor_helpers_test.go @@ -0,0 +1,450 @@ +// Copyright 2024 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. 
+// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package rangefeed + +import ( + "context" + "testing" + "time" + + "github.com/cockroachdb/cockroach/pkg/kv/kvpb" + "github.com/cockroachdb/cockroach/pkg/kv/kvserver/concurrency/isolation" + "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/settings/cluster" + "github.com/cockroachdb/cockroach/pkg/storage" + "github.com/cockroachdb/cockroach/pkg/storage/enginepb" + "github.com/cockroachdb/cockroach/pkg/util/hlc" + "github.com/cockroachdb/cockroach/pkg/util/log" + "github.com/cockroachdb/cockroach/pkg/util/stop" + "github.com/cockroachdb/cockroach/pkg/util/uuid" + "github.com/stretchr/testify/require" +) + +func makeLogicalOp(val interface{}) enginepb.MVCCLogicalOp { + var op enginepb.MVCCLogicalOp + op.MustSetValue(val) + return op +} + +func writeValueOpWithKV(key roachpb.Key, ts hlc.Timestamp, val []byte) enginepb.MVCCLogicalOp { + return makeLogicalOp(&enginepb.MVCCWriteValueOp{ + Key: key, + Timestamp: ts, + Value: val, + }) +} + +func writeValueOpWithPrevValue( + key roachpb.Key, ts hlc.Timestamp, val, prevValue []byte, +) enginepb.MVCCLogicalOp { + return makeLogicalOp(&enginepb.MVCCWriteValueOp{ + Key: key, + Timestamp: ts, + Value: val, + PrevValue: prevValue, + }) +} + +func writeValueOp(ts hlc.Timestamp) enginepb.MVCCLogicalOp { + return writeValueOpWithKV(roachpb.Key("a"), ts, []byte("val")) +} + +func writeIntentOpWithDetails( + txnID uuid.UUID, key []byte, iso isolation.Level, minTS, ts hlc.Timestamp, +) enginepb.MVCCLogicalOp { + return makeLogicalOp(&enginepb.MVCCWriteIntentOp{ + TxnID: txnID, + TxnKey: key, + TxnIsoLevel: iso, + TxnMinTimestamp: minTS, + Timestamp: ts, + }) +} + +func writeIntentOpFromMeta(txn enginepb.TxnMeta) enginepb.MVCCLogicalOp { + return writeIntentOpWithDetails( + txn.ID, txn.Key, txn.IsoLevel, txn.MinTimestamp, txn.WriteTimestamp) +} + +func writeIntentOpWithKey( + txnID uuid.UUID, key []byte, iso isolation.Level, ts hlc.Timestamp, +) enginepb.MVCCLogicalOp { + return writeIntentOpWithDetails(txnID, key, iso, ts /* minTS */, ts) +} + +func writeIntentOp(txnID uuid.UUID, ts hlc.Timestamp) enginepb.MVCCLogicalOp { + return writeIntentOpWithKey(txnID, nil /* key */, 0, ts) +} + +func updateIntentOp(txnID uuid.UUID, ts hlc.Timestamp) enginepb.MVCCLogicalOp { + return makeLogicalOp(&enginepb.MVCCUpdateIntentOp{ + TxnID: txnID, + Timestamp: ts, + }) +} + +func commitIntentOpWithKV( + txnID uuid.UUID, + key roachpb.Key, + ts hlc.Timestamp, + val []byte, + omitInRangefeeds bool, + originID uint32, +) enginepb.MVCCLogicalOp { + return makeLogicalOp(&enginepb.MVCCCommitIntentOp{ + TxnID: txnID, + Key: key, + Timestamp: ts, + Value: val, + OmitInRangefeeds: omitInRangefeeds, + OriginID: originID, + }) +} + +func commitIntentOpWithPrevValue( + txnID uuid.UUID, key roachpb.Key, ts hlc.Timestamp, val, prevValue []byte, omitInRangefeeds bool, +) enginepb.MVCCLogicalOp { + return makeLogicalOp(&enginepb.MVCCCommitIntentOp{ + TxnID: txnID, + Key: key, + Timestamp: ts, + Value: val, + PrevValue: prevValue, + OmitInRangefeeds: omitInRangefeeds, + }) +} + +func commitIntentOp(txnID uuid.UUID, ts hlc.Timestamp) enginepb.MVCCLogicalOp { + return commitIntentOpWithKV(txnID, roachpb.Key("a"), ts, nil /* val */, false /* 
omitInRangefeeds */, 0 /* originID */) +} + +func abortIntentOp(txnID uuid.UUID) enginepb.MVCCLogicalOp { + return makeLogicalOp(&enginepb.MVCCAbortIntentOp{ + TxnID: txnID, + }) +} + +func abortTxnOp(txnID uuid.UUID) enginepb.MVCCLogicalOp { + return makeLogicalOp(&enginepb.MVCCAbortTxnOp{ + TxnID: txnID, + }) +} + +func deleteRangeOp(startKey, endKey roachpb.Key, timestamp hlc.Timestamp) enginepb.MVCCLogicalOp { + return makeLogicalOp(&enginepb.MVCCDeleteRangeOp{ + StartKey: startKey, + EndKey: endKey, + Timestamp: timestamp, + }) +} + +func makeRangeFeedEvent(val interface{}) *kvpb.RangeFeedEvent { + var event kvpb.RangeFeedEvent + event.MustSetValue(val) + return &event +} + +func rangeFeedValueWithPrev(key roachpb.Key, val, prev roachpb.Value) *kvpb.RangeFeedEvent { + return makeRangeFeedEvent(&kvpb.RangeFeedValue{ + Key: key, + Value: val, + PrevValue: prev, + }) +} + +func rangeFeedValue(key roachpb.Key, val roachpb.Value) *kvpb.RangeFeedEvent { + return rangeFeedValueWithPrev(key, val, roachpb.Value{}) +} + +func rangeFeedCheckpoint(span roachpb.Span, ts hlc.Timestamp) *kvpb.RangeFeedEvent { + return makeRangeFeedEvent(&kvpb.RangeFeedCheckpoint{ + Span: span, + ResolvedTS: ts, + }) +} + +type storeOp struct { + kv storage.MVCCKeyValue + txn *roachpb.Transaction +} + +func makeTestEngineWithData(ops []storeOp) (storage.Engine, error) { + ctx := context.Background() + engine := storage.NewDefaultInMemForTesting() + for _, op := range ops { + kv := op.kv + _, err := storage.MVCCPut(ctx, engine, kv.Key.Key, kv.Key.Timestamp, roachpb.Value{RawBytes: kv.Value}, storage.MVCCWriteOptions{Txn: op.txn}) + if err != nil { + engine.Close() + return nil, err + } + } + return engine, nil +} + +const testProcessorEventCCap = 16 +const testProcessorEventCTimeout = 10 * time.Millisecond + +type processorTestHelper struct { + span roachpb.RSpan + rts *resolvedTimestamp + syncEventC func() + sendSpanSync func(*roachpb.Span) + scheduler *ClientScheduler +} + +// syncEventAndRegistrations waits for all previously sent events to be +// processed *and* for all registration output loops to fully process their own +// internal buffers. +func (h *processorTestHelper) syncEventAndRegistrations() { + h.sendSpanSync(&all) +} + +// syncEventAndRegistrationsSpan waits for all previously sent events to be +// processed *and* for matching registration output loops to fully process their +// own internal buffers. +func (h *processorTestHelper) syncEventAndRegistrationsSpan(span roachpb.Span) { + h.sendSpanSync(&span) +} + +// triggerTxnPushUntilPushed will schedule PushTxnQueued events until pushedC +// indicates that a transaction push attempt has started by posting an event. +// If a push does not happen in 10 seconds, the attempt fails. +func (h *processorTestHelper) triggerTxnPushUntilPushed(t *testing.T, pushedC <-chan struct{}) { + timeoutC := time.After(10 * time.Second) + ticker := time.NewTicker(100 * time.Millisecond) + defer ticker.Stop() + for { + if h.scheduler != nil { + h.scheduler.Enqueue(PushTxnQueued) + } + select { + case <-pushedC: + return + case <-ticker.C: + // We keep sending events to avoid the situation where an event arrives + // but the flag indicating that a push is still running is not reset.
+ case <-timeoutC: + t.Fatal("failed to get txn push notification") + } + } +} + +type procType bool + +const ( + legacyProcessor procType = false + schedulerProcessor = true +) + +var testTypes = []procType{legacyProcessor, schedulerProcessor} + +func (t procType) String() string { + if t { + return "scheduler" + } + return "legacy" +} + +type testConfig struct { + Config + useScheduler bool + isc IntentScannerConstructor +} + +type option func(*testConfig) + +func withPusher(txnPusher TxnPusher) option { + return func(config *testConfig) { + config.PushTxnsInterval = 10 * time.Millisecond + config.PushTxnsAge = 50 * time.Millisecond + config.TxnPusher = txnPusher + } +} + +func withProcType(t procType) option { + return func(config *testConfig) { + config.useScheduler = bool(t) + } +} + +func withBudget(b *FeedBudget) option { + return func(config *testConfig) { + config.MemBudget = b + } +} + +func withMetrics(m *Metrics) option { + return func(config *testConfig) { + config.Metrics = m + } +} + +func withRtsScanner(scanner IntentScanner) option { + return func(config *testConfig) { + if scanner != nil { + config.isc = func() IntentScanner { + return scanner + } + } + } +} + +func withChanTimeout(d time.Duration) option { + return func(config *testConfig) { + config.EventChanTimeout = d + } +} + +func withChanCap(cap int) option { + return func(config *testConfig) { + config.EventChanCap = cap + } +} + +func withEventTimeout(timeout time.Duration) option { + return func(config *testConfig) { + config.EventChanTimeout = timeout + } +} + +func withSpan(span roachpb.RSpan) option { + return func(config *testConfig) { + config.Span = span + } +} + +func withSettings(st *cluster.Settings) option { + return func(config *testConfig) { + config.Settings = st + } +} + +func withPushTxnsIntervalAge(interval, age time.Duration) option { + return func(config *testConfig) { + config.PushTxnsInterval = interval + config.PushTxnsAge = age + } +} + +// blockingScanner is a test intent scanner that allows tests to track the +// lifecycle of tasks. +// 1. it will always block on startup and will wait for block to be closed to +// proceed +// 2.
when closed it will close done channel to signal completion +type blockingScanner struct { + wrapped IntentScanner + + block chan interface{} + done chan interface{} +} + +func (s *blockingScanner) ConsumeIntents( + ctx context.Context, startKey roachpb.Key, endKey roachpb.Key, consumer eventConsumer, +) error { + if s.block != nil { + select { + case <-s.block: + case <-ctx.Done(): + return ctx.Err() + } + } + return s.wrapped.ConsumeIntents(ctx, startKey, endKey, consumer) +} + +func (s *blockingScanner) Close() { + s.wrapped.Close() + close(s.done) +} + +func makeIntentScanner(data []storeOp, span roachpb.RSpan) (*blockingScanner, func(), error) { + engine, err := makeTestEngineWithData(data) + if err != nil { + return nil, nil, err + } + scanner, err := NewSeparatedIntentScanner(context.Background(), engine, span) + if err != nil { + return nil, nil, err + } + return &blockingScanner{ + wrapped: scanner, + block: make(chan interface{}), + done: make(chan interface{}), + }, func() { + engine.Close() + }, nil +} + +func newTestProcessor( + t testing.TB, opts ...option, +) (Processor, *processorTestHelper, *stop.Stopper) { + t.Helper() + stopper := stop.NewStopper() + st := cluster.MakeTestingClusterSettings() + + cfg := testConfig{ + Config: Config{ + RangeID: 2, + Stopper: stopper, + Settings: st, + AmbientContext: log.MakeTestingAmbientCtxWithNewTracer(), + Clock: hlc.NewClockForTesting(nil), + Span: roachpb.RSpan{Key: roachpb.RKey("a"), EndKey: roachpb.RKey("z")}, + EventChanTimeout: testProcessorEventCTimeout, + EventChanCap: testProcessorEventCCap, + Metrics: NewMetrics(), + }, + } + for _, o := range opts { + o(&cfg) + } + if cfg.useScheduler { + sch := NewScheduler(SchedulerConfig{ + Workers: 1, + PriorityWorkers: 1, + Metrics: NewSchedulerMetrics(time.Second), + }) + require.NoError(t, sch.Start(context.Background(), stopper)) + cfg.Scheduler = sch + // Also create a dummy priority processor to populate priorityIDs for + // BenchmarkRangefeed. It should never be called. 
+ noop := func(e processorEventType) processorEventType { + if e != Stopped { + t.Errorf("unexpected event %s for noop priority processor", e) + } + return 0 + } + require.NoError(t, sch.register(9, noop, true /* priority */)) + } + s := NewProcessor(cfg.Config) + h := processorTestHelper{} + switch p := s.(type) { + case *LegacyProcessor: + h.rts = &p.rts + h.span = p.Span + h.syncEventC = p.syncEventC + h.sendSpanSync = func(span *roachpb.Span) { + p.syncEventCWithEvent(&syncEvent{c: make(chan struct{}), testRegCatchupSpan: span}) + } + case *ScheduledProcessor: + h.rts = &p.rts + h.span = p.Span + h.syncEventC = p.syncEventC + h.sendSpanSync = func(span *roachpb.Span) { + p.syncSendAndWait(&syncEvent{c: make(chan struct{}), testRegCatchupSpan: span}) + } + h.scheduler = &p.scheduler + default: + panic("unknown processor type") + } + require.NoError(t, s.Start(stopper, cfg.isc)) + return s, &h, stopper +} diff --git a/pkg/kv/kvserver/rangefeed/processor_test.go b/pkg/kv/kvserver/rangefeed/processor_test.go index 7217af9ba228..392fb78dff5b 100644 --- a/pkg/kv/kvserver/rangefeed/processor_test.go +++ b/pkg/kv/kvserver/rangefeed/processor_test.go @@ -27,441 +27,17 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv/kvserver/concurrency/isolation" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings/cluster" - "github.com/cockroachdb/cockroach/pkg/storage" "github.com/cockroachdb/cockroach/pkg/storage/enginepb" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/leaktest" - "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/mon" - "github.com/cockroachdb/cockroach/pkg/util/stop" "github.com/cockroachdb/cockroach/pkg/util/uuid" "github.com/cockroachdb/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -func makeLogicalOp(val interface{}) enginepb.MVCCLogicalOp { - var op enginepb.MVCCLogicalOp - op.MustSetValue(val) - return op -} - -func writeValueOpWithKV(key roachpb.Key, ts hlc.Timestamp, val []byte) enginepb.MVCCLogicalOp { - return makeLogicalOp(&enginepb.MVCCWriteValueOp{ - Key: key, - Timestamp: ts, - Value: val, - }) -} - -func writeValueOpWithPrevValue( - key roachpb.Key, ts hlc.Timestamp, val, prevValue []byte, -) enginepb.MVCCLogicalOp { - return makeLogicalOp(&enginepb.MVCCWriteValueOp{ - Key: key, - Timestamp: ts, - Value: val, - PrevValue: prevValue, - }) -} - -func writeValueOp(ts hlc.Timestamp) enginepb.MVCCLogicalOp { - return writeValueOpWithKV(roachpb.Key("a"), ts, []byte("val")) -} - -func writeIntentOpWithDetails( - txnID uuid.UUID, key []byte, iso isolation.Level, minTS, ts hlc.Timestamp, -) enginepb.MVCCLogicalOp { - return makeLogicalOp(&enginepb.MVCCWriteIntentOp{ - TxnID: txnID, - TxnKey: key, - TxnIsoLevel: iso, - TxnMinTimestamp: minTS, - Timestamp: ts, - }) -} - -func writeIntentOpFromMeta(txn enginepb.TxnMeta) enginepb.MVCCLogicalOp { - return writeIntentOpWithDetails( - txn.ID, txn.Key, txn.IsoLevel, txn.MinTimestamp, txn.WriteTimestamp) -} - -func writeIntentOpWithKey( - txnID uuid.UUID, key []byte, iso isolation.Level, ts hlc.Timestamp, -) enginepb.MVCCLogicalOp { - return writeIntentOpWithDetails(txnID, key, iso, ts /* minTS */, ts) -} - -func writeIntentOp(txnID uuid.UUID, ts hlc.Timestamp) enginepb.MVCCLogicalOp { - return 
writeIntentOpWithKey(txnID, nil /* key */, 0, ts) -} - -func updateIntentOp(txnID uuid.UUID, ts hlc.Timestamp) enginepb.MVCCLogicalOp { - return makeLogicalOp(&enginepb.MVCCUpdateIntentOp{ - TxnID: txnID, - Timestamp: ts, - }) -} - -func commitIntentOpWithKV( - txnID uuid.UUID, - key roachpb.Key, - ts hlc.Timestamp, - val []byte, - omitInRangefeeds bool, - originID uint32, -) enginepb.MVCCLogicalOp { - return makeLogicalOp(&enginepb.MVCCCommitIntentOp{ - TxnID: txnID, - Key: key, - Timestamp: ts, - Value: val, - OmitInRangefeeds: omitInRangefeeds, - OriginID: originID, - }) -} - -func commitIntentOpWithPrevValue( - txnID uuid.UUID, key roachpb.Key, ts hlc.Timestamp, val, prevValue []byte, omitInRangefeeds bool, -) enginepb.MVCCLogicalOp { - return makeLogicalOp(&enginepb.MVCCCommitIntentOp{ - TxnID: txnID, - Key: key, - Timestamp: ts, - Value: val, - PrevValue: prevValue, - OmitInRangefeeds: omitInRangefeeds, - }) -} - -func commitIntentOp(txnID uuid.UUID, ts hlc.Timestamp) enginepb.MVCCLogicalOp { - return commitIntentOpWithKV(txnID, roachpb.Key("a"), ts, nil /* val */, false /* omitInRangefeeds */, 0 /* originID */) -} - -func abortIntentOp(txnID uuid.UUID) enginepb.MVCCLogicalOp { - return makeLogicalOp(&enginepb.MVCCAbortIntentOp{ - TxnID: txnID, - }) -} - -func abortTxnOp(txnID uuid.UUID) enginepb.MVCCLogicalOp { - return makeLogicalOp(&enginepb.MVCCAbortTxnOp{ - TxnID: txnID, - }) -} - -func deleteRangeOp(startKey, endKey roachpb.Key, timestamp hlc.Timestamp) enginepb.MVCCLogicalOp { - return makeLogicalOp(&enginepb.MVCCDeleteRangeOp{ - StartKey: startKey, - EndKey: endKey, - Timestamp: timestamp, - }) -} - -func makeRangeFeedEvent(val interface{}) *kvpb.RangeFeedEvent { - var event kvpb.RangeFeedEvent - event.MustSetValue(val) - return &event -} - -func rangeFeedValueWithPrev(key roachpb.Key, val, prev roachpb.Value) *kvpb.RangeFeedEvent { - return makeRangeFeedEvent(&kvpb.RangeFeedValue{ - Key: key, - Value: val, - PrevValue: prev, - }) -} - -func rangeFeedValue(key roachpb.Key, val roachpb.Value) *kvpb.RangeFeedEvent { - return rangeFeedValueWithPrev(key, val, roachpb.Value{}) -} - -func rangeFeedCheckpoint(span roachpb.Span, ts hlc.Timestamp) *kvpb.RangeFeedEvent { - return makeRangeFeedEvent(&kvpb.RangeFeedCheckpoint{ - Span: span, - ResolvedTS: ts, - }) -} - -type storeOp struct { - kv storage.MVCCKeyValue - txn *roachpb.Transaction -} - -func makeTestEngineWithData(ops []storeOp) (storage.Engine, error) { - ctx := context.Background() - engine := storage.NewDefaultInMemForTesting() - for _, op := range ops { - kv := op.kv - _, err := storage.MVCCPut(ctx, engine, kv.Key.Key, kv.Key.Timestamp, roachpb.Value{RawBytes: kv.Value}, storage.MVCCWriteOptions{Txn: op.txn}) - if err != nil { - engine.Close() - return nil, err - } - } - return engine, nil -} - -const testProcessorEventCCap = 16 -const testProcessorEventCTimeout = 10 * time.Millisecond - -type processorTestHelper struct { - span roachpb.RSpan - rts *resolvedTimestamp - syncEventC func() - sendSpanSync func(*roachpb.Span) - scheduler *ClientScheduler -} - -// syncEventAndRegistrations waits for all previously sent events to be -// processed *and* for all registration output loops to fully process their own -// internal buffers. -func (h *processorTestHelper) syncEventAndRegistrations() { - h.sendSpanSync(&all) -} - -// syncEventAndRegistrations waits for all previously sent events to be -// processed *and* for matching registration output loops to fully process their -// own internal buffers. 
-func (h *processorTestHelper) syncEventAndRegistrationsSpan(span roachpb.Span) { - h.sendSpanSync(&span) -} - -// triggerTxnPushUntilPushed will schedule PushTxnQueued events until pushedC -// indicates that a transaction push attempt has started by posting an event. -// If a push does not happen in 10 seconds, the attempt fails. -func (h *processorTestHelper) triggerTxnPushUntilPushed(t *testing.T, pushedC <-chan struct{}) { - timeoutC := time.After(10 * time.Second) - ticker := time.NewTicker(100 * time.Millisecond) - defer ticker.Stop() - for { - if h.scheduler != nil { - h.scheduler.Enqueue(PushTxnQueued) - } - select { - case <-pushedC: - return - case <-ticker.C: - // We keep sending events to avoid the situation where event arrives - // but flag indicating that push is still running is not reset. - case <-timeoutC: - t.Fatal("failed to get txn push notification") - } - } -} - -type procType bool - -const ( - legacyProcessor procType = false - schedulerProcessor = true -) - -var testTypes = []procType{legacyProcessor, schedulerProcessor} - -func (t procType) String() string { - if t { - return "scheduler" - } - return "legacy" -} - -type testConfig struct { - Config - useScheduler bool - isc IntentScannerConstructor -} - -type option func(*testConfig) - -func withPusher(txnPusher TxnPusher) option { - return func(config *testConfig) { - config.PushTxnsInterval = 10 * time.Millisecond - config.PushTxnsAge = 50 * time.Millisecond - config.TxnPusher = txnPusher - } -} - -func withProcType(t procType) option { - return func(config *testConfig) { - config.useScheduler = bool(t) - } -} - -func withBudget(b *FeedBudget) option { - return func(config *testConfig) { - config.MemBudget = b - } -} - -func withMetrics(m *Metrics) option { - return func(config *testConfig) { - config.Metrics = m - } -} - -func withRtsScanner(scanner IntentScanner) option { - return func(config *testConfig) { - if scanner != nil { - config.isc = func() IntentScanner { - return scanner - } - } - } -} - -func withChanTimeout(d time.Duration) option { - return func(config *testConfig) { - config.EventChanTimeout = d - } -} - -func withChanCap(cap int) option { - return func(config *testConfig) { - config.EventChanCap = cap - } -} - -func withEventTimeout(timeout time.Duration) option { - return func(config *testConfig) { - config.EventChanTimeout = timeout - } -} - -func withSpan(span roachpb.RSpan) option { - return func(config *testConfig) { - config.Span = span - } -} - -func withSettings(st *cluster.Settings) option { - return func(config *testConfig) { - config.Settings = st - } -} - -func withPushTxnsIntervalAge(interval, age time.Duration) option { - return func(config *testConfig) { - config.PushTxnsInterval = interval - config.PushTxnsAge = age - } -} - -// blockingScanner is a test intent scanner that allows test to track lifecycle -// of tasks. -// 1. it will always block on startup and will wait for block to be closed to -// proceed -// 2. 
when closed it will close done channel to signal completion -type blockingScanner struct { - wrapped IntentScanner - - block chan interface{} - done chan interface{} -} - -func (s *blockingScanner) ConsumeIntents( - ctx context.Context, startKey roachpb.Key, endKey roachpb.Key, consumer eventConsumer, -) error { - if s.block != nil { - select { - case <-s.block: - case <-ctx.Done(): - return ctx.Err() - } - } - return s.wrapped.ConsumeIntents(ctx, startKey, endKey, consumer) -} - -func (s *blockingScanner) Close() { - s.wrapped.Close() - close(s.done) -} - -func makeIntentScanner(data []storeOp, span roachpb.RSpan) (*blockingScanner, func(), error) { - engine, err := makeTestEngineWithData(data) - if err != nil { - return nil, nil, err - } - scanner, err := NewSeparatedIntentScanner(context.Background(), engine, span) - if err != nil { - return nil, nil, err - } - return &blockingScanner{ - wrapped: scanner, - block: make(chan interface{}), - done: make(chan interface{}), - }, func() { - engine.Close() - }, nil -} - -func newTestProcessor( - t testing.TB, opts ...option, -) (Processor, *processorTestHelper, *stop.Stopper) { - t.Helper() - stopper := stop.NewStopper() - st := cluster.MakeTestingClusterSettings() - - cfg := testConfig{ - Config: Config{ - RangeID: 2, - Stopper: stopper, - Settings: st, - AmbientContext: log.MakeTestingAmbientCtxWithNewTracer(), - Clock: hlc.NewClockForTesting(nil), - Span: roachpb.RSpan{Key: roachpb.RKey("a"), EndKey: roachpb.RKey("z")}, - EventChanTimeout: testProcessorEventCTimeout, - EventChanCap: testProcessorEventCCap, - Metrics: NewMetrics(), - }, - } - for _, o := range opts { - o(&cfg) - } - if cfg.useScheduler { - sch := NewScheduler(SchedulerConfig{ - Workers: 1, - PriorityWorkers: 1, - Metrics: NewSchedulerMetrics(time.Second), - }) - require.NoError(t, sch.Start(context.Background(), stopper)) - cfg.Scheduler = sch - // Also create a dummy priority processor to populate priorityIDs for - // BenchmarkRangefeed. It should never be called. 
- noop := func(e processorEventType) processorEventType { - if e != Stopped { - t.Errorf("unexpected event %s for noop priority processor", e) - } - return 0 - } - require.NoError(t, sch.register(9, noop, true /* priority */)) - } - s := NewProcessor(cfg.Config) - h := processorTestHelper{} - switch p := s.(type) { - case *LegacyProcessor: - h.rts = &p.rts - h.span = p.Span - h.syncEventC = p.syncEventC - h.sendSpanSync = func(span *roachpb.Span) { - p.syncEventCWithEvent(&syncEvent{c: make(chan struct{}), testRegCatchupSpan: span}) - } - case *ScheduledProcessor: - h.rts = &p.rts - h.span = p.Span - h.syncEventC = p.syncEventC - h.sendSpanSync = func(span *roachpb.Span) { - p.syncSendAndWait(&syncEvent{c: make(chan struct{}), testRegCatchupSpan: span}) - } - h.scheduler = &p.scheduler - default: - panic("unknown processor type") - } - require.NoError(t, s.Start(stopper, cfg.isc)) - return s, &h, stopper -} - func TestProcessorBasic(t *testing.T) { defer leaktest.AfterTest(t)() testutils.RunValues(t, "proc type", testTypes, func(t *testing.T, pt procType) { @@ -1766,9 +1342,9 @@ func newConsumer(blockAfter int) *consumer { } } -func (c *consumer) SendIsThreadSafe() {} +func (c *consumer) SendUnbufferedIsThreadSafe() {} -func (c *consumer) Send(e *kvpb.RangeFeedEvent) error { +func (c *consumer) SendUnbuffered(e *kvpb.RangeFeedEvent) error { if e.Val != nil { v := int(atomic.AddInt32(&c.sentValues, 1)) if v == c.blockAfter { diff --git a/pkg/kv/kvserver/rangefeed/registry.go b/pkg/kv/kvserver/rangefeed/registry.go index 1337b2acf268..1529657b0a12 100644 --- a/pkg/kv/kvserver/rangefeed/registry.go +++ b/pkg/kv/kvserver/rangefeed/registry.go @@ -22,16 +22,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/log" ) -// Stream is a object capable of transmitting RangeFeedEvents. -type Stream interface { - kvpb.RangeFeedEventSink - // Disconnect disconnects the stream with the provided error. Note that this - // function can be called by the processor worker while holding raftMu, so it - // is important that this function doesn't block IO or try acquiring locks - // that could lead to deadlocks. - Disconnect(err *kvpb.Error) -} - // registration defines an interface for registration that can be added to a // processor registry. Implemented by bufferedRegistration. 
type registration interface { diff --git a/pkg/kv/kvserver/rangefeed/registry_test.go b/pkg/kv/kvserver/rangefeed/registry_test.go index 96d5802d117e..addfffa25429 100644 --- a/pkg/kv/kvserver/rangefeed/registry_test.go +++ b/pkg/kv/kvserver/rangefeed/registry_test.go @@ -66,9 +66,9 @@ func (s *testStream) Cancel() { s.ctxDone() } -func (s *testStream) SendIsThreadSafe() {} +func (s *testStream) SendUnbufferedIsThreadSafe() {} -func (s *testStream) Send(e *kvpb.RangeFeedEvent) error { +func (s *testStream) SendUnbuffered(e *kvpb.RangeFeedEvent) error { s.mu.Lock() defer s.mu.Unlock() if s.mu.sendErr != nil { diff --git a/pkg/kv/kvserver/rangefeed/scheduled_processor.go b/pkg/kv/kvserver/rangefeed/scheduled_processor.go index 9f484c6f30a2..c883c881539d 100644 --- a/pkg/kv/kvserver/rangefeed/scheduled_processor.go +++ b/pkg/kv/kvserver/rangefeed/scheduled_processor.go @@ -316,16 +316,22 @@ func (p *ScheduledProcessor) Register( p.syncEventC() blockWhenFull := p.Config.EventChanTimeout == 0 // for testing - r := newBufferedRegistration( - span.AsRawSpanWithNoLocals(), startTS, catchUpIter, withDiff, withFiltering, withOmitRemote, - p.Config.EventChanCap, blockWhenFull, p.Metrics, stream, disconnectFn, - ) + var r registration + if _, ok := stream.(BufferedStream); ok { + log.Fatalf(context.Background(), + "unimplemented: unbuffered registrations for rangefeed, see #126560") + } else { + r = newBufferedRegistration( + span.AsRawSpanWithNoLocals(), startTS, catchUpIter, withDiff, withFiltering, withOmitRemote, + p.Config.EventChanCap, blockWhenFull, p.Metrics, stream, disconnectFn, + ) + } filter := runRequest(p, func(ctx context.Context, p *ScheduledProcessor) *Filter { if p.stopping { return nil } - if !p.Span.AsRawSpanWithNoLocals().Contains(r.span) { + if !p.Span.AsRawSpanWithNoLocals().Contains(r.getSpan()) { log.Fatalf(ctx, "registration %s not in Processor's key range %v", r, p.Span) } @@ -348,8 +354,8 @@ func (p *ScheduledProcessor) Register( if p.unregisterClient(r) { // unreg callback is set by replica to tear down processors that have // zero registrations left and to update event filters. - if r.unreg != nil { - r.unreg() + if f := r.getUnreg(); f != nil { + f() } } } diff --git a/pkg/kv/kvserver/rangefeed/stream_muxer_test_helper.go b/pkg/kv/kvserver/rangefeed/sender_helper_test.go similarity index 100% rename from pkg/kv/kvserver/rangefeed/stream_muxer_test_helper.go rename to pkg/kv/kvserver/rangefeed/sender_helper_test.go diff --git a/pkg/kv/kvserver/rangefeed/stream.go b/pkg/kv/kvserver/rangefeed/stream.go new file mode 100644 index 000000000000..f5da943c9d64 --- /dev/null +++ b/pkg/kv/kvserver/rangefeed/stream.go @@ -0,0 +1,101 @@ +// Copyright 2024 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package rangefeed + +import ( + "context" + + "github.com/cockroachdb/cockroach/pkg/kv/kvpb" + "github.com/cockroachdb/cockroach/pkg/roachpb" +) + +// Stream is an object capable of transmitting RangeFeedEvents from a server +// rangefeed to a client. +type Stream interface { + kvpb.RangeFeedEventSink + // Disconnect disconnects the stream with the provided error. 
Note that this + // function can be called by the processor worker while holding raftMu, so it + // is important that this function doesn't block IO or try acquiring locks + // that could lead to deadlocks. + Disconnect(err *kvpb.Error) +} + +// PerRangeEventSink is an implementation of Stream which annotates each +// response with rangeID and streamID. It is used by MuxRangeFeed. +type PerRangeEventSink struct { + ctx context.Context + rangeID roachpb.RangeID + streamID int64 + wrapped *UnbufferedSender +} + +func NewPerRangeEventSink( + ctx context.Context, rangeID roachpb.RangeID, streamID int64, wrapped *UnbufferedSender, +) *PerRangeEventSink { + return &PerRangeEventSink{ + ctx: ctx, + rangeID: rangeID, + streamID: streamID, + wrapped: wrapped, + } +} + +var _ kvpb.RangeFeedEventSink = (*PerRangeEventSink)(nil) +var _ Stream = (*PerRangeEventSink)(nil) + +func (s *PerRangeEventSink) Context() context.Context { + return s.ctx +} + +// SendUnbufferedIsThreadSafe is a no-op declaration method. It is a contract +// that the SendUnbuffered method is thread-safe. Note that +// UnbufferedSender.SendUnbuffered is thread-safe. +func (s *PerRangeEventSink) SendUnbufferedIsThreadSafe() {} + +func (s *PerRangeEventSink) SendUnbuffered(event *kvpb.RangeFeedEvent) error { + response := &kvpb.MuxRangeFeedEvent{ + RangeFeedEvent: *event, + RangeID: s.rangeID, + StreamID: s.streamID, + } + return s.wrapped.SendUnbuffered(response) +} + +// Disconnect implements the Stream interface. It requests the UnbufferedSender +// to detach the stream. The UnbufferedSender is then responsible for handling +// the actual disconnection and additional cleanup. Note that the caller should +// not rely on immediate disconnection, as cleanup takes place asynchronously. +func (s *PerRangeEventSink) Disconnect(err *kvpb.Error) { + ev := &kvpb.MuxRangeFeedEvent{ + RangeID: s.rangeID, + StreamID: s.streamID, + } + ev.MustSetValue(&kvpb.RangeFeedError{ + Error: *transformRangefeedErrToClientError(err), + }) + s.wrapped.SendBufferedError(ev) +} + +// transformRangefeedErrToClientError converts a rangefeed error to a client +// error to be sent back to the client. It also handles nil values, preventing +// a nil pointer dereference. +// +// NB: when processor.Stop() is called (e.g. when it no longer has any +// registrations), it attempts to close all feeds again with a nil error. +// Theoretically, this should never happen, as the processor always stops with +// a reason if feeds are active. +func transformRangefeedErrToClientError(err *kvpb.Error) *kvpb.Error { + if err == nil { + return kvpb.NewError( + kvpb.NewRangeFeedRetryError(kvpb.RangeFeedRetryError_REASON_RANGEFEED_CLOSED)) + } + return err +} diff --git a/pkg/kv/kvserver/rangefeed/stream_muxer.go b/pkg/kv/kvserver/rangefeed/stream_muxer.go deleted file mode 100644 index c85fa2dea8b2..000000000000 --- a/pkg/kv/kvserver/rangefeed/stream_muxer.go +++ /dev/null @@ -1,319 +0,0 @@ -// Copyright 2024 The Cockroach Authors. -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt.
- -package rangefeed - -import ( - "context" - "sync" - - "github.com/cockroachdb/cockroach/pkg/kv/kvpb" - "github.com/cockroachdb/cockroach/pkg/roachpb" - "github.com/cockroachdb/cockroach/pkg/util/log" - "github.com/cockroachdb/cockroach/pkg/util/stop" - "github.com/cockroachdb/cockroach/pkg/util/syncutil" -) - -// RangefeedMetricsRecorder is an interface for recording rangefeed metrics. -type RangefeedMetricsRecorder interface { - UpdateMetricsOnRangefeedConnect() - UpdateMetricsOnRangefeedDisconnect() -} - -// ServerStreamSender forwards MuxRangefeedEvents from StreamMuxer to the -// underlying stream. -type ServerStreamSender interface { - // Send must be thread-safe to be called concurrently. - Send(*kvpb.MuxRangeFeedEvent) error - // SendIsThreadSafe is a no-op declaration method. It is a contract that the - // interface has a thread-safe Send method. - SendIsThreadSafe() -} - -// StreamMuxer is responsible for managing a set of active rangefeed streams and -// forwarding rangefeed completion errors to the client. -// -// ┌───────────────────────────┐ -// │ DistSender.RangefeedSpans │ rangefeedMuxer -// └───────────────────────────┘ -// │ divideAllSpansOnRangeBoundaries -// ┌───────────────────────────┬───────────────────────────┐ -// ▼ ▼ ▼ -// ┌────────────────────┐ ┌────────────────────┐ ┌────────────────────┐ -// │ rangefeedMuxer │ │ rangefeedMuxer │ │ rangefeedMuxer │ -// │startSingleRangefeed│ │startSingleRangefeed│ │startSingleRangefeed│ -// └─────────┬──────────┘ └──────────┬─────────┘ └──────────┬─────────┘ -// ▼ ▼ ▼ -// new streamID new streamID new streamID -// ┌────────────────┐ ┌────────────────┐ ┌────────────────┐ -// │RangefeedRequest│ │RangefeedRequest│ │RangefeedRequest│ -// └────────────────┘ └────────────────┘ └────────────────┘ -// rangefeedMuxer rangefeedMuxer rangefeedMuxer -// establishMuxConnection establishMuxConnection establishMuxConnection -// │ │ │ -// ▼ ▼ ▼ -// rangefeedMuxer.startNodeMuxRangefeed rangefeedMuxer.startNodeMuxRangefeed -// rangefeedMuxer.receiveEventsFromNode rangefeedMuxer.receiveEventsFromNode -// ┌─────────────────────────────────────────┐ ┌─────────────────────────────────────────┐ -// │rpc.RestrictedInternalClient.MuxRangeFeed│ │rpc.RestrictedInternalClient.MuxRangeFeed│ -// └─────────────┬────────────▲──────────────┘ └─────────────────────────────────────────┘ -// kvpb.RangefeedRequest │ │ kvpb.MuxRangefeedEvent -// ┌─────────────▼────────────┴──────────────┐ -// │ Node.MuxRangeFeed │◄───────────────── MuxRangefeedEvent with kvpb.RangeFeedError -// └─────────────────┬───▲───────────────────┘ (client: rangefeedMuxer.restartActiveRangeFeed) -// StreamMuxer.AddStream │ │LockedMuxStream.Send(*kvpb.MuxRangefeedEvent) │ -// ┌────▼───┴────┐ │ -// │ StreamMuxer ├────────────────────────────────────┬─────────────────────────────┐ -// └──────┬──────┘ │ │ -// │ │ │ -// ┌────────▼─────────┐ │ │ -// │ Stores.Rangefeed │ │ │ -// └────────┬─────────┘ │ │ -// │ │ │ -// ┌───────▼─────────┐ StreamMuxer StreamMuxer -// │ Store.Rangefeed │ RegisterRangefeedCleanUp DisconnectStreamWithError -// └───────┬─────────┘ ▲ ▲ -// │ │ │ -// ┌────────▼──────────┐ │ │ -// │ Replica.Rangefeed │ │ │ -// └────────┬──────────┘ │ │ -// │ │ │ -// ┌──────▼───────┐ │ │ -// │ Registration ├───────────────────────────────────┘ │ -// └──────┬───────┘ ScheduledProcessor.Register │ -// │ │ -// └─────────────────────────────────────────────────────────────────────────┘ -// registration.disconnect -type StreamMuxer struct { - // taskCancel 
is a function to cancel StreamMuxer.run spawned in the - // background. It is called by StreamMuxer.Stop. It is expected to be called - // after StreamMuxer.Start. - taskCancel context.CancelFunc - - // wg is used to coordinate async tasks spawned by StreamMuxer. Currently, - // there is only one task spawned by StreamMuxer.Start (StreamMuxer.run). - wg sync.WaitGroup - - // errCh is used to signal errors from StreamMuxer.run back to the caller. If - // non-empty, the StreamMuxer.run is finished and error should be handled. - // Note that it is possible for StreamMuxer.run to be finished without sending - // an error to errCh. Other goroutines are expected to receive the same - // shutdown signal in this case and handle error appropriately. - errCh chan error - - // Note that lockedMuxStream wraps the underlying grpc server stream, ensuring - // thread safety. - sender ServerStreamSender - - // metrics is used to record rangefeed metrics for the node. - metrics RangefeedMetricsRecorder - - // streamID -> streamInfo for active rangefeeds - activeStreams syncutil.Map[int64, streamInfo] - - // notifyMuxError is a buffered channel of size 1 used to signal the presence - // of muxErrors. Additional signals are dropped if the channel is already full - // so that it's non-blocking. - notifyMuxError chan struct{} - - mu struct { - syncutil.Mutex - // muxErrors is a slice of mux rangefeed completion errors to be sent back - // to the client. Upon receiving the error, the client restart rangefeed - // when possible. - muxErrors []*kvpb.MuxRangeFeedEvent - } -} - -// NewStreamMuxer creates a new StreamMuxer. There should only one for each -// incoming node.MuxRangefeed RPC stream. -func NewStreamMuxer(sender ServerStreamSender, metrics RangefeedMetricsRecorder) *StreamMuxer { - return &StreamMuxer{ - sender: sender, - metrics: metrics, - notifyMuxError: make(chan struct{}, 1), - } -} - -// streamInfo contains the rangeID and cancel function for an active rangefeed. -// It should be treated as immutable. -type streamInfo struct { - rangeID roachpb.RangeID - cancel context.CancelFunc -} - -// AddStream registers a server rangefeed stream with the StreamMuxer. It -// remains active until DisconnectStreamWithError is called with the same -// streamID. Caller must ensure no duplicate stream IDs are added without -// disconnecting the old one first. -func (sm *StreamMuxer) AddStream( - streamID int64, rangeID roachpb.RangeID, cancel context.CancelFunc, -) { - if _, loaded := sm.activeStreams.LoadOrStore(streamID, &streamInfo{ - rangeID: rangeID, - cancel: cancel, - }); loaded { - log.Fatalf(context.Background(), "stream %d already exists", streamID) - } - sm.metrics.UpdateMetricsOnRangefeedConnect() -} - -// SendIsThreadSafe is a no-op declaration method. It is a contract that the -// Send method is thread-safe. Note that Send wraps ServerStreamSender which -// also declares its Send method to be thread-safe. -func (sm *StreamMuxer) SendIsThreadSafe() {} - -func (sm *StreamMuxer) Send(e *kvpb.MuxRangeFeedEvent) error { - return sm.sender.Send(e) -} - -// transformRangefeedErrToClientError converts a rangefeed error to a client -// error to be sent back to client. This also handles nil values, preventing nil -// pointer dereference. -func transformRangefeedErrToClientError(err *kvpb.Error) *kvpb.Error { - if err == nil { - // When processor is stopped when it no longer has any registrations, it - // would attempt to close all feeds again with a nil error. 
Theoretically, - // this should never happen as processor would always stop with a reason if - // feeds are active. - return kvpb.NewError(kvpb.NewRangeFeedRetryError(kvpb.RangeFeedRetryError_REASON_RANGEFEED_CLOSED)) - } - return err -} - -// appendMuxError appends a mux rangefeed completion error to be sent back to -// the client. Note that this method cannot block on IO. If the underlying -// stream is broken, the error will be dropped. -func (sm *StreamMuxer) appendMuxError(e *kvpb.MuxRangeFeedEvent) { - sm.mu.Lock() - defer sm.mu.Unlock() - sm.mu.muxErrors = append(sm.mu.muxErrors, e) - // Note that notifyMuxError is non-blocking. - select { - case sm.notifyMuxError <- struct{}{}: - default: - } -} - -// DisconnectStreamWithError disconnects a stream with an error. Safe to call -// repeatedly for the same stream, but subsequent errors are ignored. It ensures -// 1. the stream context is cancelled 2. exactly one error is sent back to the -// client on behalf of the stream. -// -// Note that this function can be called by the processor worker while holding -// raftMu, so it is important that this function doesn't block IO. It does so by -// delegating the responsibility of sending mux error to StreamMuxer.run. -func (sm *StreamMuxer) DisconnectStreamWithError( - streamID int64, rangeID roachpb.RangeID, err *kvpb.Error, -) { - if stream, ok := sm.activeStreams.LoadAndDelete(streamID); ok { - // Fine to skip nil checking here since that would be a programming error. - stream.cancel() - clientErrorEvent := transformRangefeedErrToClientError(err) - ev := &kvpb.MuxRangeFeedEvent{ - StreamID: streamID, - RangeID: rangeID, - } - ev.MustSetValue(&kvpb.RangeFeedError{ - Error: *clientErrorEvent, - }) - sm.appendMuxError(ev) - sm.metrics.UpdateMetricsOnRangefeedDisconnect() - } -} - -// detachMuxErrors returns muxErrors and clears the slice. Caller must ensure -// the returned errors are sent back to the client. -func (sm *StreamMuxer) detachMuxErrors() []*kvpb.MuxRangeFeedEvent { - sm.mu.Lock() - defer sm.mu.Unlock() - toSend := sm.mu.muxErrors - sm.mu.muxErrors = nil - return toSend -} - -// run forwards rangefeed completion errors back to the client. run is expected -// to be called in a goroutine and will block until the context is done or the -// stopper is quiesced. StreamMuxer will stop forward rangefeed completion -// errors after run completes, and caller is responsible for handling shutdown. -func (sm *StreamMuxer) run(ctx context.Context, stopper *stop.Stopper) error { - for { - select { - case <-sm.notifyMuxError: - toSend := sm.detachMuxErrors() - for _, clientErr := range toSend { - if err := sm.sender.Send(clientErr); err != nil { - log.Errorf(ctx, - "failed to send rangefeed completion error back to client due to broken stream: %v", err) - return err - } - } - case <-ctx.Done(): - // Top level goroutine will receive the context cancellation and handle - // ctx.Err(). - return nil - case <-stopper.ShouldQuiesce(): - // Top level goroutine will receive the stopper quiesce signal and handle - // error. - return nil - } - } -} - -// Error returns a channel that can be used to receive errors from -// StreamMuxer.run. Only non-nil errors are sent on this channel. If non-empty, -// streamMuxer.run is finished, and the caller is responsible for handling the -// error. 
-func (sm *StreamMuxer) Error() chan error { - if sm.errCh == nil { - log.Fatalf(context.Background(), "StreamMuxer.Error called before StreamMuxer.Start") - } - return sm.errCh -} - -// Stop cancels the StreamMuxer.run task and waits for it to complete. It does -// nothing if StreamMuxer.run is already finished. It is expected to be called -// after StreamMuxer.Start. Note that the caller is responsible for handling any -// cleanups for any active streams. -func (sm *StreamMuxer) Stop() { - sm.taskCancel() - sm.wg.Wait() -} - -// Start launches StreamMuxer.run in the background if no error is returned. -// StreamMuxer.run continues running until it errors or StreamMuxer.Stop is -// called. The caller is responsible for calling StreamMuxer.Stop and handle any -// cleanups for any active streams. Note that it is not valid to call Start -// multiple times or restart after Stop. Example usage: -// -// if err := streamMuxer.Start(ctx, stopper); err != nil { -// return err -// } -// -// defer streamMuxer.Stop() -func (sm *StreamMuxer) Start(ctx context.Context, stopper *stop.Stopper) error { - if sm.errCh != nil { - log.Fatalf(ctx, "StreamMuxer.Start called multiple times") - } - sm.errCh = make(chan error, 1) - ctx, sm.taskCancel = context.WithCancel(ctx) - sm.wg.Add(1) - if err := stopper.RunAsyncTask(ctx, "test-stream-muxer", func(ctx context.Context) { - defer sm.wg.Done() - if err := sm.run(ctx, stopper); err != nil { - sm.errCh <- err - } - }); err != nil { - sm.taskCancel() - sm.wg.Done() - return err - } - return nil -} diff --git a/pkg/kv/kvserver/rangefeed/test_helpers.go b/pkg/kv/kvserver/rangefeed/test_helpers.go new file mode 100644 index 000000000000..234221a5f402 --- /dev/null +++ b/pkg/kv/kvserver/rangefeed/test_helpers.go @@ -0,0 +1,77 @@ +// Copyright 2024 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package rangefeed + +import ( + "math/rand" + + "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/sql/rowenc/keyside" + "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" + "github.com/cockroachdb/cockroach/pkg/util/encoding" + "github.com/cockroachdb/cockroach/pkg/util/hlc" +) + +// GenerateRandomizedTs generates a timestamp between 1 and maxTime nanoseconds. +func GenerateRandomizedTs(rand *rand.Rand, maxTime int64) hlc.Timestamp { + // Avoid generating a zero timestamp, which would be equal to an empty event. + return hlc.Timestamp{WallTime: rand.Int63n(maxTime) + 1} +} + +func generateStartAndEndKey(rand *rand.Rand) (roachpb.Key, roachpb.Key) { + startKey, endKey, _ := generateStartAndEndKeyFromK(rand, 0) + return startKey, endKey +} + +// generateStartAndEndKeyFromK generates a start key at or above k, an end key +// at or above the start key, and returns both keys and the integer value of +// the end key.
+func generateStartAndEndKeyFromK(rand *rand.Rand, k int) (roachpb.Key, roachpb.Key, int) { + start := k + rand.Intn(2<<20) + end := start + rand.Intn(2<<20) + startDatum := tree.NewDInt(tree.DInt(start)) + endDatum := tree.NewDInt(tree.DInt(end)) + const tableID = 42 + + startKey, err := keyside.Encode( + keys.SystemSQLCodec.TablePrefix(tableID), + startDatum, + encoding.Ascending, + ) + if err != nil { + panic(err) + } + + endKey, err := keyside.Encode( + keys.SystemSQLCodec.TablePrefix(tableID), + endDatum, + encoding.Ascending, + ) + if err != nil { + panic(err) + } + return startKey, endKey, end +} + +// GenerateRandomizedSpans generates n non-overlapping spans. +func GenerateRandomizedSpans(rand *rand.Rand, n int) []roachpb.RSpan { + spans := make([]roachpb.RSpan, 0, n) + var startKey, endKey roachpb.Key + k := 0 + for range n { + startKey, endKey, k = generateStartAndEndKeyFromK(rand, k) + spans = append(spans, roachpb.RSpan{ + Key: roachpb.RKey(startKey), + EndKey: roachpb.RKey(endKey), + }) + } + return spans +} diff --git a/pkg/kv/kvserver/rangefeed/unbuffered_sender.go b/pkg/kv/kvserver/rangefeed/unbuffered_sender.go new file mode 100644 index 000000000000..0512bd2abf63 --- /dev/null +++ b/pkg/kv/kvserver/rangefeed/unbuffered_sender.go @@ -0,0 +1,317 @@ +// Copyright 2024 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package rangefeed + +import ( + "context" + "sync" + + "github.com/cockroachdb/cockroach/pkg/kv/kvpb" + "github.com/cockroachdb/cockroach/pkg/util/log" + "github.com/cockroachdb/cockroach/pkg/util/stop" + "github.com/cockroachdb/cockroach/pkg/util/syncutil" +) + +// RangefeedMetricsRecorder is an interface for recording rangefeed metrics. +type RangefeedMetricsRecorder interface { + UpdateMetricsOnRangefeedConnect() + UpdateMetricsOnRangefeedDisconnect() +} + +// ServerStreamSender forwards MuxRangefeedEvents from UnbufferedSender to the +// underlying grpc stream. +type ServerStreamSender interface { + // Send must be thread-safe to be called concurrently. + Send(*kvpb.MuxRangeFeedEvent) error + // SendIsThreadSafe is a no-op declaration method. It is a contract that the + // interface has a thread-safe Send method. + SendIsThreadSafe() +} + +// ┌───────────────────────────┐ +// │ DistSender.RangefeedSpans │ rangefeedMuxer +// └───────────────────────────┘ +// │ divideAllSpansOnRangeBoundaries +// ┌───────────────────────────┬───────────────────────────┐ +// ▼ ▼ ▼ +// ┌────────────────────┐ ┌────────────────────┐ ┌────────────────────┐ +// │ rangefeedMuxer │ │ rangefeedMuxer │ │ rangefeedMuxer │ (client: rangefeedMuxer. 
+// │startSingleRangefeed│ │startSingleRangefeed│ │startSingleRangefeed│ restartActiveRangeFeed) +// └─────────┬──────────┘ └──────────┬─────────┘ └──────────┬─────────┘ ▲ +// ▼ ▼ ▼ │ +// new streamID new streamID new streamID │ +// ┌────────────────┐ ┌────────────────┐ ┌────────────────┐ │ +// │RangefeedRequest│ │RangefeedRequest│ │RangefeedRequest│ │ +// └────────────────┘ └────────────────┘ └────────────────┘ │ +// rangefeedMuxer rangefeedMuxer rangefeedMuxer │ +// establishMuxConnection establishMuxConnection establishMuxConnection │ +// │ │ │ │ +// ▼ ▼ ▼ │ +// rangefeedMuxer.startNodeMuxRangefeed rangefeedMuxer.startNodeMuxRangefeed │ +// rangefeedMuxer.receiveEventsFromNode rangefeedMuxer.receiveEventsFromNode │ +// ┌─────────────────────────────────────────┐ ┌─────────────────────────────────────────┐│ +// │rpc.RestrictedInternalClient.MuxRangeFeed│ │rpc.RestrictedInternalClient.MuxRangeFeed││ +// └─────────────┬────────────▲──────────────┘ └─────────────────────────────────────────┘│ +// kvpb.RangefeedRequest │ │ kvpb.MuxRangefeedEvent │ +// ┌───────────────▼────────────┴────────────┐ MuxRangefeedEvent +// │ Node.MuxRangeFeed │◄─────── MuxRangefeedEvent ────────── with kvpb.RangeFeedError +// └─────────────────┬───▲───────────────────┘ ▲ ▲ +// Sender.AddStream │ │LockedMuxStream.Send │ │ +// ┌────────────▼───┴──────────┐ │ │ +// │ Buffered/Unbuffered Sender├───────────┐ │ │ +// └────────────┬──────────────┘ │ │ │ +// │ │ │ │ +// ┌────────▼─────────┐ │ │ │ +// │ Stores.Rangefeed │ │ │ │ +// └────────┬─────────┘ │ │ │ +// │ │ │ │ +// ┌───────▼─────────┐ UnbufferedSender UnbufferedSender │ +// │ Store.Rangefeed │ SendUnbuffered SendBufferedError ─────► UnbufferedSender.run +// └───────┬─────────┘ ▲ ▲ ▲ +// │ │ │ │ +// ┌────────▼──────────┐ │ │ │ +// │ Replica.Rangefeed │ │ │ │ +// └────────┬──────────┘ │ │ │ +// │ │ │ │ +// ┌──────▼───────┐ │ │ │ +// │ Registration │ │ │ │ +// └──────┬───────┘ │ │ │ +// │ │ │ │ +// │ │ │ │ +// └─────────────────────────┘───────────────┘───────────────────────────────┘ +// PerRangeEventSink.Send PerRangeEventSink.Disconnect +// +// UnbufferedSender is embedded in every rangefeed.PerRangeEventSink, serving as +// a helper to forward events to the underlying gRPC stream. +// - For non-error events, SendUnbuffered is blocking until the event is sent. +// - For error events, SendBufferedError is non-blocking and ensures +// 1) stream context is canceled +// 2) exactly one error is sent back to the client on behalf of the stream +// 3) metrics updates. +// It makes sure SendBufferedError is non-blocking by delegating the +// responsibility of sending mux error to UnbufferedSender.run (in a separate +// goroutine). There should only be one UnbufferedSender per Node.MuxRangefeed. +type UnbufferedSender struct { + // taskCancel is a function to cancel UnbufferedSender.run spawned in the + // background. It is called by UnbufferedSender.Stop. It is expected to be + // called after UnbufferedSender.Start. + taskCancel context.CancelFunc + + // wg is used to coordinate async tasks spawned by UnbufferedSender. + // Currently, there is only one task spawned by UnbufferedSender.Start + // (UnbufferedSender.run). + wg sync.WaitGroup + + // errCh is used to signal errors from UnbufferedSender.run back to the + // caller. If non-empty, the UnbufferedSender.run is finished and error should + // be handled. Note that it is possible for UnbufferedSender.run to be + // finished without sending an error to errCh. 
Other goroutines are expected + // to receive the same shutdown signal in this case and handle the error + // appropriately. + errCh chan error + + // Note that lockedMuxStream wraps the underlying grpc server stream, ensuring + // thread safety. + sender ServerStreamSender + + // notifyMuxError is a buffered channel of size 1 used to signal the presence + // of muxErrors. Additional signals are dropped if the channel is already full + // so that it's non-blocking. + notifyMuxError chan struct{} + + // streamID -> context cancellation + streams syncutil.Map[int64, context.CancelFunc] + + // metrics is used to record rangefeed metrics for the node. + metrics RangefeedMetricsRecorder + + mu struct { + syncutil.Mutex + // muxErrors is a slice of mux rangefeed completion errors to be sent back + // to the client. Upon receiving the error, the client restarts the + // rangefeed when possible. + muxErrors []*kvpb.MuxRangeFeedEvent + } +} + +func NewUnbufferedSender( + sender ServerStreamSender, metrics RangefeedMetricsRecorder, +) *UnbufferedSender { + return &UnbufferedSender{ + notifyMuxError: make(chan struct{}, 1), + sender: sender, + metrics: metrics, + } +} + +// SendBufferedError does two things: 1. sends a mux rangefeed completion error +// to the client without blocking, by delegating the responsibility of sending +// the mux error to UnbufferedSender.run; 2. disconnects the stream with +// ev.StreamID. Safe to call repeatedly for the same stream, but subsequent +// errors are ignored. +// +// The error event is not sent immediately. It is deferred to +// UnbufferedSender.run (async). If a node level shutdown occurs (such as when +// the underlying grpc stream is broken), UnbufferedSender.run would return +// early, and the error may not be sent successfully. In that case, +// Node.MuxRangefeed would return, allowing clients to learn about rangefeed +// completion. +// +// Note that this function can be called by the processor worker while holding +// raftMu, so it is important that this function doesn't block on IO. The +// caller needs to make sure this is called only with non-nil error events. +// This method must be thread-safe. +func (ubs *UnbufferedSender) SendBufferedError(ev *kvpb.MuxRangeFeedEvent) { + if ev.Error == nil { + log.Fatalf(context.Background(), "unexpected: SendBufferedError called with non-error event") + } + + if cancel, ok := ubs.streams.LoadAndDelete(ev.StreamID); ok { + // Fine to skip nil checking here since that would be a programming error. + (*cancel)() + ubs.metrics.UpdateMetricsOnRangefeedDisconnect() + ubs.appendMuxError(ev) + } +} + +// SendUnbuffered blocks until the event is sent to the underlying grpc stream. +// It should only be called for non-error events. If this function returns an +// error, the caller must ensure that no further events are sent from +// rangefeed.Stream to avoid potential event loss. (NB: While subsequent Send +// should also return an error if one is encountered, let's play safe.) +// This method must be thread-safe. +func (ubs *UnbufferedSender) SendUnbuffered(event *kvpb.MuxRangeFeedEvent) error { + if event.Error != nil { + log.Fatalf(context.Background(), "unexpected: SendUnbuffered called with error event") + } + return ubs.sender.Send(event) +} + +// run forwards rangefeed completion errors back to the client. run is expected +// to be called in a goroutine and will block until the context is done or the +// stopper is quiesced.
UnbufferedSender will stop forwarding rangefeed +// completion errors after run completes, but a node level shutdown from +// Node.MuxRangefeed should happen soon. +func (ubs *UnbufferedSender) run(ctx context.Context, stopper *stop.Stopper) error { + for { + select { + case <-ubs.notifyMuxError: + toSend := ubs.detachMuxErrors() + for _, clientErr := range toSend { + if err := ubs.sender.Send(clientErr); err != nil { + log.Errorf(ctx, + "failed to send rangefeed completion error back to client due to broken stream: %v", err) + return err + } + } + case <-ctx.Done(): + // Top level goroutine will receive the context cancellation and handle + // ctx.Err(). + return nil + case <-stopper.ShouldQuiesce(): + // Top level goroutine will receive the stopper quiesce signal and handle + // the error. + return nil + } + } +} + +// appendMuxError appends a mux rangefeed completion error to be sent back to +// the client. Note that this method cannot block on IO. +func (ubs *UnbufferedSender) appendMuxError(e *kvpb.MuxRangeFeedEvent) { + ubs.mu.Lock() + defer ubs.mu.Unlock() + ubs.mu.muxErrors = append(ubs.mu.muxErrors, e) + // Note that notifyMuxError is non-blocking. + select { + case ubs.notifyMuxError <- struct{}{}: + default: + } +} + +// detachMuxErrors returns muxErrors and clears the slice. Caller must ensure +// the returned errors are sent back to the client. +func (ubs *UnbufferedSender) detachMuxErrors() []*kvpb.MuxRangeFeedEvent { + ubs.mu.Lock() + defer ubs.mu.Unlock() + toSend := ubs.mu.muxErrors + ubs.mu.muxErrors = nil + return toSend +} + +// Start launches UnbufferedSender.run in the background if no error is +// returned. UnbufferedSender.run continues running until it errors or +// UnbufferedSender.Stop is called. The caller is responsible for calling +// UnbufferedSender.Stop and handling any cleanup for active streams. Note +// that it is not valid to call Start multiple times or restart after Stop. +// Example usage: +// +// if err := UnbufferedSender.Start(ctx, stopper); err != nil { +// return err +// } +// +// defer UnbufferedSender.Stop() +func (ubs *UnbufferedSender) Start(ctx context.Context, stopper *stop.Stopper) error { + if ubs.errCh != nil { + log.Fatalf(ctx, "UnbufferedSender.Start called multiple times") + } + ubs.errCh = make(chan error, 1) + ctx, ubs.taskCancel = context.WithCancel(ctx) + ubs.wg.Add(1) + if err := stopper.RunAsyncTask(ctx, "unbuffered sender", func(ctx context.Context) { + defer ubs.wg.Done() + if err := ubs.run(ctx, stopper); err != nil { + ubs.errCh <- err + } + }); err != nil { + ubs.taskCancel() + ubs.wg.Done() + return err + } + return nil +} + +// Error returns a channel that can be used to receive errors from +// UnbufferedSender.run. Only non-nil errors are sent on this channel. If +// non-empty, UnbufferedSender.run is finished, and the caller is responsible +// for handling the error. +func (ubs *UnbufferedSender) Error() chan error { + if ubs.errCh == nil { + log.Fatalf(context.Background(), "UnbufferedSender.Error called before UnbufferedSender.Start") + } + return ubs.errCh +} + +// Stop cancels the UnbufferedSender.run task and waits for it to complete. It +// does nothing if UnbufferedSender.run is already finished. It is expected to +// be called after UnbufferedSender.Start. Note that the caller is responsible +// for handling any cleanup for active streams or mux errors that were not +// sent back successfully.
+func (ubs *UnbufferedSender) Stop() { + ubs.taskCancel() + ubs.wg.Wait() + + // It is okay not to clean up mux errors here since a node level shutdown is + // happening. It is also okay not to disconnect all active streams (i.e. + // cancel stream contexts and decrement metrics) here, since + // SendBufferedError will be called again by rangefeed.Stream after that. No + // errors will be sent then, but the metrics cleanup will still happen during + // SendBufferedError. +} + +// AddStream registers a rangefeed.Stream with UnbufferedSender. It remains +// active until SendBufferedError is called with the same streamID. +// Caller must ensure no duplicate stream IDs are added without disconnecting +// the old one first. +func (ubs *UnbufferedSender) AddStream(streamID int64, cancel context.CancelFunc) { + if _, loaded := ubs.streams.LoadOrStore(streamID, &cancel); loaded { + log.Fatalf(context.Background(), "stream %d already exists", streamID) + } + ubs.metrics.UpdateMetricsOnRangefeedConnect() +} diff --git a/pkg/kv/kvserver/rangefeed/stream_muxer_test.go b/pkg/kv/kvserver/rangefeed/unbuffered_sender_test.go similarity index 63% rename from pkg/kv/kvserver/rangefeed/stream_muxer_test.go rename to pkg/kv/kvserver/rangefeed/unbuffered_sender_test.go index e7c1995ba080..c11026447776 100644 --- a/pkg/kv/kvserver/rangefeed/stream_muxer_test.go +++ b/pkg/kv/kvserver/rangefeed/unbuffered_sender_test.go @@ -27,9 +27,22 @@ import ( "github.com/stretchr/testify/require" ) -// TestStreamMuxer tests that correctly forwards rangefeed completion errors to -// the server stream. -func TestStreamMuxer(t *testing.T) { +func makeMuxRangefeedErrorEvent( + streamID int64, rangeID roachpb.RangeID, err *kvpb.Error, +) *kvpb.MuxRangeFeedEvent { + ev := &kvpb.MuxRangeFeedEvent{ + StreamID: streamID, + RangeID: rangeID, + } + ev.MustSetValue(&kvpb.RangeFeedError{ + Error: *transformRangefeedErrToClientError(err), + }) + return ev +} + +// TestUnbufferedSenderDisconnect tests that UnbufferedSender correctly +// forwards rangefeed completion errors to the server stream. +func TestUnbufferedSenderDisconnect(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) @@ -40,18 +53,19 @@ func TestStreamMuxer(t *testing.T) { testServerStream := newTestServerStream() testRangefeedCounter := newTestRangefeedCounter() - muxer := NewStreamMuxer(testServerStream, testRangefeedCounter) - require.NoError(t, muxer.Start(ctx, stopper)) - defer muxer.Stop() + ubs := NewUnbufferedSender(testServerStream, testRangefeedCounter) + require.NoError(t, ubs.Start(ctx, stopper)) + defer ubs.Stop() t.Run("nil handling", func(t *testing.T) { const streamID = 0 const rangeID = 1 streamCtx, cancel := context.WithCancel(context.Background()) - muxer.AddStream(streamID, rangeID, cancel) + ubs.AddStream(streamID, cancel) // Note that kvpb.NewError(nil) == nil. require.Equal(t, testRangefeedCounter.get(), int32(1)) - muxer.DisconnectStreamWithError(streamID, rangeID, kvpb.NewError(nil)) + ubs.SendBufferedError(makeMuxRangefeedErrorEvent(streamID, rangeID, + kvpb.NewError(nil))) require.Equal(t, testRangefeedCounter.get(), int32(0)) require.Equal(t, context.Canceled, streamCtx.Err()) expectedErrEvent := &kvpb.MuxRangeFeedEvent{ @@ -66,13 +80,13 @@ func TestStreamMuxer(t *testing.T) { require.True(t, testServerStream.hasEvent(expectedErrEvent)) // Repeat closing the stream does nothing.
- muxer.DisconnectStreamWithError(streamID, rangeID, - kvpb.NewError(kvpb.NewRangeFeedRetryError(kvpb.RangeFeedRetryError_REASON_RANGEFEED_CLOSED))) + ubs.SendBufferedError(makeMuxRangefeedErrorEvent(streamID, rangeID, + kvpb.NewError(kvpb.NewRangeFeedRetryError(kvpb.RangeFeedRetryError_REASON_RANGEFEED_CLOSED)))) time.Sleep(10 * time.Millisecond) - require.Equal(t, 1, testServerStream.totalEventsSent()) + require.Equalf(t, 1, testServerStream.totalEventsSent(), testServerStream.String()) }) - t.Run("send rangefeed completion error", func(t *testing.T) { + t.Run("send rangefeed completion error concurrently", func(t *testing.T) { testRangefeedCompletionErrors := []struct { streamID int64 rangeID roachpb.RangeID @@ -86,7 +100,7 @@ func TestStreamMuxer(t *testing.T) { require.Equal(t, testRangefeedCounter.get(), int32(0)) for _, muxError := range testRangefeedCompletionErrors { - muxer.AddStream(muxError.streamID, muxError.rangeID, func() {}) + ubs.AddStream(muxError.streamID, func() {}) } require.Equal(t, testRangefeedCounter.get(), int32(3)) @@ -96,7 +110,7 @@ func TestStreamMuxer(t *testing.T) { wg.Add(1) go func(streamID int64, rangeID roachpb.RangeID, err error) { defer wg.Done() - muxer.DisconnectStreamWithError(streamID, rangeID, kvpb.NewError(err)) + ubs.SendBufferedError(makeMuxRangefeedErrorEvent(streamID, rangeID, kvpb.NewError(err))) }(muxError.streamID, muxError.rangeID, muxError.Error) } wg.Wait() @@ -120,9 +134,9 @@ func TestStreamMuxer(t *testing.T) { }) } -// TestStreamMuxerOnBlockingIO tests that the -// StreamMuxer.DisconnectStreamWithError doesn't block on IO. -func TestStreamMuxerOnBlockingIO(t *testing.T) { +// TestUnbufferedSenderOnBlockingIO tests that the +// UnbufferedSender.SendBufferedError doesn't block on IO. +func TestUnbufferedSenderOnBlockingIO(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) @@ -133,14 +147,14 @@ func TestStreamMuxerOnBlockingIO(t *testing.T) { testServerStream := newTestServerStream() testRangefeedCounter := newTestRangefeedCounter() - muxer := NewStreamMuxer(testServerStream, testRangefeedCounter) - require.NoError(t, muxer.Start(ctx, stopper)) - defer muxer.Stop() + ubs := NewUnbufferedSender(testServerStream, testRangefeedCounter) + require.NoError(t, ubs.Start(ctx, stopper)) + defer ubs.Stop() const streamID = 0 const rangeID = 1 streamCtx, streamCancel := context.WithCancel(context.Background()) - muxer.AddStream(0, rangeID, streamCancel) + ubs.AddStream(0, streamCancel) ev := &kvpb.MuxRangeFeedEvent{ StreamID: streamID, @@ -150,7 +164,7 @@ func TestStreamMuxerOnBlockingIO(t *testing.T) { Span: roachpb.Span{Key: roachpb.Key("a"), EndKey: roachpb.Key("m")}, ResolvedTS: hlc.Timestamp{WallTime: 1}, }) - require.NoError(t, muxer.sender.Send(ev)) + require.NoError(t, ubs.sender.Send(ev)) require.Truef(t, testServerStream.hasEvent(ev), "expected event %v not found in %v", ev, testServerStream) @@ -159,8 +173,8 @@ func TestStreamMuxerOnBlockingIO(t *testing.T) { // Although stream is blocked, we should be able to disconnect the stream // without blocking. 
- muxer.DisconnectStreamWithError(streamID, rangeID, - kvpb.NewError(kvpb.NewRangeFeedRetryError(kvpb.RangeFeedRetryError_REASON_NO_LEASEHOLDER))) + ubs.SendBufferedError(makeMuxRangefeedErrorEvent(streamID, rangeID, + kvpb.NewError(kvpb.NewRangeFeedRetryError(kvpb.RangeFeedRetryError_REASON_NO_LEASEHOLDER)))) require.Equal(t, streamCtx.Err(), context.Canceled) unblock() time.Sleep(100 * time.Millisecond) @@ -175,3 +189,43 @@ func TestStreamMuxerOnBlockingIO(t *testing.T) { require.Truef(t, testServerStream.hasEvent(expectedErrEvent), "expected event %v not found in %v", ev, testServerStream) } + +// TestUnbufferedSenderWithConcurrentSend tests that +// UnbufferedSender.SendUnbuffered is thread-safe. +func TestUnbufferedSenderWithConcurrentSend(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + stopper := stop.NewStopper() + defer stopper.Stop(ctx) + + testServerStream := newTestServerStream() + testRangefeedCounter := newTestRangefeedCounter() + ubs := NewUnbufferedSender(testServerStream, testRangefeedCounter) + require.NoError(t, ubs.Start(ctx, stopper)) + defer ubs.Stop() + + ubs.AddStream(1, func() {}) + require.Equal(t, testRangefeedCounter.get(), int32(1)) + + var wg sync.WaitGroup + for i := 0; i < 10; i++ { + wg.Add(1) + go func() { + defer wg.Done() + val := roachpb.Value{RawBytes: []byte("val"), Timestamp: hlc.Timestamp{WallTime: 1}} + ev1 := new(kvpb.RangeFeedEvent) + ev1.MustSetValue(&kvpb.RangeFeedValue{Key: keyA, Value: val, PrevValue: val}) + require.NoError(t, ubs.SendUnbuffered(&kvpb.MuxRangeFeedEvent{ + StreamID: 1, + RangeID: 1, + RangeFeedEvent: *ev1, + })) + }() + } + wg.Wait() + + require.Equal(t, 10, testServerStream.eventsSent) +} diff --git a/pkg/kv/kvserver/replica.go b/pkg/kv/kvserver/replica.go index 4bcf890d4bc4..13767ce4249f 100644 --- a/pkg/kv/kvserver/replica.go +++ b/pkg/kv/kvserver/replica.go @@ -24,10 +24,12 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv/kvpb" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/abortspan" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/allocator" + "github.com/cockroachdb/cockroach/pkg/kv/kvserver/allocator/allocatorimpl" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/allocator/plan" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/batcheval" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/concurrency" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/gc" + "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvflowcontrol/replica_rac2" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverbase" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverpb" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/load" @@ -300,6 +302,10 @@ type Replica struct { // See replica_circuit_breaker.go for details. breaker *replicaCircuitBreaker + // flowControlV2 integrates with RACv2. The value retrieved from + // GetEnabledWhenLeader is consistent with raftMu.flowControlLevel. + flowControlV2 replica_rac2.Processor + // raftMu protects Raft processing the replica. // // Locking notes: Replica.raftMu < Replica.mu @@ -321,6 +327,8 @@ type Replica struct { // to be applied. Currently, it only tracks bytes used by committed entries // being applied to the state machine.
bytesAccount logstore.BytesAccount + + flowControlLevel replica_rac2.EnabledWhenLeaderLevel } // localMsgs contains a collection of raftpb.Message that target the local @@ -330,6 +338,8 @@ type Replica struct { // - Replica.localMsgs must be held to append messages to active. // - Replica.raftMu and Replica.localMsgs must both be held to switch slices. // - Replica.raftMu < Replica.localMsgs + // + // TODO(pav-kv): replace these with log marks for the latest completed write. localMsgs struct { syncutil.Mutex active, recycled []raftpb.Message @@ -864,6 +874,10 @@ type Replica struct { // both the leaseholder and raft leader. // // Accessing it requires Replica.mu to be held, exclusively. + // + // There is a one-way transition from RACv1 => RACv2 that causes the + // existing real implementation to be destroyed and replaced with a noop + // implementation. replicaFlowControlIntegration replicaFlowControlIntegration } @@ -914,6 +928,12 @@ type Replica struct { // changes for a range on the leaseholder. allocatorToken *plan.AllocatorToken + // lastProblemRangeReplicateEnqueueTime is the last time this replica was + // eagerly enqueued into the replicate queue due to being underreplicated + // or having a decommissioning replica. This is used to throttle enqueue + // attempts. + lastProblemRangeReplicateEnqueueTime atomic.Value + // unreachablesMu contains a set of remote ReplicaIDs that are to be reported // as unreachable on the next raft tick. unreachablesMu struct { @@ -2537,3 +2557,64 @@ func (r *Replica) ReadProtectedTimestampsForTesting(ctx context.Context) (err er func (r *Replica) GetMutexForTesting() *ReplicaMutex { return &r.mu.ReplicaMutex } + +func racV2EnabledWhenLeaderLevel( + ctx context.Context, st *cluster.Settings, +) replica_rac2.EnabledWhenLeaderLevel { + // TODO(sumeer): implement fully, once all the dependencies are implemented. + return replica_rac2.NotEnabledWhenLeader +} + +// maybeEnqueueProblemRange will enqueue the replica for processing into the +// replicate queue iff: +// +// - The replica is the holder of a valid lease. +// - EnqueueProblemRangeInReplicateQueueInterval is enabled (set to a +// non-zero value). +// - The last time the replica was enqueued is more than +// EnqueueProblemRangeInReplicateQueueInterval ago. +// +// The replica is enqueued at a decommissioning priority. Note that by default, +// this behavior is disabled (zero interval). Also note that this method should +// NOT be called unless the range is known to require action, e.g., +// decommissioning|underreplicated. +// +// NOTE: This method is motivated by a bug where decommissioning stalls because +// a decommissioning range is not enqueued in the replicate queue in a timely +// manner via the replica scanner, see #130199. This functionality is disabled +// by default for this reason. +func (r *Replica) maybeEnqueueProblemRange( + ctx context.Context, now time.Time, leaseValid, isLeaseholder bool, +) { + // The method expects the caller to provide whether the lease is valid and + // the replica is the leaseholder for the range, so that it can avoid + // unnecessary work. We expect this method to be called in the context of + // updating metrics. + if !isLeaseholder || !leaseValid { + // The replicate queue will not process the replica without a valid lease. + // Nothing to do. + return + } + + interval := EnqueueProblemRangeInReplicateQueueInterval.Get(&r.store.cfg.Settings.SV) + if interval == 0 { + // The setting is disabled.
+ return + } + lastTime := r.lastProblemRangeReplicateEnqueueTime.Load().(time.Time) + if lastTime.Add(interval).After(now) { + // The last time the replica was enqueued is less than the interval ago, + // nothing to do. + return + } + // The replica is the leaseholder for a range which requires action and it + // has been longer than EnqueueProblemRangeInReplicateQueueInterval since the + // last time it was enqueued. Try to swap the last time with now. We don't + // expect a race, however if the value changed underneath us we won't enqueue + // the replica as we lost the race. + if !r.lastProblemRangeReplicateEnqueueTime.CompareAndSwap(lastTime, now) { + return + } + r.store.replicateQueue.AddAsync(ctx, r, + allocatorimpl.AllocatorReplaceDecommissioningVoter.Priority()) +} diff --git a/pkg/kv/kvserver/replica_backpressure.go b/pkg/kv/kvserver/replica_backpressure.go index 031f0a5fc5c1..dd122900ee6f 100644 --- a/pkg/kv/kvserver/replica_backpressure.go +++ b/pkg/kv/kvserver/replica_backpressure.go @@ -24,11 +24,31 @@ import ( var backpressureLogLimiter = log.Every(500 * time.Millisecond) +// backpressureRangeHardCap is a hard cap on the absolute size a range is +// allowed to grow to before backpressure will be applied to all writes. This +// absolute value takes precedence over any user-defined zone configuration +// value in conjunction with the backpressureRangeSizeMultiplier. It is intended +// to be the last defense against absurdly large ranges, in cases where +// backpressure is disabled by setting backpressureRangeSizeMultiplier to 0, or +// doesn't apply due to the backpressureByteTolerance[1], or a user has +// fat-fingered a zone configuration. +// +// [1] See comment on backpressureByteTolerance about the risk of disabling +// backpressure with that setting. +var backpressureRangeHardCap = settings.RegisterByteSizeSetting( + settings.SystemOnly, + "kv.range.range_size_hard_cap", + "hard cap on the maximum size a range is allowed to grow to without "+ "splitting before writes to the range are blocked. Takes precedence over all other configurations", + 8<<30, /* 8 GiB */ + settings.ByteSizeWithMinimum(64<<20 /* 64 MiB */), + settings.WithPublic, +) + // backpressureRangeSizeMultiplier is the multiple of range_max_bytes that a // range's size must grow to before backpressure will be applied on writes. Set // to 0 to disable backpressure altogether. -var backpressureRangeSizeMultiplier = settings.RegisterFloatSetting( - settings.SystemOnly, +var backpressureRangeSizeMultiplier = settings.RegisterFloatSetting(settings.SystemOnly, "kv.range.backpressure_range_size_multiplier", "multiple of range_max_bytes that a range is allowed to grow to without "+ "splitting before writes to that range are blocked, or 0 to disable", @@ -123,16 +143,27 @@ func (r *Replica) signallerForBatch(ba *kvpb.BatchRequest) signaller { // relation to the split size. The method returns true if the range is more // than backpressureRangeSizeMultiplier times larger than the split size but not // larger than that by more than backpressureByteTolerance (see that comment for -// further explanation). +// further explanation). It ensures that writes are always backpressured if the +// range's size is already larger than the absolute maximum we'll allow. func (r *Replica) shouldBackpressureWrites() bool { + r.mu.RLock() + defer r.mu.RUnlock() + + // Check if the current range's size is already over the absolute maximum + // we'll allow. Don't bother with any multipliers/byte tolerance calculations + // if it is.
+ rangeSizeHardCap := backpressureRangeHardCap.Get(&r.store.cfg.Settings.SV) + size := r.mu.state.Stats.Total() + if size >= rangeSizeHardCap { + return true + } + mult := backpressureRangeSizeMultiplier.Get(&r.store.cfg.Settings.SV) if mult == 0 { // Disabled. return false } - r.mu.RLock() - defer r.mu.RUnlock() exceeded, bytesOver := r.exceedsMultipleOfSplitSizeRLocked(mult) if !exceeded { return false diff --git a/pkg/kv/kvserver/replica_destroy.go b/pkg/kv/kvserver/replica_destroy.go index f3ef10e11d9c..245cc957fc72 100644 --- a/pkg/kv/kvserver/replica_destroy.go +++ b/pkg/kv/kvserver/replica_destroy.go @@ -169,6 +169,7 @@ func (r *Replica) destroyRaftMuLocked(ctx context.Context, nextReplicaID roachpb // is one, releases all held flow tokens, and removes the in-memory raft state. func (r *Replica) disconnectReplicationRaftMuLocked(ctx context.Context) { r.raftMu.AssertHeld() + r.flowControlV2.OnDestroyRaftMuLocked(ctx) r.mu.Lock() defer r.mu.Unlock() // NB: In the very rare scenario that we're being removed but currently diff --git a/pkg/kv/kvserver/replica_init.go b/pkg/kv/kvserver/replica_init.go index 1fad7fd4b2df..b5ac65931fc1 100644 --- a/pkg/kv/kvserver/replica_init.go +++ b/pkg/kv/kvserver/replica_init.go @@ -19,6 +19,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv/kvserver/allocator/plan" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/closedts/tracker" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/concurrency" + "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvflowcontrol/replica_rac2" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverbase" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvstorage" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/load" @@ -183,6 +184,7 @@ func newUninitializedReplicaWithoutRaftGroup( store.rebalanceObjManager.Objective().ToSplitObjective(), ) } + r.lastProblemRangeReplicateEnqueueTime.Store(store.Clock().PhysicalTime()) // NB: the state will be loaded when the replica gets initialized. r.mu.state = uninitState @@ -222,6 +224,20 @@ func newUninitializedReplicaWithoutRaftGroup( makeStoreFlowControlHandleFactory(r.store), r.store.TestingKnobs().FlowControlTestingKnobs, ) + r.raftMu.flowControlLevel = racV2EnabledWhenLeaderLevel(r.raftCtx, store.cfg.Settings) + r.flowControlV2 = replica_rac2.NewProcessor(replica_rac2.ProcessorOptions{ + NodeID: store.NodeID(), + StoreID: r.StoreID(), + RangeID: r.RangeID, + ReplicaID: r.replicaID, + Replica: (*replicaForRACv2)(r), + RaftScheduler: r.store.scheduler, + AdmittedPiggybacker: r.store.cfg.KVFlowAdmittedPiggybacker, + ACWorkQueue: r.store.cfg.KVAdmissionController, + EvalWaitMetrics: r.store.cfg.KVFlowEvalWaitMetrics, + RangeControllerFactory: r.store.kvflowRangeControllerFactory, + EnabledWhenLeaderLevel: r.raftMu.flowControlLevel, + }) return r } @@ -417,6 +433,7 @@ func (r *Replica) setDescLockedRaftMuLocked(ctx context.Context, desc *roachpb.R r.concMgr.OnRangeDescUpdated(desc) r.mu.state.Desc = desc r.mu.replicaFlowControlIntegration.onDescChanged(ctx) + r.flowControlV2.OnDescChangedLocked(ctx, desc, r.mu.tenantID) // Give the liveness and meta ranges high priority in the Raft scheduler, to // avoid head-of-line blocking and high scheduling latency. 
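An aside on the throttle in maybeEnqueueProblemRange above: it leans on an atomic.Value compare-and-swap so that when several metrics-update goroutines observe the same problem range concurrently, at most one of them wins the right to enqueue it. Below is a minimal standalone sketch of that pattern; the enqueueThrottle type and the interval value are hypothetical illustrations, not the replica's actual fields.

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// enqueueThrottle is a hypothetical stand-in for the replica's
// lastProblemRangeReplicateEnqueueTime field.
type enqueueThrottle struct {
	last atomic.Value // holds a time.Time
}

// shouldFire reports whether the caller won the right to act: it returns
// false if the last attempt was within interval, or if a concurrent caller
// advanced the timestamp first (i.e. we lost the CAS race).
func (t *enqueueThrottle) shouldFire(now time.Time, interval time.Duration) bool {
	lastTime := t.last.Load().(time.Time)
	if lastTime.Add(interval).After(now) {
		return false // throttled: fired too recently
	}
	return t.last.CompareAndSwap(lastTime, now)
}

func main() {
	var t enqueueThrottle
	t.last.Store(time.Time{}) // mirrors the Store call done at replica creation
	now := time.Now()
	fmt.Println(t.shouldFire(now, time.Minute)) // true: first attempt wins
	fmt.Println(t.shouldFire(now, time.Minute)) // false: within the interval
}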
diff --git a/pkg/kv/kvserver/replica_init_test.go b/pkg/kv/kvserver/replica_init_test.go index a8792bb8fcee..776add8ff42f 100644 --- a/pkg/kv/kvserver/replica_init_test.go +++ b/pkg/kv/kvserver/replica_init_test.go @@ -14,6 +14,7 @@ import ( "context" "testing" + "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvflowcontrol/replica_rac2" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -65,6 +66,7 @@ func TestReplicaUpdateLastReplicaAdded(t *testing.T) { r.mu.state.Desc = &c.oldDesc r.mu.lastReplicaAdded = c.lastReplicaAdded r.mu.replicaFlowControlIntegration = newReplicaFlowControlIntegration((*replicaFlowControl)(&r), nil, nil) + r.flowControlV2 = noopProcessor{} r.store = tc.store r.concMgr = tc.repl.concMgr r.setDescRaftMuLocked(context.Background(), &c.newDesc) @@ -75,3 +77,16 @@ func TestReplicaUpdateLastReplicaAdded(t *testing.T) { }) } } + +// noopProcessor provides a noop implementation of OnDescChangedLocked, since +// the test does not initialize any of the dependencies needed by a real +// replica_rac2.Processor. +type noopProcessor struct { + // Always nil + replica_rac2.Processor +} + +func (p noopProcessor) OnDescChangedLocked( + ctx context.Context, desc *roachpb.RangeDescriptor, tenantID roachpb.TenantID, +) { +} diff --git a/pkg/kv/kvserver/replica_metrics.go b/pkg/kv/kvserver/replica_metrics.go index 6b785da3cd18..818c62b34bf5 100644 --- a/pkg/kv/kvserver/replica_metrics.go +++ b/pkg/kv/kvserver/replica_metrics.go @@ -51,7 +51,9 @@ type ReplicaMetrics struct { Unavailable bool Underreplicated bool Overreplicated bool + Decommissioning bool RaftLogTooLarge bool + RangeTooLarge bool BehindCount int64 PausedFollowerCount int64 PendingRaftProposalCount int64 @@ -109,6 +111,7 @@ func (r *Replica) Metrics( lockTableMetrics: lockTableMetrics, raftLogSize: r.mu.raftLogSize, raftLogSizeTrusted: r.mu.raftLogSizeTrusted, + rangeSize: r.mu.state.Stats.Total(), qpUsed: qpUsed, qpCapacity: qpCap, paused: r.mu.pausedFollowers, @@ -138,6 +141,7 @@ type calcReplicaMetricsInput struct { lockTableMetrics concurrency.LockTableMetrics raftLogSize int64 raftLogSizeTrusted bool + rangeSize int64 qpUsed, qpCapacity int64 // quota pool used and capacity bytes paused map[roachpb.ReplicaID]struct{} pendingRaftProposalCount int64 @@ -164,8 +168,14 @@ func calcReplicaMetrics(d calcReplicaMetricsInput) ReplicaMetrics { } } - rangeCounter, unavailable, underreplicated, overreplicated := calcRangeCounter( - d.storeID, d.desc, d.leaseStatus, d.vitalityMap, d.conf.GetNumVoters(), d.conf.NumReplicas, d.clusterNodes) + const ( + raftLogTooLargeMultiple = 4 + rangeTooLargeMultiple = 2 + ) + largeRangeThreshold := rangeTooLargeMultiple * d.conf.RangeMaxBytes + rangeCounter, unavailable, underreplicated, overreplicated, tooLarge, decommissioning := calcRangeCounter( + d.storeID, d.desc, d.leaseStatus, d.vitalityMap, d.conf.GetNumVoters(), d.conf.NumReplicas, + d.clusterNodes, largeRangeThreshold, d.rangeSize) // The raft leader computes the number of raft entries that replicas are // behind. 
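For intuition on the new RangeTooLarge signal wired through calcReplicaMetrics above: a range is flagged when its stats total exceeds rangeTooLargeMultiple (2x) times the zone's RangeMaxBytes, and only the replica responsible for range-level metrics reports it. A tiny self-contained sketch of just the thresholding; the multiple comes from the constants in the diff, while the byte sizes here are made up for illustration.

package main

import "fmt"

func main() {
	const rangeTooLargeMultiple = 2
	rangeMaxBytes := int64(512 << 20) // hypothetical zone config: 512 MiB
	largeRangeThreshold := rangeTooLargeMultiple * rangeMaxBytes

	// A 256 MiB range is fine; a 3 GiB range trips the signal.
	for _, rangeSize := range []int64{256 << 20, 3 << 30} {
		fmt.Printf("size=%d tooLarge=%t\n", rangeSize, rangeSize > largeRangeThreshold)
	}
}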
@@ -176,7 +186,6 @@ func calcReplicaMetrics(d calcReplicaMetricsInput) ReplicaMetrics { leaderPausedFollowerCount = int64(len(d.paused)) } - const raftLogTooLargeMultiple = 4 return ReplicaMetrics{ Leader: leader, LeaseValid: validLease, @@ -192,8 +201,10 @@ func calcReplicaMetrics(d calcReplicaMetricsInput) ReplicaMetrics { Unavailable: unavailable, Underreplicated: underreplicated, Overreplicated: overreplicated, + Decommissioning: decommissioning, RaftLogTooLarge: d.raftLogSizeTrusted && d.raftLogSize > raftLogTooLargeMultiple*d.raftCfg.RaftLogTruncationThreshold, + RangeTooLarge: tooLarge, BehindCount: leaderBehindCount, PausedFollowerCount: leaderPausedFollowerCount, PendingRaftProposalCount: d.pendingRaftProposalCount, @@ -217,9 +228,10 @@ func calcQuotaPoolPercentUsed(qpUsed, qpCapacity int64) int64 { // calcRangeCounter returns whether this replica is designated as the replica in // the range responsible for range-level metrics, whether the range doesn't have -// a quorum of live voting replicas, and whether the range is currently +// a quorum of live voting replicas, whether the range is currently // under-replicated (with regards to either the number of voting replicas or the -// number of non-voting replicas). +// number of non-voting replicas), whether the range is considered too large, +// and whether the range has any decommissioning replicas. // // Note: we compute an estimated range count across the cluster by counting the // leaseholder of each descriptor if it's live, otherwise the first live @@ -232,7 +244,8 @@ func calcRangeCounter( vitalityMap livenesspb.NodeVitalityMap, numVoters, numReplicas int32, clusterNodes int, -) (rangeCounter, unavailable, underreplicated, overreplicated bool) { + rangeTooLargeThreshold, rangeSize int64, +) (rangeCounter, unavailable, underreplicated, overreplicated, tooLarge, decommissioning bool) { // If there is a live leaseholder (regardless of whether the lease is still // valid) that leaseholder is responsible for range-level metrics. if vitalityMap[leaseStatus.Lease.Replica.NodeID].IsLive(livenesspb.Metrics) { @@ -267,6 +280,8 @@ func calcRangeCounter( } else if neededVoters < liveVoters || neededNonVoters < liveNonVoters { overreplicated = true } + tooLarge = rangeSize > rangeTooLargeThreshold + decommissioning = calcDecommissioningCount(desc, vitalityMap) > 0 } return } @@ -321,6 +336,18 @@ func calcBehindCount( return behindCount } +func calcDecommissioningCount( + desc *roachpb.RangeDescriptor, vitalityMap livenesspb.NodeVitalityMap, +) int { + var decommissioningCount int + for _, rd := range desc.Replicas().Descriptors() { + if vitalityMap[rd.NodeID].IsDecommissioning() { + decommissioningCount++ + } + } + return decommissioningCount +} + // LoadStats returns the load statistics for the replica.
func (r *Replica) LoadStats() load.ReplicaLoadStats { return r.loadStats.Stats() diff --git a/pkg/kv/kvserver/replica_metrics_test.go b/pkg/kv/kvserver/replica_metrics_test.go index c6f1c0d0b409..58a58b265e4a 100644 --- a/pkg/kv/kvserver/replica_metrics_test.go +++ b/pkg/kv/kvserver/replica_metrics_test.go @@ -55,20 +55,21 @@ func TestCalcRangeCounterIsLiveMap(t *testing.T) { })) { - ctr, down, under, over := calcRangeCounter(1100, threeVotersAndSingleNonVoter, leaseStatus, livenesspb.NodeVitalityMap{ + ctr, down, under, over, _, decom := calcRangeCounter(1100, threeVotersAndSingleNonVoter, leaseStatus, livenesspb.NodeVitalityMap{ 1000: livenesspb.FakeNodeVitality(true), // by NodeID - }, 3 /* numVoters */, 4 /* numReplicas */, 4 /* clusterNodes */) + }, 3 /* numVoters */, 4 /* numReplicas */, 4 /* clusterNodes */, 0 /* rangeTooLargeThreshold */, 0 /* rangeSize */) require.True(t, ctr) require.True(t, down) require.True(t, under) require.False(t, over) + require.False(t, decom) } { - ctr, down, under, over := calcRangeCounter(1000, threeVotersAndSingleNonVoter, leaseStatus, livenesspb.NodeVitalityMap{ + ctr, down, under, over, _, decom := calcRangeCounter(1000, threeVotersAndSingleNonVoter, leaseStatus, livenesspb.NodeVitalityMap{ 1000: livenesspb.FakeNodeVitality(false), - }, 3 /* numVoters */, 4 /* numReplicas */, 4 /* clusterNodes */) + }, 3 /* numVoters */, 4 /* numReplicas */, 4 /* clusterNodes */, 0 /* rangeTooLargeThreshold */, 0 /* rangeSize */) // Does not confuse a non-live entry for a live one. In other words, // does not think that the liveness map has only entries for live nodes. @@ -76,65 +77,104 @@ func TestCalcRangeCounterIsLiveMap(t *testing.T) { require.False(t, down) require.False(t, under) require.False(t, over) + require.False(t, decom) } { - ctr, down, under, over := calcRangeCounter(11, threeVotersAndSingleNonVoter, leaseStatus, livenesspb.NodeVitalityMap{ + ctr, down, under, over, _, decom := calcRangeCounter(11, threeVotersAndSingleNonVoter, leaseStatus, livenesspb.NodeVitalityMap{ 10: livenesspb.FakeNodeVitality(true), 100: livenesspb.FakeNodeVitality(true), 1000: livenesspb.FakeNodeVitality(true), 2000: livenesspb.FakeNodeVitality(true), - }, 3 /* numVoters */, 4 /* numReplicas */, 4 /* clusterNodes */) + }, 3 /* numVoters */, 4 /* numReplicas */, 4 /* clusterNodes */, 0 /* rangeTooLargeThreshold */, 0 /* rangeSize */) require.True(t, ctr) require.False(t, down) require.False(t, under) require.False(t, over) + require.False(t, decom) } { // Single non-voter dead - ctr, down, under, over := calcRangeCounter(11, oneVoterAndThreeNonVoters, leaseStatus, livenesspb.NodeVitalityMap{ + ctr, down, under, over, _, decom := calcRangeCounter(11, oneVoterAndThreeNonVoters, leaseStatus, livenesspb.NodeVitalityMap{ 10: livenesspb.FakeNodeVitality(true), 100: livenesspb.FakeNodeVitality(true), 1000: livenesspb.FakeNodeVitality(false), 2000: livenesspb.FakeNodeVitality(true), - }, 1 /* numVoters */, 4 /* numReplicas */, 4 /* clusterNodes */) + }, 1 /* numVoters */, 4 /* numReplicas */, 4 /* clusterNodes */, 0 /* rangeTooLargeThreshold */, 0 /* rangeSize */) require.True(t, ctr) require.False(t, down) require.True(t, under) require.False(t, over) + require.False(t, decom) } { // All non-voters are dead, but range is not unavailable - ctr, down, under, over := calcRangeCounter(11, oneVoterAndThreeNonVoters, leaseStatus, livenesspb.NodeVitalityMap{ + ctr, down, under, over, _, decom := calcRangeCounter(11, oneVoterAndThreeNonVoters, leaseStatus, livenesspb.NodeVitalityMap{ 10: 
livenesspb.FakeNodeVitality(true), 100: livenesspb.FakeNodeVitality(false), 1000: livenesspb.FakeNodeVitality(false), 2000: livenesspb.FakeNodeVitality(false), - }, 1 /* numVoters */, 4 /* numReplicas */, 4 /* clusterNodes */) + }, 1 /* numVoters */, 4 /* numReplicas */, 4 /* clusterNodes */, 0 /* rangeTooLargeThreshold */, 0 /* rangeSize */) require.True(t, ctr) require.False(t, down) require.True(t, under) require.False(t, over) + require.False(t, decom) } { // More non-voters than needed - ctr, down, under, over := calcRangeCounter(11, oneVoterAndThreeNonVoters, leaseStatus, livenesspb.NodeVitalityMap{ + ctr, down, under, over, _, decom := calcRangeCounter(11, oneVoterAndThreeNonVoters, leaseStatus, livenesspb.NodeVitalityMap{ 10: livenesspb.FakeNodeVitality(true), 100: livenesspb.FakeNodeVitality(true), 1000: livenesspb.FakeNodeVitality(true), 2000: livenesspb.FakeNodeVitality(true), - }, 1 /* numVoters */, 3 /* numReplicas */, 4 /* clusterNodes */) + }, 1 /* numVoters */, 3 /* numReplicas */, 4 /* clusterNodes */, 0 /* rangeTooLargeThreshold */, 0 /* rangeSize */) require.True(t, ctr) require.False(t, down) require.False(t, under) require.True(t, over) + require.False(t, decom) + } + + { + // Range larger than the threshold. + ctr, _, _, _, large, _ := calcRangeCounter(1100, threeVotersAndSingleNonVoter, leaseStatus, livenesspb.NodeVitalityMap{ + 1000: livenesspb.FakeNodeVitality(true), // by NodeID + }, 3 /* numVoters */, 4 /* numReplicas */, 4 /* clusterNodes */, 1000 /* rangeTooLargeThreshold */, 2000 /* rangeSize */) + + require.True(t, ctr) + require.True(t, large) + } + + { + ctr, _, _, _, large, _ := calcRangeCounter(1000, threeVotersAndSingleNonVoter, leaseStatus, livenesspb.NodeVitalityMap{ + 1000: livenesspb.FakeNodeVitality(false), + }, 3 /* numVoters */, 4 /* numReplicas */, 4 /* clusterNodes */, 1000 /* rangeTooLargeThreshold */, 2000 /* rangeSize */) + require.False(t, ctr) + // Only the node responsible for the range can report if the range is too + // large. + require.False(t, large) + } + + { + // Decommissioning node. 
+ vitality := livenesspb.TestCreateNodeVitality(10, 100, 1000, 2000) + vitality.Decommissioning(100, true /* alive */) + ctr, down, under, over, _, decom := calcRangeCounter(11, threeVotersAndSingleNonVoter, leaseStatus, vitality.ScanNodeVitalityFromCache(), + 3 /* numVoters */, 4 /* numReplicas */, 4 /* clusterNodes */, 0 /* rangeTooLargeThreshold */, 0 /* rangeSize */) + + require.True(t, ctr) + require.False(t, down) + require.False(t, under) + require.False(t, over) + require.True(t, decom) } } @@ -242,8 +282,8 @@ func TestCalcRangeCounterLeaseHolder(t *testing.T) { for _, nodeID := range tc.liveNodes { livenessMap[nodeID] = livenesspb.FakeNodeVitality(true) } - ctr, _, _, _ := calcRangeCounter(tc.storeID, rangeDesc, tc.leaseStatus, livenessMap, - 3 /* numVoters */, 4 /* numReplicas */, 4 /* clusterNodes */) + ctr, _, _, _, _, _ := calcRangeCounter(tc.storeID, rangeDesc, tc.leaseStatus, livenessMap, + 3 /* numVoters */, 4 /* numReplicas */, 4 /* clusterNodes */, 0 /* rangeTooLargeThreshold */, 0 /* rangeSize */) require.Equal(t, tc.expectCounter, ctr) }) } diff --git a/pkg/kv/kvserver/replica_proposal_buf.go b/pkg/kv/kvserver/replica_proposal_buf.go index 854bd589d985..d83c9fde653d 100644 --- a/pkg/kv/kvserver/replica_proposal_buf.go +++ b/pkg/kv/kvserver/replica_proposal_buf.go @@ -144,7 +144,7 @@ type proposer interface { leaseAppliedIndex() kvpb.LeaseAppliedIndex enqueueUpdateCheck() closedTimestampTarget() hlc.Timestamp - shouldCampaignOnRedirect(raftGroup proposerRaft) bool + shouldCampaignOnRedirect(raftGroup proposerRaft, leaseType roachpb.LeaseType) bool // The following require the proposer to hold an exclusive lock. withGroupLocked(func(proposerRaft) error) error @@ -663,7 +663,7 @@ func (b *propBuf) maybeRejectUnsafeProposalLocked( // TODO(nvanbenschoten): move this to replica_range_lease.go when we // support build-time verification for lease acquisition. See #118435. 
- if p.Request.IsSingleRequestLeaseRequest() && b.p.shouldCampaignOnRedirect(raftGroup) { + if p.Request.IsSingleRequestLeaseRequest() && b.p.shouldCampaignOnRedirect(raftGroup, nextLease.Type()) { const format = "campaigning because Raft leader (id=%d) not live in node liveness map" lead := raftGroup.BasicStatus().Lead if logCampaignOnRejectLease.ShouldLog() { @@ -1221,14 +1221,16 @@ func (rp *replicaProposer) registerProposalLocked(p *ProposalData) { rp.mu.proposals[p.idKey] = p } -func (rp *replicaProposer) shouldCampaignOnRedirect(raftGroup proposerRaft) bool { +func (rp *replicaProposer) shouldCampaignOnRedirect( + raftGroup proposerRaft, leaseType roachpb.LeaseType, +) bool { r := (*Replica)(rp) livenessMap, _ := r.store.livenessMap.Load().(livenesspb.IsLiveMap) return shouldCampaignOnLeaseRequestRedirect( raftGroup.BasicStatus(), livenessMap, r.descRLocked(), - r.shouldUseExpirationLeaseRLocked(), + leaseType, r.store.Clock().Now(), ) } diff --git a/pkg/kv/kvserver/replica_proposal_buf_test.go b/pkg/kv/kvserver/replica_proposal_buf_test.go index d635f75be712..c07ed6a8a052 100644 --- a/pkg/kv/kvserver/replica_proposal_buf_test.go +++ b/pkg/kv/kvserver/replica_proposal_buf_test.go @@ -210,7 +210,9 @@ func (t *testProposer) registerProposalLocked(p *ProposalData) { t.registered++ } -func (t *testProposer) shouldCampaignOnRedirect(raftGroup proposerRaft) bool { +func (t *testProposer) shouldCampaignOnRedirect( + raftGroup proposerRaft, leaseType roachpb.LeaseType, +) bool { return t.leaderNotLive } diff --git a/pkg/kv/kvserver/replica_raft.go b/pkg/kv/kvserver/replica_raft.go index c69cd8a7d039..8d7bad76045f 100644 --- a/pkg/kv/kvserver/replica_raft.go +++ b/pkg/kv/kvserver/replica_raft.go @@ -24,6 +24,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv/kvserver/concurrency/poison" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvadmission" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvflowcontrol" + "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvflowcontrol/rac2" + "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvflowcontrol/replica_rac2" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverbase" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverpb" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness/livenesspb" @@ -355,6 +357,10 @@ func (r *Replica) evalAndPropose( return proposalCh, abandon, idKey, writeBytes, nil } +func (r *Replica) encodePriorityForRACv2() bool { + return r.flowControlV2.GetEnabledWhenLeader() == replica_rac2.EnabledWhenLeaderV2Encoding +} + // propose encodes a command, starts tracking it, and proposes it to Raft. // // The method hands ownership of the command over to the Raft machinery. After @@ -408,7 +414,7 @@ func (r *Replica) propose( data, err := raftlog.EncodeCommand(ctx, p.command, p.idKey, raftlog.EncodeOptions{ RaftAdmissionMeta: raftAdmissionMeta, - EncodePriority: false, + EncodePriority: r.encodePriorityForRACv2(), }) if err != nil { return kvpb.NewError(err) @@ -586,7 +592,8 @@ var errRemoved = errors.New("replica removed") // unquiesced and ready to handle the request. 
func (r *Replica) stepRaftGroupRaftMuLocked(req *kvserverpb.RaftMessageRequest) error { r.raftMu.AssertHeld() - return r.withRaftGroup(func(raftGroup *raft.RawNode) (bool, error) { + var sideChannelInfo replica_rac2.SideChannelInfoUsingRaftMessageRequest + err := r.withRaftGroup(func(raftGroup *raft.RawNode) (bool, error) { // We're processing an incoming raft message (from a batch that may // include MsgVotes), so don't campaign if we wake up our raft // group. @@ -634,6 +641,21 @@ func (r *Replica) stepRaftGroupRaftMuLocked(req *kvserverpb.RaftMessageRequest) if term := raftGroup.BasicStatus().Term; term > req.Message.Term { req.Message.Term = term } + case raftpb.MsgApp: + if n := len(req.Message.Entries); n > 0 { + sideChannelInfo = replica_rac2.SideChannelInfoUsingRaftMessageRequest{ + UsingV2Protocol: req.UsingRac2Protocol, + LeaderTerm: req.Message.Term, + First: req.Message.Entries[0].Index, + Last: req.Message.Entries[n-1].Index, + LowPriOverride: req.LowPriorityOverride, + } + } + case raftpb.MsgAppResp: + if req.AdmittedState.Term != 0 { + // TODO(pav-kv): dispatch admitted vector to RACv2 if one is attached. + _ = 0 + } } err := raftGroup.Step(req.Message) if errors.Is(err, raft.ErrProposalDropped) { @@ -646,6 +668,10 @@ func (r *Replica) stepRaftGroupRaftMuLocked(req *kvserverpb.RaftMessageRequest) } return false /* unquiesceAndWakeLeader */, err }) + if sideChannelInfo != (replica_rac2.SideChannelInfoUsingRaftMessageRequest{}) { + r.flowControlV2.SideChannelForPriorityOverrideAtFollowerRaftMuLocked(sideChannelInfo) + } + return err } type handleSnapshotStats struct { @@ -790,6 +816,30 @@ func (r *Replica) handleRaftReadyRaftMuLocked( return handleRaftReadyStats{}, errors.AssertionFailedf( "handleRaftReadyRaftMuLocked cannot be called with a cancellable context") } + // Before doing anything, including calling Ready(), see if we need to + // ratchet up the flow control level. This code will go away when the RACv1 => + // RACv2 transition is complete and RACv1 code is removed. + if r.raftMu.flowControlLevel < replica_rac2.EnabledWhenLeaderV2Encoding { + // Not already at highest level. + level := racV2EnabledWhenLeaderLevel(ctx, r.store.cfg.Settings) + if level > r.raftMu.flowControlLevel { + if r.raftMu.flowControlLevel == replica_rac2.NotEnabledWhenLeader { + func() { + r.mu.Lock() + defer r.mu.Unlock() + // This will close all connected streams and consequently all + // requests waiting on v1 kvflowcontrol.ReplicationAdmissionHandles + // will return. + r.mu.replicaFlowControlIntegration.onDestroyed(ctx) + // Replace with a noop integration since we want no code to execute on + // various calls. + r.mu.replicaFlowControlIntegration = noopReplicaFlowControlIntegration{} + }() + } + r.raftMu.flowControlLevel = level + r.flowControlV2.SetEnabledWhenLeaderRaftMuLocked(ctx, level) + } + } // NB: we need to reference the named return parameter here. If `stats` were // just a local, we'd be modifying the local but not the return value in the @@ -866,6 +916,10 @@ func (r *Replica) handleRaftReadyRaftMuLocked( } else if err != nil { return stats, errors.Wrap(err, "checking raft group for Ready") } + // Even if we don't have a Ready, or entries in Ready, + // replica_rac2.Processor may need to do some work. + raftEvent := rac2.RaftEventFromMsgStorageAppend(msgStorageAppend) + r.flowControlV2.HandleRaftReadyRaftMuLocked(ctx, raftEvent) if !hasReady { // We must update the proposal quota even if we don't have a ready.
// Consider the case when our quota is of size 1 and two out of three @@ -928,9 +982,11 @@ func (r *Replica) handleRaftReadyRaftMuLocked( refreshReason := noReason if hasMsg(msgStorageAppend) { + app := logstore.MakeMsgStorageAppend(msgStorageAppend) + // Leadership changes, if any, are communicated through MsgStorageAppends. // Check if that's the case here. - if msgStorageAppend.Lead != raft.None && leaderID != roachpb.ReplicaID(msgStorageAppend.Lead) { + if app.Lead != raft.None && leaderID != roachpb.ReplicaID(app.Lead) { // Refresh pending commands if the Raft leader has changed. This is // usually the first indication we have of a new leader on a restarted // node. @@ -940,26 +996,26 @@ func (r *Replica) handleRaftReadyRaftMuLocked( // indicating a newly elected leader or a conf change. Replay protection // prevents any corruption, so the waste is only a performance issue. if log.V(3) { - log.Infof(ctx, "raft leader changed: %d -> %d", leaderID, msgStorageAppend.Lead) + log.Infof(ctx, "raft leader changed: %d -> %d", leaderID, app.Lead) } if !r.store.TestingKnobs().DisableRefreshReasonNewLeader { refreshReason = reasonNewLeader } - leaderID = roachpb.ReplicaID(msgStorageAppend.Lead) + leaderID = roachpb.ReplicaID(app.Lead) } - if msgStorageAppend.Snapshot != nil { + if app.Snapshot != nil { if inSnap.Desc == nil { // If we didn't expect Raft to have a snapshot but it has one // regardless, that is unexpected and indicates a programming // error. return stats, errors.AssertionFailedf( "have inSnap=nil, but raft has a snapshot %s", - raft.DescribeSnapshot(*msgStorageAppend.Snapshot), + raft.DescribeSnapshot(*app.Snapshot), ) } - snapUUID, err := uuid.FromBytes(msgStorageAppend.Snapshot.Data) + snapUUID, err := uuid.FromBytes(app.Snapshot.Data) if err != nil { return stats, errors.Wrap(err, "invalid snapshot id") } @@ -970,15 +1026,8 @@ func (r *Replica) handleRaftReadyRaftMuLocked( log.Fatalf(ctx, "incoming snapshot id doesn't match raft snapshot id: %s != %s", snapUUID, inSnap.SnapUUID) } - snap := *msgStorageAppend.Snapshot - hs := raftpb.HardState{ - Term: msgStorageAppend.Term, - Vote: msgStorageAppend.Vote, - Commit: msgStorageAppend.Commit, - Lead: msgStorageAppend.Lead, - LeadEpoch: msgStorageAppend.LeadEpoch, - } - if len(msgStorageAppend.Entries) != 0 { + snap := *app.Snapshot + if len(app.Entries) != 0 { log.Fatalf(ctx, "found Entries in MsgStorageAppend with non-empty Snapshot") } @@ -991,10 +1040,10 @@ func (r *Replica) handleRaftReadyRaftMuLocked( defer releaseMergeLock() stats.tSnapBegin = timeutil.Now() - if err := r.applySnapshot(ctx, inSnap, snap, hs, subsumedRepls); err != nil { + if err := r.applySnapshot(ctx, inSnap, snap, app.HardState(), subsumedRepls); err != nil { return stats, errors.Wrap(err, "while applying snapshot") } - for _, msg := range msgStorageAppend.Responses { + for _, msg := range app.Responses { // The caller would like to see the MsgAppResp that usually results from // applying the snapshot synchronously, so fish it out. if msg.To == raftpb.PeerID(inSnap.FromReplica.ReplicaID) && @@ -1032,10 +1081,10 @@ func (r *Replica) handleRaftReadyRaftMuLocked( } // Send MsgStorageAppend's responses. - r.sendRaftMessages(ctx, msgStorageAppend.Responses, nil /* blocked */, true /* willDeliverLocal */) + r.sendRaftMessages(ctx, app.Responses, nil /* blocked */, true /* willDeliverLocal */) } else { // TODO(pavelkalinnikov): find a way to move it to storeEntries. 
- if msgStorageAppend.Commit != 0 && !r.IsInitialized() { + if app.Commit != 0 && !r.IsInitialized() { log.Fatalf(ctx, "setting non-zero HardState.Commit on uninitialized replica %s", r) } // TODO(pavelkalinnikov): construct and store this in Replica. @@ -1058,23 +1107,26 @@ func (r *Replica) handleRaftReadyRaftMuLocked( DisableSyncLogWriteToss: buildutil.CrdbTestBuild && r.store.TestingKnobs().DisableSyncLogWriteToss, } - m := logstore.MakeMsgStorageAppend(msgStorageAppend) cb := (*replicaSyncCallback)(r) if r.IsInitialized() && r.store.cfg.KVAdmissionController != nil { // Enqueue raft log entries into admission queues. This is // non-blocking; actual admission happens asynchronously. - tenantID, _ := r.TenantID() - for _, entry := range msgStorageAppend.Entries { - if len(entry.Data) == 0 { - continue // nothing to do + isUsingV2OrDestroyed := r.flowControlV2.AdmitRaftEntriesRaftMuLocked(ctx, raftEvent) + if !isUsingV2OrDestroyed { + // Leader is using RACv1 protocol. + tenantID, _ := r.TenantID() + for _, entry := range raftEvent.Entries { + if len(entry.Data) == 0 { + continue // nothing to do + } + r.store.cfg.KVAdmissionController.AdmitRaftEntry( + ctx, tenantID, r.StoreID(), r.RangeID, r.replicaID, raftEvent.Term, entry, + ) } - r.store.cfg.KVAdmissionController.AdmitRaftEntry( - ctx, tenantID, r.StoreID(), r.RangeID, entry, - ) } } - if state, err = s.StoreEntries(ctx, state, m, cb, &stats.append); err != nil { + if state, err = s.StoreEntries(ctx, state, app, cb, &stats.append); err != nil { return stats, errors.Wrap(err, "while storing log entries") } } @@ -1374,6 +1426,12 @@ func (r *Replica) tick( return true, nil } +func (r *Replica) processRACv2PiggybackedAdmitted(ctx context.Context) bool { + r.raftMu.Lock() + defer r.raftMu.Unlock() + return r.flowControlV2.ProcessPiggybackedAdmittedAtLeaderRaftMuLocked(ctx) +} + func (r *Replica) hasRaftReadyRLocked() bool { return r.mu.internalRaftGroup.HasReady() } @@ -1608,7 +1666,7 @@ func (r *Replica) maybeCoalesceHeartbeat( type replicaSyncCallback Replica func (r *replicaSyncCallback) OnLogSync( - ctx context.Context, msgs []raftpb.Message, commitStats storage.BatchCommitStats, + ctx context.Context, done logstore.MsgStorageAppendDone, commitStats storage.BatchCommitStats, ) { repl := (*Replica)(r) // Block sending the responses back to raft, if a test needs to. @@ -1616,7 +1674,7 @@ func (r *replicaSyncCallback) OnLogSync( fn(repl.ID()) } // Send MsgStorageAppend's responses. 
- repl.sendRaftMessages(ctx, msgs, nil /* blocked */, false /* willDeliverLocal */) + repl.sendRaftMessages(ctx, done.Responses(), nil /* blocked */, false /* willDeliverLocal */) if commitStats.TotalDuration > defaultReplicaRaftMuWarnThreshold { log.Infof(repl.raftCtx, "slow non-blocking raft commit: %s", commitStats) } @@ -1785,7 +1843,7 @@ func (r *Replica) deliverLocalRaftMsgsRaftMuLockedReplicaMuLocked( for i, m := range localMsgs { if err := raftGroup.Step(m); err != nil { log.Fatalf(ctx, "unexpected error stepping local raft message [%s]: %v", - raftDescribeMessage(m, raftEntryFormatter), err) + raft.DescribeMessage(m, raftEntryFormatter), err) } // NB: we can reset messages in the localMsgs.recycled slice without holding // the localMsgs mutex because no-one ever writes to localMsgs.recycled and @@ -2276,7 +2334,7 @@ func shouldCampaignOnLeaseRequestRedirect( raftStatus raft.BasicStatus, livenessMap livenesspb.IsLiveMap, desc *roachpb.RangeDescriptor, - shouldUseExpirationLease bool, + leaseType roachpb.LeaseType, now hlc.Timestamp, ) bool { // If we're already campaigning don't start a new term. @@ -2291,14 +2349,14 @@ func shouldCampaignOnLeaseRequestRedirect( if raftStatus.Lead == raft.None { return true } - // If we should be using an expiration lease then we don't need to campaign + // If we don't want to use an epoch-based lease then we don't need to campaign // based on liveness state because there can never be a case where a node can // retain Raft leadership but still be unable to acquire the lease. This is // possible on ranges that use epoch-based leases because the Raft leader may // be partitioned from the liveness range. // See TestRequestsOnFollowerWithNonLiveLeaseholder for an example of a test // that demonstrates this case. - if shouldUseExpirationLease { + if leaseType != roachpb.LeaseEpoch { return false } // Determine if we think the leader is alive, if we don't have the leader in @@ -2326,14 +2384,26 @@ func shouldCampaignOnLeaseRequestRedirect( // campaignLocked campaigns for raft leadership, using PreVote and, if // CheckQuorum is enabled, the recent leader condition. That is, followers will -// not grant prevotes if we're behind on the log and, with CheckQuorum, if +// not grant (pre)votes if we're behind on the log and, with CheckQuorum, if // they've heard from a leader in the past election timeout interval. +// Additionally, the local replica will not even begin to campaign if the recent +// leader condition does not allow it to (i.e. this method will be a no-op). +// +// The "recent leader condition" is based on raft heartbeats for ranges that are +// not using the leader fortification protocol. Followers will not vote against +// a leader if they have recently received a heartbeat (or other message) from +// it. For ranges that are using the leader fortification protocol, the "recent +// leader condition" is based on whether a follower is supporting a fortified +// leader. Followers will not campaign or vote against a leader whose fortified +// store liveness epoch they currently support. // // The CheckQuorum condition can delay elections, particularly with quiesced // ranges that don't tick. However, it is necessary to avoid spurious elections // and stolen leaderships during partial/asymmetric network partitions, which // can lead to permanent unavailability if the leaseholder can no longer reach -// the leader.
For ranges using the leader fortification protocol, it is also +// necessary to implement irrevocable leader support upon which leader leases +// are built. // // Only followers enforce the CheckQuorum recent leader condition though, so if // a quorum of followers consider the leader dead and choose to become @@ -2360,6 +2430,10 @@ func (r *Replica) campaignLocked(ctx context.Context) { // under partial/asymmetric network partitions. It should only be used when the // caller is certain that the current leader is actually dead, and we're not // simply partitioned away from it and/or liveness. +// +// TODO(nvanbenschoten): this is the remaining logic which needs work in order +// to complete #125254. See the comment in raft.go about how even a local +// fortification check is not enough to make MsgTimeoutNow safe. func (r *Replica) forceCampaignLocked(ctx context.Context) { log.VEventf(ctx, 3, "force campaigning") msg := raftpb.Message{To: raftpb.PeerID(r.replicaID), Type: raftpb.MsgTimeoutNow} diff --git a/pkg/kv/kvserver/replica_rangefeed.go b/pkg/kv/kvserver/replica_rangefeed.go index 247a107100d4..97fd37f0d5ee 100644 --- a/pkg/kv/kvserver/replica_rangefeed.go +++ b/pkg/kv/kvserver/replica_rangefeed.go @@ -93,6 +93,18 @@ var RangeFeedUseScheduler = settings.RegisterBoolSetting( var RangefeedSchedulerDisabled = envutil.EnvOrDefaultBool("COCKROACH_RANGEFEED_DISABLE_SCHEDULER", false) +// RangefeedUseBufferedSender controls whether rangefeed uses a node-level +// buffered sender to buffer events, instead of buffering events separately +// in a per-client, per-registration channel. It is currently left +// unimplemented and disabled everywhere (#126560). Panics if enabled. +var RangefeedUseBufferedSender = settings.RegisterBoolSetting( + settings.SystemOnly, + "kv.rangefeed.buffered_stream_sender.enabled", + "use buffered sender for all range feeds instead of buffering events "+ + "separately per client per range", + false, +) + func init() { // Inject into kvserverbase to allow usage from kvcoord. kvserverbase.RangeFeedRefreshInterval = RangeFeedRefreshInterval diff --git a/pkg/kv/kvserver/replica_rangefeed_test.go b/pkg/kv/kvserver/replica_rangefeed_test.go index ccb667b88299..dfd82c73a445 100644 --- a/pkg/kv/kvserver/replica_rangefeed_test.go +++ b/pkg/kv/kvserver/replica_rangefeed_test.go @@ -79,9 +79,9 @@ func (s *testStream) Cancel() { s.cancel() } -func (s *testStream) SendIsThreadSafe() {} +func (s *testStream) SendUnbufferedIsThreadSafe() {} -func (s *testStream) Send(e *kvpb.RangeFeedEvent) error { +func (s *testStream) SendUnbuffered(e *kvpb.RangeFeedEvent) error { s.mu.Lock() defer s.mu.Unlock() s.mu.events = append(s.mu.events, e) @@ -1521,8 +1521,6 @@ func TestRangefeedCheckpointsRecoverFromLeaseExpiration(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - skip.WithIssue(t, 123551) - ctx := context.Background() var scratchRangeID int64 // accessed atomically // nudgeSeen will be set if a request filter sees the signature of the @@ -1663,13 +1661,25 @@ func TestRangefeedCheckpointsRecoverFromLeaseExpiration(t *testing.T) { // Expire the lease. Given that the Raft leadership is on n2, only n2 will be // eligible to acquire a new lease.
log.Infof(ctx, "test expiring lease") - nl := n2.NodeLiveness().(*liveness.NodeLiveness) - resumeHeartbeats := nl.PauseAllHeartbeatsForTest() - n2Liveness, ok := nl.Self() + nl2 := n2.NodeLiveness().(*liveness.NodeLiveness) + resumeHeartbeats := nl2.PauseAllHeartbeatsForTest() + n2Liveness, ok := nl2.Self() require.True(t, ok) - manualClock.Increment(n2Liveness.Expiration.ToTimestamp().Add(1, 0).WallTime - manualClock.UnixNano()) + manualClock.Increment(max(firstLease.MinExpiration.WallTime, n2Liveness.Expiration.ToTimestamp(). + Add(1, 0).WallTime) - manualClock.UnixNano()) atomic.StoreInt64(&rejectExtraneousRequests, 1) - // Ask another node to increment n2's liveness record. + + // Ask another node to increment n2's liveness record, but first, wait until + // n1's liveness state is the same as n2's. Otherwise, the epoch below might + // get rejected because of mismatching liveness records. + testutils.SucceedsSoon(t, func() error { + nl1 := n1.NodeLiveness().(*liveness.NodeLiveness) + n2LivenessFromN1, _ := nl1.GetLiveness(n2.NodeID()) + if n2Liveness != n2LivenessFromN1.Liveness { + return errors.Errorf("waiting for node 2 liveness to converge on both nodes 1 and 2") + } + return nil + }) require.NoError(t, n1.NodeLiveness().(*liveness.NodeLiveness).IncrementEpoch(ctx, n2Liveness)) resumeHeartbeats() diff --git a/pkg/kv/kvserver/replica_store_liveness.go b/pkg/kv/kvserver/replica_store_liveness.go index c5db765187a6..fc9f7c35cc8c 100644 --- a/pkg/kv/kvserver/replica_store_liveness.go +++ b/pkg/kv/kvserver/replica_store_liveness.go @@ -16,7 +16,6 @@ import ( "fmt" "hash/fnv" - "github.com/cockroachdb/cockroach/pkg/clusterversion" slpb "github.com/cockroachdb/cockroach/pkg/kv/kvserver/storeliveness/storelivenesspb" "github.com/cockroachdb/cockroach/pkg/raft/raftpb" "github.com/cockroachdb/cockroach/pkg/raft/raftstoreliveness" @@ -94,12 +93,7 @@ func (r *replicaRLockedStoreLiveness) SupportFrom( // SupportFromEnabled implements the raftstoreliveness.StoreLiveness interface. func (r *replicaRLockedStoreLiveness) SupportFromEnabled() bool { - // TODO(mira): this version check is incorrect. For one, it doesn't belong - // here. Instead, the version should be checked when deciding to enable - // StoreLiveness or not. Then, the check here should only check whether store - // liveness is enabled. 
- storeLivenessEnabled := r.store.ClusterSettings().Version.IsActive(context.TODO(), clusterversion.V24_3_StoreLivenessEnabled) - if !storeLivenessEnabled { + if !r.store.storeLiveness.SupportFromEnabled(context.TODO()) { return false } fracEnabled := raftLeaderFortificationFractionEnabled.Get(&r.store.ClusterSettings().SV) diff --git a/pkg/kv/kvserver/replica_test.go b/pkg/kv/kvserver/replica_test.go index 0d2e74e3b1ac..f6242a6108ca 100644 --- a/pkg/kv/kvserver/replica_test.go +++ b/pkg/kv/kvserver/replica_test.go @@ -11574,11 +11574,11 @@ func TestReplicaShouldCampaignOnLeaseRequestRedirect(t *testing.T) { defer log.Scope(t).Close(t) type params struct { - raftStatus raft.BasicStatus - livenessMap livenesspb.IsLiveMap - desc *roachpb.RangeDescriptor - shouldUseExpirationLease bool - now hlc.Timestamp + raftStatus raft.BasicStatus + livenessMap livenesspb.IsLiveMap + desc *roachpb.RangeDescriptor + leaseType roachpb.LeaseType + now hlc.Timestamp } // Set up a base state that we can vary, representing this node n1 being a @@ -11608,7 +11608,8 @@ func TestReplicaShouldCampaignOnLeaseRequestRedirect(t *testing.T) { 2: livenesspb.IsLiveMapEntry{IsLive: false}, 3: livenesspb.IsLiveMapEntry{IsLive: false}, }, - now: hlc.Timestamp{Logical: 10}, + leaseType: roachpb.LeaseEpoch, + now: hlc.Timestamp{Logical: 10}, } testcases := map[string]struct { @@ -11632,7 +11633,10 @@ func TestReplicaShouldCampaignOnLeaseRequestRedirect(t *testing.T) { p.raftStatus.Lead = raft.None }}, "should use expiration lease": {false, func(p *params) { - p.shouldUseExpirationLease = true + p.leaseType = roachpb.LeaseExpiration + }}, + "should use leader lease": {false, func(p *params) { + p.leaseType = roachpb.LeaseLeader }}, "leader not in desc": {false, func(p *params) { p.raftStatus.Lead = 4 @@ -11678,7 +11682,7 @@ func TestReplicaShouldCampaignOnLeaseRequestRedirect(t *testing.T) { } tc.modify(&p) require.Equal(t, tc.expect, shouldCampaignOnLeaseRequestRedirect( - p.raftStatus, p.livenessMap, p.desc, p.shouldUseExpirationLease, p.now)) + p.raftStatus, p.livenessMap, p.desc, p.leaseType, p.now)) }) } } @@ -14035,7 +14039,7 @@ func TestReplicaRateLimit(t *testing.T) { cfg.TestingKnobs.DisableMergeWaitForReplicasInit = true // Use time travel to control the rate limiter in this test. Set authorizer to // engage the rate limiter, overriding the default allow-all policy in tests. - cfg.TestingKnobs.TenantRateKnobs.TimeSource = tc.manualClock + cfg.TestingKnobs.TenantRateKnobs.QuotaPoolOptions = []quotapool.Option{quotapool.WithTimeSource(tc.manualClock)} cfg.TestingKnobs.TenantRateKnobs.Authorizer = tenantcapabilitiesauthorizer.New(cfg.Settings, nil) tc.StartWithStoreConfig(ctx, t, stopper, cfg) @@ -14744,7 +14748,7 @@ func TestReplayWithBumpedTimestamp(t *testing.T) { return err }) if err != nil { - t.Errorf(err.Error()) + t.Error(err) } }() diff --git a/pkg/kv/kvserver/replicate_queue.go b/pkg/kv/kvserver/replicate_queue.go index 7a99316c040d..b0e255194425 100644 --- a/pkg/kv/kvserver/replicate_queue.go +++ b/pkg/kv/kvserver/replicate_queue.go @@ -89,6 +89,22 @@ var EnqueueInReplicateQueueOnSpanConfigUpdateEnabled = settings.RegisterBoolSett( true, ) +// EnqueueProblemRangeInReplicateQueueInterval controls the interval at which +// problem ranges are enqueued into the replicate queue for processing, outside +// of the normal scanner interval. A problem range is one which is +// underreplicated or has a replica on a decommissioning store. The setting is +// disabled when set to 0, which is the default.
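+// +// For example, a test can activate the off-by-default behavior with a cluster +// settings override along these lines (a usage sketch, assuming a context ctx +// and a *cluster.Settings named st are in scope; the Override call is the same +// one the test below uses): +// +// EnqueueProblemRangeInReplicateQueueInterval.Override(ctx, &st.SV, time.Minute)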
+var EnqueueProblemRangeInReplicateQueueInterval = settings.RegisterDurationSetting( + settings.SystemOnly, + "kv.enqueue_in_replicate_queue_on_problem.interval", + "interval at which problem ranges are enqueued into the replicate queue for "+ + "processing, outside of the normal scanner interval; a problem range is "+ + "one which is underreplicated or has a replica on a decommissioning store, "+ + "disabled when set to 0", + 0, + settings.NonNegativeDuration, +) + var ( metaReplicateQueueAddReplicaCount = metric.Metadata{ Name: "queue.replicate.addreplica", diff --git a/pkg/kv/kvserver/replicate_queue_test.go b/pkg/kv/kvserver/replicate_queue_test.go index 4884de5b55da..41c8aa263996 100644 --- a/pkg/kv/kvserver/replicate_queue_test.go +++ b/pkg/kv/kvserver/replicate_queue_test.go @@ -2436,3 +2436,70 @@ func TestReplicateQueueAllocatorToken(t *testing.T) { var allocationError allocator.AllocationError require.ErrorAs(t, processErr, &allocationError) } + +// TestReplicateQueueDecommissionScannerDisabled asserts that decommissioning +// replicas are replaced by the replicate queue despite the scanner being +// disabled, when EnqueueProblemRangeInReplicateQueueInterval is set to a +// non-zero value (enabled). +func TestReplicateQueueDecommissionScannerDisabled(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + // Enable enqueueing of problem ranges in the replicate queue at most once + // per second. We disable the scanner to ensure that the replicate queue + // doesn't rely on the scanner to process decommissioning replicas. + settings := cluster.MakeTestingClusterSettings() + kvserver.EnqueueProblemRangeInReplicateQueueInterval.Override( + context.Background(), &settings.SV, 1*time.Second) + + ctx := context.Background() + tc := testcluster.StartTestCluster(t, 5, base.TestClusterArgs{ + ReplicationMode: base.ReplicationManual, + ServerArgs: base.TestServerArgs{ + Settings: settings, + DefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant, + // Disable the scanner. + ScanInterval: 100 * time.Hour, + ScanMinIdleTime: 100 * time.Hour, + ScanMaxIdleTime: 100 * time.Hour, + }, + }) + defer tc.Stopper().Stop(ctx) + + decommissioningSrvIdx := 4 + decommissioningSrv := tc.Server(decommissioningSrvIdx) + require.NoError(t, decommissioningSrv.Decommission(ctx, + livenesspb.MembershipStatus_DECOMMISSIONING, + []roachpb.NodeID{tc.Server(decommissioningSrvIdx).NodeID()})) + + // Ensure that the node is marked as decommissioning on every other node. + // Once this is set, we also know that the onDecommissioning callback has + // fired, which enqueues every range which is on the decommissioning node. + testutils.SucceedsSoon(t, func() error { + for i := 0; i < tc.NumServers(); i++ { + srv := tc.Server(i) + if _, exists := srv.DecommissioningNodeMap()[decommissioningSrv.NodeID()]; !exists { + return errors.Newf("node %d not detected to be decommissioning", decommissioningSrv.NodeID()) + } + } + return nil + }) + + // Now add a replica to the decommissioning node and then enable the + // replicate queue. We expect that the replica will be removed after the + // decommissioning replica is noticed via maybeEnqueueProblemRange. 
+ scratchKey := tc.ScratchRange(t) + tc.AddVotersOrFatal(t, scratchKey, tc.Target(decommissioningSrvIdx)) + tc.ToggleReplicateQueues(true /* active */) + testutils.SucceedsSoon(t, func() error { + var descs []*roachpb.RangeDescriptor + tc.GetFirstStoreFromServer(t, decommissioningSrvIdx).VisitReplicas(func(r *kvserver.Replica) bool { + descs = append(descs, r.Desc()) + return true + }) + if len(descs) != 0 { + return errors.Errorf("expected no replicas, found %d: %v", len(descs), descs) + } + return nil + }) +} diff --git a/pkg/kv/kvserver/scheduler.go b/pkg/kv/kvserver/scheduler.go index c933df7f7ab9..b1c8462f484c 100644 --- a/pkg/kv/kvserver/scheduler.go +++ b/pkg/kv/kvserver/scheduler.go @@ -122,6 +122,9 @@ type raftProcessor interface { // Process a raft tick for the specified range. // Return true if the range should be queued for ready processing. processTick(context.Context, roachpb.RangeID) bool + // Process a piggybacked raftpb.Message that advances admitted. Used for + // RACv2. Returns true if the range should be queued for ready processing. + processRACv2PiggybackedAdmitted(ctx context.Context, id roachpb.RangeID) bool } type raftScheduleFlags int @@ -131,6 +134,7 @@ const ( stateRaftReady stateRaftRequest stateRaftTick + stateRACv2PiggybackedAdmitted ) type raftScheduleState struct { @@ -409,6 +413,15 @@ func (ss *raftSchedulerShard) worker( } } } + if state.flags&stateRACv2PiggybackedAdmitted != 0 { + // processRACv2PiggybackedAdmitted returns true if the range should + // perform ready processing. Do not reorder this below the call to + // processReady. + if processor.processRACv2PiggybackedAdmitted(ctx, id) { + state.flags |= stateRaftReady + } + } + if state.flags&stateRaftReady != 0 { processor.processReady(id) } @@ -549,6 +562,10 @@ func (s *raftScheduler) EnqueueRaftTicks(batch *raftSchedulerBatch) { s.enqueueBatch(stateRaftTick, batch) } +func (s *raftScheduler) EnqueueRACv2PiggybackAdmitted(id roachpb.RangeID) { + s.enqueue1(stateRACv2PiggybackedAdmitted, id) +} + func nowNanos() int64 { return timeutil.Now().UnixNano() } diff --git a/pkg/kv/kvserver/scheduler_test.go b/pkg/kv/kvserver/scheduler_test.go index 2663ba7c257c..f8a21471be63 100644 --- a/pkg/kv/kvserver/scheduler_test.go +++ b/pkg/kv/kvserver/scheduler_test.go @@ -179,6 +179,12 @@ func (p *testProcessor) processTick(_ context.Context, rangeID roachpb.RangeID) return false } +func (p *testProcessor) processRACv2PiggybackedAdmitted( + ctx context.Context, id roachpb.RangeID, +) bool { + return false +} + func (p *testProcessor) readyCount(rangeID roachpb.RangeID) int { p.mu.Lock() defer p.mu.Unlock() diff --git a/pkg/kv/kvserver/split_delay_helper_test.go b/pkg/kv/kvserver/split_delay_helper_test.go index dedcc82e4639..cda9f203da3b 100644 --- a/pkg/kv/kvserver/split_delay_helper_test.go +++ b/pkg/kv/kvserver/split_delay_helper_test.go @@ -155,7 +155,7 @@ func TestSplitDelayToAvoidSnapshot(t *testing.T) { } s := maybeDelaySplitToAvoidSnapshot(ctx, h) assert.EqualValues(t, "; delayed by 5.5s to resolve: replica r1/2 not caught up: "+ - state.String()+" match=0 next=0 paused (without success)", s) + state.String()+" match=0 next=0 sentCommit=0 matchCommit=0 paused (without success)", s) }) } @@ -193,6 +193,7 @@ func TestSplitDelayToAvoidSnapshot(t *testing.T) { } } s := maybeDelaySplitToAvoidSnapshot(ctx, h) - assert.EqualValues(t, "; delayed by 2.5s to resolve: replica r1/2 not caught up: StateProbe match=0 next=0", s) + assert.EqualValues(t, "; delayed by 2.5s to resolve: replica r1/2 not caught up: "+ + 
"StateProbe match=0 next=0 sentCommit=0 matchCommit=0", s) }) } diff --git a/pkg/kv/kvserver/store.go b/pkg/kv/kvserver/store.go index 6b44bd1939b8..9586a23cf410 100644 --- a/pkg/kv/kvserver/store.go +++ b/pkg/kv/kvserver/store.go @@ -45,6 +45,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvadmission" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvflowcontrol" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvflowcontrol/kvflowhandle" + "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvflowcontrol/rac2" + "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvflowcontrol/replica_rac2" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverbase" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverpb" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvstorage" @@ -55,6 +57,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv/kvserver/rangefeed" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/rditer" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/storeliveness" + slpb "github.com/cockroachdb/cockroach/pkg/kv/kvserver/storeliveness/storelivenesspb" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/tenantrate" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/tscache" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/txnrecovery" @@ -415,6 +418,7 @@ func newRaftConfig( PreVote: true, CheckQuorum: storeCfg.RaftEnableCheckQuorum, + CRDBVersion: storeCfg.Settings.Version, } } @@ -912,6 +916,12 @@ type Store struct { // transport is connected to, and is used by the canonical // replicaFlowControlIntegration implementation. raftTransportForFlowControl raftTransportForFlowControl + + // kvflowRangeControllerFactory is used for replication AC (flow control) V2 + // to create new range controllers which mediate the flow of requests to + // replicas. + kvflowRangeControllerFactory replica_rac2.RangeControllerFactory + // metricsMu protects the collection and update of engine metrics. metricsMu syncutil.Mutex @@ -1152,17 +1162,18 @@ type StoreConfig struct { AmbientCtx log.AmbientContext base.RaftConfig - DefaultSpanConfig roachpb.SpanConfig - Settings *cluster.Settings - Clock *hlc.Clock - Gossip *gossip.Gossip - DB *kv.DB - NodeLiveness *liveness.NodeLiveness - StorePool *storepool.StorePool - Transport *RaftTransport - NodeDialer *nodedialer.Dialer - RPCContext *rpc.Context - RangeDescriptorCache *rangecache.RangeCache + DefaultSpanConfig roachpb.SpanConfig + Settings *cluster.Settings + Clock *hlc.Clock + Gossip *gossip.Gossip + DB *kv.DB + NodeLiveness *liveness.NodeLiveness + StorePool *storepool.StorePool + Transport *RaftTransport + StoreLivenessTransport *storeliveness.Transport + NodeDialer *nodedialer.Dialer + RPCContext *rpc.Context + RangeDescriptorCache *rangecache.RangeCache ClosedTimestampSender *sidetransport.Sender ClosedTimestampReceiver sidetransportReceiver @@ -1274,6 +1285,14 @@ type StoreConfig struct { // KVFlowHandleMetrics is a shared metrics struct for all // kvflowcontrol.Handles. KVFlowHandleMetrics *kvflowhandle.Metrics + // KVFlowAdmittedPiggybacker is used for replication AC (flow control) v2. + KVFlowAdmittedPiggybacker replica_rac2.AdmittedPiggybacker + // KVFlowStreamTokenProvider is used for replication AC (flow control) v2 to + // provide token counters for replication streams. 
+ KVFlowStreamTokenProvider *rac2.StreamTokenCounterProvider + // KVFlowEvalWaitMetrics is used for replication AC (flow control) v2 to + // track requests waiting for evaluation. + KVFlowEvalWaitMetrics *rac2.EvalWaitMetrics // SchedulerLatencyListener listens in on scheduling latencies, information // that's then used to adjust various admission control components (like how @@ -1532,6 +1551,17 @@ func NewStore( cfg.RaftSchedulerConcurrency, cfg.RaftSchedulerShardSize, cfg.RaftSchedulerConcurrencyPriority, cfg.RaftElectionTimeoutTicks) + // kvflowRangeControllerFactory depends on the raft scheduler, so it must be + // created per-store rather than per-node like other replication admission + // control (flow control) v2 components. + s.kvflowRangeControllerFactory = replica_rac2.NewRangeControllerFactoryImpl( + s.Clock(), + s.cfg.KVFlowEvalWaitMetrics, + s.cfg.KVFlowStreamTokenProvider, + replica_rac2.NewStreamCloseScheduler( + s.stopper, timeutil.DefaultTimeSource{}, s.scheduler), + ) + // Run a log SyncWaiter loop for every 32 raft scheduler goroutines. // Experiments on c5d.12xlarge instances (48 vCPUs, the largest single-socket // instance AWS offers) show that with fewer SyncWaiters, raft log callback @@ -2170,8 +2200,18 @@ func (s *Store) Start(ctx context.Context, stopper *stop.Stopper) error { ) s.metrics.registry.AddMetricStruct(s.recoveryMgr.Metrics()) - // TODO(mira): create the store liveness support manager here. - // s.storeLiveness = ... + heartbeatInterval, livenessInterval := s.cfg.StoreLivenessDurations() + supportGracePeriod := s.cfg.RPCContext.StoreLivenessWithdrawalGracePeriod() + options := storeliveness.NewOptions(heartbeatInterval, livenessInterval, supportGracePeriod) + sm := storeliveness.NewSupportManager( + slpb.StoreIdent{NodeID: s.nodeDesc.NodeID}, s.StateEngine(), options, + s.cfg.Settings, s.stopper, s.cfg.Clock, s.cfg.StoreLivenessTransport, + ) + s.cfg.StoreLivenessTransport.ListenMessages(s.StoreID(), sm) + s.storeLiveness = sm + if err = sm.Start(ctx); err != nil { + return errors.Wrap(err, "starting store liveness") + } s.rangeIDAlloc = idAlloc @@ -3214,6 +3254,7 @@ func (s *Store) updateReplicationGauges(ctx context.Context) error { unavailableRangeCount int64 underreplicatedRangeCount int64 overreplicatedRangeCount int64 + decommissioningRangeCount int64 behindCount int64 pausedFollowerCount int64 ioOverload float64 @@ -3233,6 +3274,7 @@ func (s *Store) updateReplicationGauges(ctx context.Context) error { ) now := s.cfg.Clock.NowAsClockTimestamp() + goNow := now.ToTimestamp().GoTime() clusterNodes := s.ClusterNodeCount() s.mu.RLock() @@ -3296,6 +3338,12 @@ func (s *Store) updateReplicationGauges(ctx context.Context) error { if metrics.Overreplicated { overreplicatedRangeCount++ } + if metrics.Decommissioning { + // NB: Enqueue is disabled by default from here and throttled async if + // enabled. 
+ rep.maybeEnqueueProblemRange(ctx, goNow, metrics.LeaseValid, metrics.Leaseholder) + decommissioningRangeCount++ + } } pausedFollowerCount += metrics.PausedFollowerCount pendingRaftProposalCount += metrics.PendingRaftProposalCount @@ -3359,6 +3407,7 @@ func (s *Store) updateReplicationGauges(ctx context.Context) error { s.metrics.UnavailableRangeCount.Update(unavailableRangeCount) s.metrics.UnderReplicatedRangeCount.Update(underreplicatedRangeCount) s.metrics.OverReplicatedRangeCount.Update(overreplicatedRangeCount) + s.metrics.DecommissioningRangeCount.Update(decommissioningRangeCount) s.metrics.RaftLogFollowerBehindCount.Update(behindCount) s.metrics.RaftPausedFollowerCount.Update(pausedFollowerCount) s.metrics.IOOverload.Update(ioOverload) diff --git a/pkg/kv/kvserver/store_raft.go b/pkg/kv/kvserver/store_raft.go index 329164bfd7dc..697c0cbdab55 100644 --- a/pkg/kv/kvserver/store_raft.go +++ b/pkg/kv/kvserver/store_raft.go @@ -18,6 +18,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv/kvpb" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverpb" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness/livenesspb" + "github.com/cockroachdb/cockroach/pkg/raft" "github.com/cockroachdb/cockroach/pkg/raft/raftpb" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/rpc" @@ -377,7 +378,7 @@ func (s *Store) processRaftRequestWithReplica( defer r.MeasureRaftCPUNanos(grunning.Time()) if verboseRaftLoggingEnabled() { - log.Infof(ctx, "incoming raft message:\n%s", raftDescribeMessage(req.Message, raftEntryFormatter)) + log.Infof(ctx, "incoming raft message:\n%s", raft.DescribeMessage(req.Message, raftEntryFormatter)) } if req.Message.Type == raftpb.MsgSnap { @@ -720,6 +721,14 @@ func (s *Store) processTick(_ context.Context, rangeID roachpb.RangeID) bool { return exists // ready } +func (s *Store) processRACv2PiggybackedAdmitted(ctx context.Context, rangeID roachpb.RangeID) bool { + r, ok := s.mu.replicasByRangeID.Load(rangeID) + if !ok { + return false + } + return r.processRACv2PiggybackedAdmitted(ctx) +} + // nodeIsLiveCallback is invoked when a node transitions from non-live to live. // Iterate through all replicas and find any which belong to ranges containing // the implicated node. 
Unquiesce if currently quiesced and the node's replica diff --git a/pkg/kv/kvserver/store_test.go b/pkg/kv/kvserver/store_test.go index f3b381423485..1c4a7f475df7 100644 --- a/pkg/kv/kvserver/store_test.go +++ b/pkg/kv/kvserver/store_test.go @@ -47,6 +47,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv/kvserver/logstore" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/rditer" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/stateloader" + "github.com/cockroachdb/cockroach/pkg/kv/kvserver/storeliveness" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/txnwait" "github.com/cockroachdb/cockroach/pkg/multitenant/tenantcapabilities/tenantcapabilitiesauthorizer" "github.com/cockroachdb/cockroach/pkg/raft" @@ -245,8 +246,12 @@ func createTestStoreWithoutStart( NoopStoresFlowControlIntegration{}, NoopRaftTransportDisconnectListener{}, (*node_rac2.AdmittedPiggybacker)(nil), + nil, /* PiggybackedAdmittedResponseScheduler */ nil, /* knobs */ ) + cfg.StoreLivenessTransport = storeliveness.NewTransport( + cfg.AmbientCtx, stopper, cfg.Clock, cfg.NodeDialer, server, + ) stores := NewStores(cfg.AmbientCtx, cfg.Clock) nodeDesc := &roachpb.NodeDescriptor{NodeID: 1} @@ -929,7 +934,11 @@ func TestMarkReplicaInitialized(t *testing.T) { ReplicaID: 1, }} desc.NextReplicaID = 2 - r.setDescRaftMuLocked(ctx, desc) + func() { + r.raftMu.Lock() + defer r.raftMu.Unlock() + r.setDescRaftMuLocked(ctx, desc) + }() expectedResult = "not in uninitReplicas" func() { r.mu.Lock() diff --git a/pkg/kv/kvserver/storeliveness/BUILD.bazel b/pkg/kv/kvserver/storeliveness/BUILD.bazel index 233e9c1dfa56..59c6b88c52d7 100644 --- a/pkg/kv/kvserver/storeliveness/BUILD.bazel +++ b/pkg/kv/kvserver/storeliveness/BUILD.bazel @@ -3,20 +3,25 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "storeliveness", srcs = [ + "config.go", "fabric.go", "persist.go", "requester_state.go", + "support_manager.go", "supporter_state.go", "transport.go", ], importpath = "github.com/cockroachdb/cockroach/pkg/kv/kvserver/storeliveness", visibility = ["//visibility:public"], deps = [ + "//pkg/clusterversion", "//pkg/keys", "//pkg/kv/kvserver/storeliveness/storelivenesspb", "//pkg/roachpb", "//pkg/rpc", "//pkg/rpc/nodedialer", + "//pkg/settings", + "//pkg/settings/cluster", "//pkg/storage", "//pkg/util/hlc", "//pkg/util/log", @@ -26,6 +31,7 @@ go_library( "//pkg/util/timeutil", "@com_github_cockroachdb_errors//:errors", "@org_golang_google_grpc//:go_default_library", + "@org_golang_x_exp//maps", ], ) @@ -34,6 +40,7 @@ go_test( srcs = [ "persist_test.go", "store_liveness_test.go", + "support_manager_test.go", "transport_test.go", ], data = glob(["testdata/**"]), @@ -55,6 +62,8 @@ go_test( "//pkg/util/metric", "//pkg/util/netutil", "//pkg/util/stop", + "//pkg/util/syncutil", + "//pkg/util/timeutil", "@com_github_cockroachdb_datadriven//:datadriven", "@com_github_cockroachdb_errors//:errors", "@com_github_stretchr_testify//require", diff --git a/pkg/kv/kvserver/storeliveness/config.go b/pkg/kv/kvserver/storeliveness/config.go new file mode 100644 index 000000000000..0969820f1d70 --- /dev/null +++ b/pkg/kv/kvserver/storeliveness/config.go @@ -0,0 +1,46 @@ +// Copyright 2024 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. 
+// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package storeliveness + +import "time" + +// Options includes all Store Liveness durations needed by the SupportManager. +type Options struct { + // HeartbeatInterval determines how often Store Liveness sends heartbeats. + HeartbeatInterval time.Duration + // LivenessInterval determines the Store Liveness support expiration time. + LivenessInterval time.Duration + // SupportExpiryInterval determines how often Store Liveness checks if support + // should be withdrawn. + SupportExpiryInterval time.Duration + // IdleSupportFromInterval determines how often Store Liveness checks if any + // stores have not appeared in a SupportFrom call recently. + IdleSupportFromInterval time.Duration + // SupportWithdrawalGracePeriod determines how long Store Liveness should + // wait after restart before withdrawing support. It helps prevent support + // churn until the first heartbeats are delivered. + SupportWithdrawalGracePeriod time.Duration +} + +// NewOptions instantiates the Store Liveness Options. +func NewOptions( + heartbeatInterval time.Duration, + livenessInterval time.Duration, + supportWithdrawalGracePeriod time.Duration, +) Options { + return Options{ + HeartbeatInterval: heartbeatInterval, + LivenessInterval: livenessInterval, + SupportExpiryInterval: 1 * time.Second, + IdleSupportFromInterval: 1 * time.Minute, + SupportWithdrawalGracePeriod: supportWithdrawalGracePeriod, + } +} diff --git a/pkg/kv/kvserver/storeliveness/fabric.go b/pkg/kv/kvserver/storeliveness/fabric.go index 92d275e41371..572bc22fe6ea 100644 --- a/pkg/kv/kvserver/storeliveness/fabric.go +++ b/pkg/kv/kvserver/storeliveness/fabric.go @@ -11,6 +11,8 @@ package storeliveness import ( + "context" + slpb "github.com/cockroachdb/cockroach/pkg/kv/kvserver/storeliveness/storelivenesspb" "github.com/cockroachdb/cockroach/pkg/util/hlc" ) @@ -50,4 +52,11 @@ type Fabric interface { // and S_local will initiate a heartbeat loop to S_remote in order to // request support so that future calls to SupportFrom may succeed. SupportFrom(id slpb.StoreIdent) (slpb.Epoch, hlc.Timestamp, bool) + + // SupportFromEnabled determines if Store Liveness requests support from + // other stores. If it returns true, then Store Liveness is sending + // heartbeats and responding to heartbeats. If it returns false, Store + // Liveness is not sending heartbeats but is still responding to heartbeats + // to ensure any promise by the local store to provide support is still kept. + SupportFromEnabled(ctx context.Context) bool } diff --git a/pkg/kv/kvserver/storeliveness/requester_state.go b/pkg/kv/kvserver/storeliveness/requester_state.go index 581c29a524c8..fafb58d8b824 100644 --- a/pkg/kv/kvserver/storeliveness/requester_state.go +++ b/pkg/kv/kvserver/storeliveness/requester_state.go @@ -26,11 +26,47 @@ type requesterState struct { // meta stores the RequesterMeta, including the max timestamp and max epoch at // which this store has requested support. meta slpb.RequesterMeta - // supportFrom stores the SupportState for each remote store from which this - // store has received support. - supportFrom map[slpb.StoreIdent]slpb.SupportState + // supportFrom stores a pointer to requestedSupport for each remote store + // from which this store has received support.
+ supportFrom map[slpb.StoreIdent]*requestedSupport } +// requestedSupport is a wrapper around SupportState that also indicates whether +// support from the store has been queried via SupportFrom. +type requestedSupport struct { + // state is the SupportState corresponding to a single store from which + // support was requested. + state slpb.SupportState + // recentlyQueried indicates if support from the store has been queried + // (by calling SupportFrom) recently. Unlike all other fields in + // requesterState, updating this field does not require locking + // requesterStateHandler.mu for writing. This is because recentlyQueried is + // not updated as part of the support state and meta process; it is updated in + // SupportFrom, so it's important that updating recentlyQueried doesn't lock + // requesterStateHandler.mu for writing. However, updating recentlyQueried + // needs to lock requesterStateHandler.mu for reading to ensure that no new + // stores are added to the supportFrom map. + recentlyQueried atomic.Int32 +} + +// recentlyQueried transitions between three possible values to make sure stores +// are not marked as idle prematurely. A store transitions from active to +// inactive every IdleSupportFromInterval and back to active upon being queried +// in a SupportFrom call. If another IdleSupportFromInterval expires after a +// store was marked as inactive, it will be marked as idle and will not be sent +// heartbeats until it transitions to active again. +const ( + // active indicates that the store has been queried in a SupportFrom call + // recently (within IdleSupportFromInterval). + active int32 = iota + // inactive indicates that the store has NOT been queried in a SupportFrom + // call recently (within IdleSupportFromInterval). + inactive + // idle indicates that it has been even longer since the store was queried + // in a SupportFrom call (more than IdleSupportFromInterval). + idle +) + // requesterStateHandler is the main interface for handling support from other // stores. The typical interactions with requesterStateHandler are: // - getSupportFrom(id slpb.StoreIdent) @@ -39,9 +75,11 @@ type requesterState struct { // - rsfu := checkOutUpdate() // rsfu.getHeartbeatsToSend(now hlc.Timestamp, interval time.Duration) // checkInUpdate(rsfu) +// finishUpdate(rsfu) // - rsfu := checkOutUpdate() // rsfu.handleHeartbeatResponse(msg slpb.Message) // checkInUpdate(rsfu) +// finishUpdate(rsfu) // // Only one update can be in progress to ensure that multiple mutation methods // are not run concurrently. Adding or removing a store while an update is in
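The active -> inactive -> idle progression described above amounts to a small atomic state machine: each markIdleStores pass demotes a store by one step, and any SupportFrom query promotes it straight back to active, so a store is only silenced after two quiet intervals. A minimal, runnable sketch of the two-CompareAndSwap idiom (standalone toy code, not part of this patch; the constant names mirror the ones above):

package main

import (
	"fmt"
	"sync/atomic"
)

const (
	active int32 = iota // queried recently
	inactive            // missed one markIdleStores pass
	idle                // missed two passes; stop sending heartbeats
)

// demote mirrors the loop body of markIdleStores: the first CAS demotes
// active -> inactive; if the store was already inactive, the second CAS
// demotes inactive -> idle. A store already at idle is left unchanged.
func demote(rq *atomic.Int32) {
	if !rq.CompareAndSwap(active, inactive) {
		rq.CompareAndSwap(inactive, idle)
	}
}

func main() {
	var rq atomic.Int32 // zero value is active
	demote(&rq)         // active -> inactive
	demote(&rq)         // inactive -> idle
	fmt.Println(rq.Load() == idle) // true: two quiet intervals => idle
	rq.Store(active)               // a SupportFrom query revives the store
}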
Adding or removing a store while an update is in @@ -66,7 +104,7 @@ func newRequesterStateHandler() *requesterStateHandler { rsh := &requesterStateHandler{ requesterState: requesterState{ meta: slpb.RequesterMeta{}, - supportFrom: make(map[slpb.StoreIdent]slpb.SupportState), + supportFrom: make(map[slpb.StoreIdent]*requestedSupport), }, } rsh.update.Store( @@ -74,7 +112,7 @@ func newRequesterStateHandler() *requesterStateHandler { checkedIn: &rsh.requesterState, inProgress: requesterState{ meta: slpb.RequesterMeta{}, - supportFrom: make(map[slpb.StoreIdent]slpb.SupportState), + supportFrom: make(map[slpb.StoreIdent]*requestedSupport), }, }, ) @@ -105,8 +143,15 @@ type requesterStateForUpdate struct { func (rsh *requesterStateHandler) getSupportFrom(id slpb.StoreIdent) (slpb.SupportState, bool) { rsh.mu.RLock() defer rsh.mu.RUnlock() - ss, ok := rsh.requesterState.supportFrom[id] - return ss, ok + rs, ok := rsh.requesterState.supportFrom[id] + var supportState slpb.SupportState + if ok { + // If a store is present, set recentlyQueried to true. Otherwise, if + // this is a new store, recentlyQueried will be set to true in addStore. + rs.recentlyQueried.Store(active) + supportState = rs.state + } + return supportState, ok } // addStore adds a store to the requesterState.supportFrom map, if not present. @@ -114,24 +159,37 @@ func (rsh *requesterStateHandler) addStore(id slpb.StoreIdent) { // Adding a store doesn't require persisting anything to disk, so it doesn't // need to go through the full checkOut/checkIn process. However, we still // check out the update to ensure that there are no concurrent updates. - defer rsh.checkInUpdate(rsh.checkOutUpdate()) + defer rsh.finishUpdate(rsh.checkOutUpdate()) rsh.mu.Lock() defer rsh.mu.Unlock() if _, ok := rsh.requesterState.supportFrom[id]; !ok { - ss := slpb.SupportState{Target: id, Epoch: rsh.requesterState.meta.MaxEpoch} - rsh.requesterState.supportFrom[id] = ss + rs := requestedSupport{ + state: slpb.SupportState{Target: id, Epoch: rsh.requesterState.meta.MaxEpoch}, + } + // Adding a store is done in response to SupportFrom, so it's ok to set + // recentlyQueried to active here. This also ensures the store will not + // be removed immediately after adding. + rs.recentlyQueried.Store(active) + rsh.requesterState.supportFrom[id] = &rs } } -// removeStore removes a store from the requesterState.supportFrom map. -func (rsh *requesterStateHandler) removeStore(id slpb.StoreIdent) { - // Removing a store doesn't require persisting anything to disk, so it doesn't +// markIdleStores marks all stores in the requesterState.supportFrom map as +// idle if they have not appeared in a getSupportFrom call since the last time +// markIdleStores was called. +func (rsh *requesterStateHandler) markIdleStores() { + // Marking stores doesn't require persisting anything to disk, so it doesn't // need to go through the full checkOut/checkIn process. However, we still // check out the update to ensure that there are no concurrent updates. - defer rsh.checkInUpdate(rsh.checkOutUpdate()) - rsh.mu.Lock() - defer rsh.mu.Unlock() - delete(rsh.requesterState.supportFrom, id) + defer rsh.finishUpdate(rsh.checkOutUpdate()) + + rsh.mu.RLock() + defer rsh.mu.RUnlock() + for _, rs := range rsh.requesterState.supportFrom { + if !rs.recentlyQueried.CompareAndSwap(active, inactive) { + rs.recentlyQueried.CompareAndSwap(inactive, idle) + } + } } // Functions for handling requesterState updates. 
@@ -168,11 +226,15 @@ func (rsfu *requesterStateForUpdate) getMeta() slpb.RequesterMeta { func (rsfu *requesterStateForUpdate) getSupportFrom( storeID slpb.StoreIdent, ) (slpb.SupportState, bool) { - ss, ok := rsfu.inProgress.supportFrom[storeID] + rs, ok := rsfu.inProgress.supportFrom[storeID] if !ok { - ss, ok = rsfu.checkedIn.supportFrom[storeID] + rs, ok = rsfu.checkedIn.supportFrom[storeID] + } + var supportState slpb.SupportState + if ok { + supportState = rs.state } - return ss, ok + return supportState, ok } // reset clears the inProgress view of requesterStateForUpdate. @@ -221,10 +283,6 @@ func (rsh *requesterStateHandler) checkOutUpdate() *requesterStateForUpdate { // updates from the inProgress view. It clears the inProgress view, and swaps it // back in requesterStateHandler.update to be checked out by future updates. func (rsh *requesterStateHandler) checkInUpdate(rsfu *requesterStateForUpdate) { - defer func() { - rsfu.reset() - rsh.update.Swap(rsfu) - }() if rsfu.inProgress.meta == (slpb.RequesterMeta{}) && len(rsfu.inProgress.supportFrom) == 0 { return } @@ -234,11 +292,19 @@ func (rsh *requesterStateHandler) checkInUpdate(rsfu *requesterStateForUpdate) { rsfu.assertMeta() rsfu.checkedIn.meta = rsfu.inProgress.meta } - for storeID, ss := range rsfu.inProgress.supportFrom { - rsfu.checkedIn.supportFrom[storeID] = ss + for storeID, rs := range rsfu.inProgress.supportFrom { + rsfu.checkedIn.supportFrom[storeID].state = rs.state } } +// finishUpdate performs cleanup after a successful or unsuccessful +// checkInUpdate. It resets the requesterStateForUpdate in-progress state and +// makes it available for future check out. +func (rsh *requesterStateHandler) finishUpdate(rsfu *requesterStateForUpdate) { + rsfu.reset() + rsh.update.Swap(rsfu) +} + // Functions for generating heartbeats. // getHeartbeatsToSend updates MaxRequested and generates heartbeats. These @@ -272,12 +338,16 @@ func (rsfu *requesterStateForUpdate) generateHeartbeats(from slpb.StoreIdent) [] len(rsfu.inProgress.supportFrom) == 0, "reading from requesterStateForUpdate."+ "checkedIn.supportFrom while requesterStateForUpdate.inProgress.supportFrom is not empty", ) - for _, ss := range rsfu.checkedIn.supportFrom { + for _, rs := range rsfu.checkedIn.supportFrom { + // Skip idle stores. + if rs.recentlyQueried.Load() == idle { + continue + } heartbeat := slpb.Message{ Type: slpb.MsgHeartbeat, From: from, - To: ss.Target, - Epoch: ss.Epoch, + To: rs.state.Target, + Epoch: rs.state.Epoch, Expiration: maxRequested, } heartbeats = append(heartbeats, heartbeat) @@ -290,26 +360,28 @@ func (rsfu *requesterStateForUpdate) generateHeartbeats(from slpb.StoreIdent) [] // handleHeartbeatResponse handles a single heartbeat response message. It // updates the inProgress view of requesterStateForUpdate only if there are any // changes. -func (rsfu *requesterStateForUpdate) handleHeartbeatResponse(msg slpb.Message) { +func (rsfu *requesterStateForUpdate) handleHeartbeatResponse(msg *slpb.Message) { from := msg.From meta := rsfu.getMeta() ss, ok := rsfu.getSupportFrom(from) + // If the store is not present in the map, ignore the heartbeat response; + // it is likely an old heartbeat response before the local store restarted. 
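+ // New stores are added only via addStore, in response to SupportFrom + // calls, so a stray response alone must not start tracking a store.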
if !ok { - ss = slpb.SupportState{Target: from} + return } metaNew, ssNew := handleHeartbeatResponse(meta, ss, msg) if meta != metaNew { rsfu.inProgress.meta = metaNew } if ss != ssNew { - rsfu.inProgress.supportFrom[from] = ssNew + rsfu.inProgress.supportFrom[from] = &requestedSupport{state: ssNew} } } // handleHeartbeatResponse contains the core logic for updating the epoch and // expiration for a support provider upon receiving a heartbeat response. func handleHeartbeatResponse( - rm slpb.RequesterMeta, ss slpb.SupportState, msg slpb.Message, + rm slpb.RequesterMeta, ss slpb.SupportState, msg *slpb.Message, ) (slpb.RequesterMeta, slpb.SupportState) { if rm.MaxEpoch < msg.Epoch { rm.MaxEpoch = msg.Epoch diff --git a/pkg/kv/kvserver/storeliveness/store_liveness_test.go b/pkg/kv/kvserver/storeliveness/store_liveness_test.go index 45114948b677..09afce707515 100644 --- a/pkg/kv/kvserver/storeliveness/store_liveness_test.go +++ b/pkg/kv/kvserver/storeliveness/store_liveness_test.go @@ -20,92 +20,72 @@ import ( slpb "github.com/cockroachdb/cockroach/pkg/kv/kvserver/storeliveness/storelivenesspb" "github.com/cockroachdb/cockroach/pkg/roachpb" + clustersettings "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/storage" "github.com/cockroachdb/cockroach/pkg/testutils/datapathutils" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" + "github.com/cockroachdb/cockroach/pkg/util/stop" + "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/datadriven" + "github.com/stretchr/testify/require" ) func TestStoreLiveness(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - ctx := context.Background() - storeID := slpb.StoreIdent{NodeID: roachpb.NodeID(1), StoreID: roachpb.StoreID(1)} - datadriven.Walk( t, datapathutils.TestDataPath(t), func(t *testing.T, path string) { - engine := storage.NewDefaultInMemForTesting() + ctx := context.Background() + storeID := slpb.StoreIdent{NodeID: roachpb.NodeID(1), StoreID: roachpb.StoreID(1)} + engine := &testEngine{Engine: storage.NewDefaultInMemForTesting()} defer engine.Close() - ss := newSupporterStateHandler() - rs := newRequesterStateHandler() - if err := onRestart(ctx, rs, ss, engine); err != nil { - t.Errorf("persisting data while restarting failed: %v", err) - } + settings := clustersettings.MakeTestingClusterSettings() + stopper := stop.NewStopper() + defer stopper.Stop(ctx) + manual := timeutil.NewManualTime(timeutil.Unix(1, 0)) + clock := hlc.NewClockForTesting(manual) + sender := testMessageSender{} + sm := NewSupportManager(storeID, engine, Options{}, settings, stopper, clock, &sender) + require.NoError(t, sm.onRestart(ctx)) datadriven.RunTest( t, path, func(t *testing.T, d *datadriven.TestData) string { switch d.Cmd { - case "add-store": - remoteID := parseStoreID(t, d, "node-id", "store-id") - rs.addStore(remoteID) - return "" - - case "remove-store": - remoteID := parseStoreID(t, d, "node-id", "store-id") - rs.removeStore(remoteID) + case "mark-idle-stores": + sm.requesterStateHandler.markIdleStores() return "" case "support-from": remoteID := parseStoreID(t, d, "node-id", "store-id") - supportState, _ := rs.getSupportFrom(remoteID) - return fmt.Sprintf("requester state: %+v", supportState) + epoch, timestamp, supported := sm.SupportFrom(remoteID) + return fmt.Sprintf( + 
"epoch: %+v, expiration: %+v, support provided: %v", + epoch, timestamp, supported, + ) case "support-for": remoteID := parseStoreID(t, d, "node-id", "store-id") - supportState := ss.getSupportFor(remoteID) - return fmt.Sprintf("supporter state: %+v", supportState) + epoch, supported := sm.SupportFor(remoteID) + return fmt.Sprintf( + "epoch: %+v, support provided: %v", + epoch, supported, + ) case "send-heartbeats": now := parseTimestamp(t, d, "now") - var interval string - d.ScanArgs(t, "liveness-interval", &interval) - livenessInterval, err := time.ParseDuration(interval) - if err != nil { - t.Errorf("can't parse liveness interval duration %s; error: %v", interval, err) - } - rsfu := rs.checkOutUpdate() - heartbeats := rsfu.getHeartbeatsToSend(storeID, now, livenessInterval) - if err = rsfu.write(ctx, engine); err != nil { - t.Errorf("writing requester state failed: %v", err) - } - rs.checkInUpdate(rsfu) + manual.AdvanceTo(now.GoTime()) + sm.options.LivenessInterval = parseDuration(t, d, "liveness-interval") + sm.maybeAddStores() + sm.sendHeartbeats(ctx) + heartbeats := sender.drainSentMessages() return fmt.Sprintf("heartbeats:\n%s", printMsgs(heartbeats)) case "handle-messages": msgs := parseMsgs(t, d, storeID) - var responses []slpb.Message - rsfu := rs.checkOutUpdate() - ssfu := ss.checkOutUpdate() - for _, msg := range msgs { - switch msg.Type { - case slpb.MsgHeartbeat: - responses = append(responses, ssfu.handleHeartbeat(msg)) - case slpb.MsgHeartbeatResp: - rsfu.handleHeartbeatResponse(msg) - default: - log.Errorf(context.Background(), "unexpected message type: %v", msg.Type) - } - } - if err := rsfu.write(ctx, engine); err != nil { - t.Errorf("writing requester state failed: %v", err) - } - if err := ssfu.write(ctx, engine); err != nil { - t.Errorf("writing supporter state failed: %v", err) - } - rs.checkInUpdate(rsfu) - ss.checkInUpdate(ssfu) + sm.handleMessages(ctx, msgs) + responses := sender.drainSentMessages() if len(responses) > 0 { return fmt.Sprintf("responses:\n%s", printMsgs(responses)) } else { @@ -114,32 +94,51 @@ func TestStoreLiveness(t *testing.T) { case "withdraw-support": now := parseTimestamp(t, d, "now") - ssfu := ss.checkOutUpdate() - ssfu.withdrawSupport(hlc.ClockTimestamp(now)) - if err := ssfu.write(ctx, engine); err != nil { - t.Errorf("writing supporter state failed: %v", err) - } - ss.checkInUpdate(ssfu) + manual.AdvanceTo(now.GoTime()) + sm.withdrawSupport(ctx) return "" case "restart": - ss = newSupporterStateHandler() - rs = newRequesterStateHandler() - if err := onRestart(ctx, rs, ss, engine); err != nil { - t.Errorf("persisting data while restarting failed: %v", err) - } + now := parseTimestamp(t, d, "now") + gracePeriod := parseDuration(t, d, "grace-period") + o := Options{SupportWithdrawalGracePeriod: gracePeriod} + sm = NewSupportManager( + storeID, engine, o, settings, stopper, clock, &sender, + ) + manual.AdvanceTo(now.GoTime()) + require.NoError(t, sm.onRestart(ctx)) + return "" + + case "error-on-write": + var errorOnWrite bool + d.ScanArgs(t, "on", &errorOnWrite) + engine.errorOnWrite = errorOnWrite return "" case "debug-requester-state": + var sortedSupportMap []string + for _, support := range sm.requesterStateHandler.requesterState.supportFrom { + sortedSupportMap = append( + sortedSupportMap, fmt.Sprintf("%+v", support.state), + ) + } + slices.Sort(sortedSupportMap) return fmt.Sprintf( - "meta:\n%+v\nsupport from:\n%+v", rs.requesterState.meta, - printSupportMap(rs.requesterState.supportFrom), + "meta:\n%+v\nsupport from:\n%+v", + 
sm.requesterStateHandler.requesterState.meta, + strings.Join(sortedSupportMap, "\n"), ) case "debug-supporter-state": + var sortedSupportMap []string + for _, support := range sm.supporterStateHandler.supporterState.supportFor { + sortedSupportMap = append(sortedSupportMap, fmt.Sprintf("%+v", support)) + } + slices.Sort(sortedSupportMap) return fmt.Sprintf( - "meta:\n%+v\nsupport for:\n%+v", ss.supporterState.meta, - printSupportMap(ss.supporterState.supportFor), + "meta:\n%+v\nsupport for:\n%+v", + sm.supporterStateHandler.supporterState.meta, + strings.Join(sortedSupportMap, "\n"), ) default: @@ -161,15 +160,6 @@ func printMsgs(msgs []slpb.Message) string { return strings.Join(sortedMsgs, "\n") } -func printSupportMap(m map[slpb.StoreIdent]slpb.SupportState) string { - var sortedSupportMap []string - for _, support := range m { - sortedSupportMap = append(sortedSupportMap, fmt.Sprintf("%+v", support)) - } - slices.Sort(sortedSupportMap) - return strings.Join(sortedSupportMap, "\n") -} - func parseStoreID( t *testing.T, d *datadriven.TestData, nodeStr string, storeStr string, ) slpb.StoreIdent { @@ -183,15 +173,25 @@ func parseStoreID( } } -func parseTimestamp(t *testing.T, d *datadriven.TestData, timeStr string) hlc.Timestamp { +func parseTimestamp(t *testing.T, d *datadriven.TestData, name string) hlc.Timestamp { var wallTimeSecs int64 - d.ScanArgs(t, timeStr, &wallTimeSecs) + d.ScanArgs(t, name, &wallTimeSecs) wallTime := wallTimeSecs * int64(time.Second) return hlc.Timestamp{WallTime: wallTime} } -func parseMsgs(t *testing.T, d *datadriven.TestData, storeIdent slpb.StoreIdent) []slpb.Message { - var msgs []slpb.Message +func parseDuration(t *testing.T, d *datadriven.TestData, name string) time.Duration { + var durationStr string + d.ScanArgs(t, name, &durationStr) + duration, err := time.ParseDuration(durationStr) + if err != nil { + t.Errorf("can't parse duration %s; error: %v", durationStr, err) + } + return duration +} + +func parseMsgs(t *testing.T, d *datadriven.TestData, storeIdent slpb.StoreIdent) []*slpb.Message { + var msgs []*slpb.Message lines := strings.Split(d.Input, "\n") for _, line := range lines { var err error @@ -217,7 +217,7 @@ func parseMsgs(t *testing.T, d *datadriven.TestData, storeIdent slpb.StoreIdent) var epoch int64 d.ScanArgs(t, "epoch", &epoch) expiration := parseTimestamp(t, d, "expiration") - msg := slpb.Message{ + msg := &slpb.Message{ Type: msgType, From: remoteID, To: storeIdent, @@ -228,22 +228,3 @@ func parseMsgs(t *testing.T, d *datadriven.TestData, storeIdent slpb.StoreIdent) } return msgs } - -// TODO(mira): Move this to the SupportManager. -func onRestart( - ctx context.Context, rs *requesterStateHandler, ss *supporterStateHandler, engine storage.Engine, -) error { - if err := ss.read(ctx, engine); err != nil { - return err - } - if err := rs.read(ctx, engine); err != nil { - return err - } - rsfu := rs.checkOutUpdate() - rsfu.incrementMaxEpoch() - if err := rsfu.write(ctx, engine); err != nil { - return err - } - rs.checkInUpdate(rsfu) - return nil -} diff --git a/pkg/kv/kvserver/storeliveness/storelivenesspb/service.go b/pkg/kv/kvserver/storeliveness/storelivenesspb/service.go index 0d053322a7bd..d7069aa5984f 100644 --- a/pkg/kv/kvserver/storeliveness/storelivenesspb/service.go +++ b/pkg/kv/kvserver/storeliveness/storelivenesspb/service.go @@ -15,3 +15,6 @@ package storelivenesspb // increment the epoch for which it requests support from another store (e.g. // after a restart). 
type Epoch int64 + +// SafeValue implements the redact.SafeValue interface. +func (e Epoch) SafeValue() {} diff --git a/pkg/kv/kvserver/storeliveness/support_manager.go b/pkg/kv/kvserver/storeliveness/support_manager.go new file mode 100644 index 000000000000..2c59f2749dfa --- /dev/null +++ b/pkg/kv/kvserver/storeliveness/support_manager.go @@ -0,0 +1,407 @@ +// Copyright 2024 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package storeliveness + +import ( + "context" + "time" + + "github.com/cockroachdb/cockroach/pkg/clusterversion" + slpb "github.com/cockroachdb/cockroach/pkg/kv/kvserver/storeliveness/storelivenesspb" + "github.com/cockroachdb/cockroach/pkg/settings" + clustersettings "github.com/cockroachdb/cockroach/pkg/settings/cluster" + "github.com/cockroachdb/cockroach/pkg/storage" + "github.com/cockroachdb/cockroach/pkg/util/hlc" + "github.com/cockroachdb/cockroach/pkg/util/log" + "github.com/cockroachdb/cockroach/pkg/util/stop" + "github.com/cockroachdb/cockroach/pkg/util/syncutil" + "golang.org/x/exp/maps" +) + +// Enabled controls whether Store Liveness sends heartbeats. Even when +// disabled, Store Liveness still responds to heartbeats and answers +// SupportFor calls. +var Enabled = settings.RegisterBoolSetting( + settings.SystemOnly, + "kv.store_liveness.enabled", + "if enabled, store liveness will heartbeat periodically; if disabled, "+ + "store liveness will still respond to heartbeats and calls to SupportFor", + true, +) + +// MessageSender is the interface that defines how Store Liveness messages are +// sent. Transport is the production implementation of MessageSender. +type MessageSender interface { + SendAsync(msg slpb.Message) (sent bool) +} + +// SupportManager orchestrates requesting and providing Store Liveness support. +type SupportManager struct { + storeID slpb.StoreIdent + engine storage.Engine + options Options + settings *clustersettings.Settings + stopper *stop.Stopper + clock *hlc.Clock + sender MessageSender + receiveQueue receiveQueue + storesToAdd storesToAdd + minWithdrawalTS hlc.Timestamp + supporterStateHandler *supporterStateHandler + requesterStateHandler *requesterStateHandler +} + +// NewSupportManager creates a new Store Liveness SupportManager. The main +// goroutine that processes Store Liveness messages is initialized +// separately, via Start. +func NewSupportManager( + storeID slpb.StoreIdent, + engine storage.Engine, + options Options, + settings *clustersettings.Settings, + stopper *stop.Stopper, + clock *hlc.Clock, + sender MessageSender, +) *SupportManager { + return &SupportManager{ + storeID: storeID, + engine: engine, + options: options, + settings: settings, + stopper: stopper, + clock: clock, + sender: sender, + receiveQueue: newReceiveQueue(), + storesToAdd: newStoresToAdd(), + requesterStateHandler: newRequesterStateHandler(), + supporterStateHandler: newSupporterStateHandler(), + } +} + +// HandleMessage implements the MessageHandler interface. It appends incoming +// messages to a queue and does not block on processing the messages. +func (sm *SupportManager) HandleMessage(msg *slpb.Message) { + sm.receiveQueue.Append(msg) +} + +var _ MessageHandler = (*SupportManager)(nil) + +// SupportFor implements the Fabric interface. It delegates the response to the +// SupportManager's supporterStateHandler.
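+// SupportFor reads only in-memory state and never blocks on disk I/O, so it +// can be answered even while writes are stalled (see the disk-stall test in +// support_manager_test.go).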
+func (sm *SupportManager) SupportFor(id slpb.StoreIdent) (slpb.Epoch, bool) { + ss := sm.supporterStateHandler.getSupportFor(id) + // An empty expiration implies support has expired. + if ss.Expiration.IsEmpty() { + return 0, false + } + return ss.Epoch, true +} + +// SupportFrom implements the Fabric interface. It delegates the response to the +// SupportManager's requesterStateHandler. +func (sm *SupportManager) SupportFrom(id slpb.StoreIdent) (slpb.Epoch, hlc.Timestamp, bool) { + ss, ok := sm.requesterStateHandler.getSupportFrom(id) + if !ok { + // If this is the first time SupportFrom has been called for this store, + // the store will be added to requesterStateHandler before the next + // round of heartbeats. Multiple SupportFrom calls can race and call + // storesToAdd.addStore concurrently but that's ok because storesToAdd + // uses a map to avoid duplicates, and the requesterStateHandler's + // addStore checks if the store exists before adding it. + sm.storesToAdd.addStore(id) + log.VInfof(context.Background(), 2, + "store %+v enqueued to add remote store %+v", sm.storeID, id, + ) + return 0, hlc.Timestamp{}, false + } + // An empty expiration implies support has expired. + if ss.Expiration.IsEmpty() { + return 0, hlc.Timestamp{}, false + } + return ss.Epoch, ss.Expiration, true +} + +// SupportFromEnabled implements the Fabric interface and determines if Store +// Liveness sends heartbeats. It returns true if both the cluster setting and +// version gate are on. +func (sm *SupportManager) SupportFromEnabled(ctx context.Context) bool { + clusterSettingEnabled := Enabled.Get(&sm.settings.SV) + versionGateEnabled := sm.settings.Version.IsActive( + ctx, clusterversion.V24_3_StoreLivenessEnabled, + ) + return clusterSettingEnabled && versionGateEnabled +} + +// Start starts the main processing goroutine in startLoop as an async task. +func (sm *SupportManager) Start(ctx context.Context) error { + // onRestart is called synchronously before the start of the main loop in + // order to ensure the SupportManager has loaded all persisted state into + // memory and adjusted its clock accordingly before answering SupportFrom + // and SupportFor requests. + if err := sm.onRestart(ctx); err != nil { + return err + } + + return sm.stopper.RunAsyncTask( + ctx, "storeliveness.SupportManager: loop", sm.startLoop, + ) +} + +// onRestart initializes the SupportManager with state persisted on disk. +func (sm *SupportManager) onRestart(ctx context.Context) error { + // Load the supporter and requester state from disk. + if err := sm.supporterStateHandler.read(ctx, sm.engine); err != nil { + return err + } + if err := sm.requesterStateHandler.read(ctx, sm.engine); err != nil { + return err + } + // Advance the clock to the maximum withdrawal time. + if err := sm.clock.UpdateAndCheckMaxOffset( + ctx, sm.supporterStateHandler.supporterState.meta.MaxWithdrawn, + ); err != nil { + return err + } + // Wait out the previous maximum requested time. + if err := sm.clock.SleepUntil( + ctx, sm.requesterStateHandler.requesterState.meta.MaxRequested, + ); err != nil { + return err + } + // Set the minimum withdrawal time to give other stores a grace period + // before losing support. + sm.minWithdrawalTS = sm.clock.Now().AddDuration(sm.options.SupportWithdrawalGracePeriod) + // Increment the current epoch.
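+ // A higher epoch separates support requested after this restart from any + // support state persisted before it; see the Epoch comment in + // storelivenesspb/service.go.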
+ rsfu := sm.requesterStateHandler.checkOutUpdate() + defer sm.requesterStateHandler.finishUpdate(rsfu) + rsfu.incrementMaxEpoch() + if err := rsfu.write(ctx, sm.engine); err != nil { + return err + } + sm.requesterStateHandler.checkInUpdate(rsfu) + return nil +} + +// startLoop contains the main processing goroutine which orchestrates sending +// heartbeats, responding to messages, withdrawing support, adding stores, and +// marking stores as idle. Doing so in a single goroutine serializes these +// actions and simplifies the concurrency model. +func (sm *SupportManager) startLoop(ctx context.Context) { + heartbeatTicker := time.NewTicker(sm.options.HeartbeatInterval) + defer heartbeatTicker.Stop() + + supportExpiryTicker := time.NewTicker(sm.options.SupportExpiryInterval) + defer supportExpiryTicker.Stop() + + idleSupportFromTicker := time.NewTicker(sm.options.IdleSupportFromInterval) + defer idleSupportFromTicker.Stop() + + for { + // NOTE: only listen to the receive queue's signal if we don't already have + // heartbeats to send or support to check. This prevents a constant flow of + // inbound messages from delaying the other work due to the random selection + // between multiple enabled channels. + var receiveQueueSig <-chan struct{} + if len(heartbeatTicker.C) == 0 && len(supportExpiryTicker.C) == 0 { + receiveQueueSig = sm.receiveQueue.Sig() + } + + select { + case <-heartbeatTicker.C: + // First check if any stores need to be added to ensure they are included + // in the round of heartbeats below. + sm.maybeAddStores() + if sm.SupportFromEnabled(ctx) { + sm.sendHeartbeats(ctx) + } + + case <-supportExpiryTicker.C: + sm.withdrawSupport(ctx) + + case <-idleSupportFromTicker.C: + sm.requesterStateHandler.markIdleStores() + + case <-receiveQueueSig: + msgs := sm.receiveQueue.Drain() + sm.handleMessages(ctx, msgs) + + case <-sm.stopper.ShouldQuiesce(): + return + } + } +} + +// maybeAddStores drains storesToAdd and delegates adding any new stores to the +// SupportManager's requesterStateHandler. +func (sm *SupportManager) maybeAddStores() { + sta := sm.storesToAdd.drainStoresToAdd() + for _, store := range sta { + sm.requesterStateHandler.addStore(store) + } +} + +// sendHeartbeats delegates heartbeat generation to the requesterStateHandler +// and sends the resulting messages via Transport. +func (sm *SupportManager) sendHeartbeats(ctx context.Context) { + rsfu := sm.requesterStateHandler.checkOutUpdate() + defer sm.requesterStateHandler.finishUpdate(rsfu) + livenessInterval := sm.options.LivenessInterval + heartbeats := rsfu.getHeartbeatsToSend(sm.storeID, sm.clock.Now(), livenessInterval) + if err := rsfu.write(ctx, sm.engine); err != nil { + log.Warningf(ctx, "failed to write requester meta: %v", err) + return + } + sm.requesterStateHandler.checkInUpdate(rsfu) + + // Send heartbeats to each remote store. + for _, msg := range heartbeats { + if sent := sm.sender.SendAsync(msg); !sent { + log.Warningf(ctx, "sending heartbeat to store %+v failed", msg.To) + } + } + log.VInfof( + ctx, 2, "store %+v sent heartbeats to %d stores", sm.storeID, len(heartbeats), + ) +} + +// withdrawSupport delegates support withdrawal to supporterStateHandler. +func (sm *SupportManager) withdrawSupport(ctx context.Context) { + now := sm.clock.NowAsClockTimestamp() + // Do not withdraw support if the grace period hasn't elapsed yet.
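+ // minWithdrawalTS was set in onRestart to the restart time plus + // SupportWithdrawalGracePeriod, giving remote stores a chance to + // re-establish support before any is withdrawn.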
+ if now.ToTimestamp().Less(sm.minWithdrawalTS) { + return + } + ssfu := sm.supporterStateHandler.checkOutUpdate() + defer sm.supporterStateHandler.finishUpdate(ssfu) + ssfu.withdrawSupport(now) + + batch := sm.engine.NewBatch() + defer batch.Close() + if err := ssfu.write(ctx, batch); err != nil { + log.Warningf(ctx, "failed to write supporter meta and state: %v", err) + return + } + if err := batch.Commit(true /* sync */); err != nil { + log.Warningf(ctx, "failed to commit supporter meta and state: %v", err) + return + } + log.VInfof( + ctx, 2, "store %+v withdrew support from %d stores", + sm.storeID, len(ssfu.inProgress.supportFor), + ) + sm.supporterStateHandler.checkInUpdate(ssfu) +} + +// handleMessages iterates over the given messages and delegates their handling +// to either the requesterStateHandler or supporterStateHandler. It then writes +// all updates to disk in a single batch, and sends any responses via Transport. +func (sm *SupportManager) handleMessages(ctx context.Context, msgs []*slpb.Message) { + log.VInfof(ctx, 2, "store %+v drained receive queue of size %d", sm.storeID, len(msgs)) + rsfu := sm.requesterStateHandler.checkOutUpdate() + defer sm.requesterStateHandler.finishUpdate(rsfu) + ssfu := sm.supporterStateHandler.checkOutUpdate() + defer sm.supporterStateHandler.finishUpdate(ssfu) + var responses []slpb.Message + for _, msg := range msgs { + switch msg.Type { + case slpb.MsgHeartbeat: + responses = append(responses, ssfu.handleHeartbeat(msg)) + case slpb.MsgHeartbeatResp: + rsfu.handleHeartbeatResponse(msg) + default: + log.Errorf(context.Background(), "unexpected message type: %v", msg.Type) + } + } + + batch := sm.engine.NewBatch() + defer batch.Close() + if err := rsfu.write(ctx, batch); err != nil { + log.Warningf(ctx, "failed to write requester meta: %v", err) + return + } + if err := ssfu.write(ctx, batch); err != nil { + log.Warningf(ctx, "failed to write supporter meta: %v", err) + return + } + if err := batch.Commit(true /* sync */); err != nil { + log.Warningf(ctx, "failed to sync supporter and requester state: %v", err) + return + } + sm.requesterStateHandler.checkInUpdate(rsfu) + sm.supporterStateHandler.checkInUpdate(ssfu) + + for _, response := range responses { + _ = sm.sender.SendAsync(response) + } + log.VInfof(ctx, 2, "store %+v sent %d responses", sm.storeID, len(responses)) +} + +// receiveQueue stores all received messages from the MessageHandler and allows +// them to be processed async and in batch. +type receiveQueue struct { + mu struct { + syncutil.Mutex + msgs []*slpb.Message + } + sig chan struct{} +} + +func newReceiveQueue() receiveQueue { + return receiveQueue{ + sig: make(chan struct{}, 1), + } +} + +func (q *receiveQueue) Append(msg *slpb.Message) { + q.mu.Lock() + defer q.mu.Unlock() + q.mu.msgs = append(q.mu.msgs, msg) + select { + case q.sig <- struct{}{}: + default: + } +} + +func (q *receiveQueue) Sig() <-chan struct{} { + return q.sig +} + +func (q *receiveQueue) Drain() []*slpb.Message { + q.mu.Lock() + defer q.mu.Unlock() + msgs := q.mu.msgs + q.mu.msgs = nil + return msgs +} + +// storesToAdd contains a set of stores that Store Liveness periodically adds to +// requesterState.supportFrom. 
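+// Stores are enqueued by SupportFrom, which must not block, and drained by +// maybeAddStores on the main loop before each round of heartbeats.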
+type storesToAdd struct { + mu syncutil.Mutex + stores map[slpb.StoreIdent]struct{} +} + +func newStoresToAdd() storesToAdd { + return storesToAdd{ + stores: make(map[slpb.StoreIdent]struct{}), + } +} + +func (sta *storesToAdd) addStore(id slpb.StoreIdent) { + sta.mu.Lock() + defer sta.mu.Unlock() + sta.stores[id] = struct{}{} +} + +func (sta *storesToAdd) drainStoresToAdd() []slpb.StoreIdent { + sta.mu.Lock() + defer sta.mu.Unlock() + s := maps.Keys(sta.stores) + clear(sta.stores) + return s +} diff --git a/pkg/kv/kvserver/storeliveness/support_manager_test.go b/pkg/kv/kvserver/storeliveness/support_manager_test.go new file mode 100644 index 000000000000..92b20491414c --- /dev/null +++ b/pkg/kv/kvserver/storeliveness/support_manager_test.go @@ -0,0 +1,444 @@ +// Copyright 2024 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package storeliveness + +import ( + "context" + "testing" + "time" + + slpb "github.com/cockroachdb/cockroach/pkg/kv/kvserver/storeliveness/storelivenesspb" + "github.com/cockroachdb/cockroach/pkg/roachpb" + clustersettings "github.com/cockroachdb/cockroach/pkg/settings/cluster" + "github.com/cockroachdb/cockroach/pkg/storage" + "github.com/cockroachdb/cockroach/pkg/testutils" + "github.com/cockroachdb/cockroach/pkg/util/hlc" + "github.com/cockroachdb/cockroach/pkg/util/leaktest" + "github.com/cockroachdb/cockroach/pkg/util/log" + "github.com/cockroachdb/cockroach/pkg/util/stop" + "github.com/cockroachdb/cockroach/pkg/util/syncutil" + "github.com/cockroachdb/errors" + "github.com/stretchr/testify/require" +) + +var ( + store = slpb.StoreIdent{NodeID: roachpb.NodeID(1), StoreID: roachpb.StoreID(1)} + remoteStore = slpb.StoreIdent{NodeID: roachpb.NodeID(2), StoreID: roachpb.StoreID(2)} + options = Options{ + HeartbeatInterval: 3 * time.Millisecond, + LivenessInterval: 6 * time.Millisecond, + SupportExpiryInterval: 1 * time.Millisecond, + IdleSupportFromInterval: 1 * time.Minute, + } +) + +// TestSupportManagerRequestsSupport tests that the SupportManager requests and +// establishes support on behalf of the local store. +func TestSupportManagerRequestsSupport(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + ctx := context.Background() + engine := storage.NewDefaultInMemForTesting() + defer engine.Close() + settings := clustersettings.MakeTestingClusterSettings() + stopper := stop.NewStopper() + defer stopper.Stop(ctx) + manual := hlc.NewHybridManualClock() + clock := hlc.NewClockForTesting(manual) + sender := &testMessageSender{} + sm := NewSupportManager(store, engine, options, settings, stopper, clock, sender) + require.NoError(t, sm.Start(ctx)) + + // Start sending heartbeats to the remote store by calling SupportFrom. + epoch, expiration, supported := sm.SupportFrom(remoteStore) + require.Equal(t, slpb.Epoch(0), epoch) + require.Equal(t, hlc.Timestamp{}, expiration) + require.False(t, supported) + + // Ensure heartbeats are sent. 
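+ // ensureHeartbeats (defined below) waits until at least 10 messages have + // accumulated in the test sender, then drains them.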
+ msgs := ensureHeartbeats(t, sender, 10) + require.Equal(t, slpb.MsgHeartbeat, msgs[0].Type) + require.Equal(t, sm.storeID, msgs[0].From) + require.Equal(t, remoteStore, msgs[0].To) + requestedExpiration := msgs[0].Expiration + + // Process a heartbeat response from the remote store. + heartbeatResp := &slpb.Message{ + Type: slpb.MsgHeartbeatResp, + From: remoteStore, + To: sm.storeID, + Epoch: slpb.Epoch(1), + Expiration: requestedExpiration, + } + sm.HandleMessage(heartbeatResp) + + // Ensure support is provided as seen by SupportFrom. + testutils.SucceedsSoon( + t, func() error { + epoch, expiration, supported = sm.SupportFrom(remoteStore) + if !supported { + return errors.New("support not provided yet") + } + require.Equal(t, slpb.Epoch(1), epoch) + require.Equal(t, requestedExpiration, expiration) + require.True(t, supported) + return nil + }, + ) +} + +// TestSupportManagerProvidesSupport tests that the SupportManager provides +// support for a remote store. +func TestSupportManagerProvidesSupport(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + ctx := context.Background() + engine := storage.NewDefaultInMemForTesting() + defer engine.Close() + settings := clustersettings.MakeTestingClusterSettings() + stopper := stop.NewStopper() + defer stopper.Stop(ctx) + manual := hlc.NewHybridManualClock() + clock := hlc.NewClockForTesting(manual) + sender := &testMessageSender{} + sm := NewSupportManager(store, engine, options, settings, stopper, clock, sender) + require.NoError(t, sm.Start(ctx)) + + // Process a heartbeat from the remote store. + heartbeat := &slpb.Message{ + Type: slpb.MsgHeartbeat, + From: remoteStore, + To: sm.storeID, + Epoch: slpb.Epoch(1), + Expiration: sm.clock.Now().AddDuration(time.Second), + } + sm.HandleMessage(heartbeat) + + // Ensure a response is sent. + testutils.SucceedsSoon( + t, func() error { + if sender.getNumSentMessages() == 0 { + return errors.New("more messages expected") + } + return nil + }, + ) + msg := sender.drainSentMessages()[0] + require.Equal(t, slpb.MsgHeartbeatResp, msg.Type) + require.Equal(t, heartbeat.To, msg.From) + require.Equal(t, heartbeat.From, msg.To) + require.Equal(t, heartbeat.Epoch, msg.Epoch) + require.Equal(t, heartbeat.Expiration, msg.Expiration) + + // Ensure support is provided as seen by SupportFor. + epoch, supported := sm.SupportFor(remoteStore) + require.Equal(t, slpb.Epoch(1), epoch) + require.True(t, supported) + + // Wait for support to be withdrawn. + testutils.SucceedsSoon( + t, func() error { + epoch, supported = sm.SupportFor(remoteStore) + if supported { + return errors.New("support not withdrawn yet") + } + require.Equal(t, slpb.Epoch(0), epoch) + require.False(t, supported) + return nil + }, + ) +} + +// TestSupportManagerEnableDisable tests that the SupportManager respects +// enabling and disabling. +func TestSupportManagerEnableDisable(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + ctx := context.Background() + engine := storage.NewDefaultInMemForTesting() + defer engine.Close() + settings := clustersettings.MakeTestingClusterSettings() + stopper := stop.NewStopper() + defer stopper.Stop(ctx) + manual := hlc.NewHybridManualClock() + clock := hlc.NewClockForTesting(manual) + sender := &testMessageSender{} + sm := NewSupportManager(store, engine, options, settings, stopper, clock, sender) + require.NoError(t, sm.Start(ctx)) + + // Start sending heartbeats by calling SupportFrom. 
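+ // The first call returns no support; it only enqueues the remote store, + // which is added before the next round of heartbeats.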
+ _, _, supported := sm.SupportFrom(remoteStore) + require.False(t, supported) + ensureHeartbeats(t, sender, 10) + + // Disable Store Liveness and make sure heartbeats stop. + Enabled.Override(ctx, &settings.SV, false) + // One heartbeat may race in while heartbeats are being disabled. + ensureNoHeartbeats(t, sender, sm.options.HeartbeatInterval, 1) + + // Enable Store Liveness again and make sure heartbeats are sent. + Enabled.Override(ctx, &settings.SV, true) + ensureHeartbeats(t, sender, 10) +} + +// TestSupportManagerRestart tests that the SupportManager adjusts the clock +// correctly after restarting. +func TestSupportManagerRestart(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + ctx := context.Background() + engine := storage.NewDefaultInMemForTesting() + defer engine.Close() + settings := clustersettings.MakeTestingClusterSettings() + stopper := stop.NewStopper() + defer stopper.Stop(ctx) + manual := hlc.NewHybridManualClock() + manualBehind := hlc.NewHybridManualClock() + clock := hlc.NewClockForTesting(manual) + clockBehind := hlc.NewClockForTesting(manualBehind) + sender := &testMessageSender{} + sm := NewSupportManager(store, engine, options, settings, stopper, clock, sender) + // Initialize the SupportManager without starting the main goroutine. + require.NoError(t, sm.onRestart(ctx)) + + // Establish support for and from the remote store, and withdraw support. + manual.Pause() + manualBehind.Pause() + sm.SupportFrom(remoteStore) + sm.maybeAddStores() + sm.sendHeartbeats(ctx) + requestedTime := sm.requesterStateHandler.requesterState.meta.MaxRequested + heartbeatResp := &slpb.Message{ + Type: slpb.MsgHeartbeatResp, + From: remoteStore, + To: sm.storeID, + Epoch: slpb.Epoch(1), + Expiration: requestedTime, + } + heartbeat := &slpb.Message{ + Type: slpb.MsgHeartbeat, + From: remoteStore, + To: sm.storeID, + Epoch: slpb.Epoch(1), + Expiration: clock.Now().AddDuration(sm.options.LivenessInterval), + } + sm.handleMessages(ctx, []*slpb.Message{heartbeatResp, heartbeat}) + manual.Resume() + manual.Increment(sm.options.LivenessInterval.Nanoseconds()) + sm.withdrawSupport(ctx) + withdrawalTime := sm.supporterStateHandler.supporterState.meta.MaxWithdrawn.ToTimestamp() + + // Simulate a restart by creating a new SupportManager with the same engine. + // Use a regressed clock. + sm = NewSupportManager(store, engine, options, settings, stopper, clockBehind, sender) + now := sm.clock.Now() + require.False(t, requestedTime.Less(now)) + require.False(t, withdrawalTime.Less(now)) + + manualBehind.Resume() + require.NoError(t, sm.onRestart(ctx)) + manualBehind.Pause() + + // Ensure the clock is set past MaxWithdrawn and MaxRequested. + now = sm.clock.Now() + require.True(t, requestedTime.Less(now)) + require.True(t, withdrawalTime.Less(now)) +} + +func TestSupportManagerDiskStall(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + ctx := context.Background() + engine := &testEngine{ + Engine: storage.NewDefaultInMemForTesting(), + blockingCh: make(chan struct{}, 1), + } + defer engine.Close() + settings := clustersettings.MakeTestingClusterSettings() + stopper := stop.NewStopper() + defer stopper.Stop(ctx) + manual := hlc.NewHybridManualClock() + clock := hlc.NewClockForTesting(manual) + sender := &testMessageSender{} + sm := NewSupportManager(store, engine, options, settings, stopper, clock, sender) + // Initialize the SupportManager without starting the main goroutine. 
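+ // Driving onRestart, sendHeartbeats, and handleMessages directly (instead + // of Start) lets the test control exactly when writes happen.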
+ require.NoError(t, sm.onRestart(ctx)) + + // Establish support for and from the remote store. + sm.SupportFrom(remoteStore) + sm.maybeAddStores() + sm.sendHeartbeats(ctx) + requestedTime := sm.requesterStateHandler.requesterState.meta.MaxRequested + heartbeatResp := &slpb.Message{ + Type: slpb.MsgHeartbeatResp, + From: remoteStore, + To: sm.storeID, + Epoch: slpb.Epoch(1), + Expiration: requestedTime, + } + heartbeat := &slpb.Message{ + Type: slpb.MsgHeartbeat, + From: remoteStore, + To: sm.storeID, + Epoch: slpb.Epoch(1), + Expiration: clock.Now().AddDuration(sm.options.LivenessInterval), + } + sm.handleMessages(ctx, []*slpb.Message{heartbeatResp, heartbeat}) + + // Start blocking writes. + engine.setBlockOnWrite(true) + sender.drainSentMessages() + + // Send heartbeats in a separate goroutine. It will block on writing the + // requester meta. + require.NoError( + t, sm.stopper.RunAsyncTask( + ctx, "heartbeat", sm.sendHeartbeats, + ), + ) + ensureNoHeartbeats(t, sender, sm.options.HeartbeatInterval, 0) + + // SupportFrom and SupportFor calls are still being answered. + epoch, _, supported := sm.SupportFrom(remoteStore) + require.Equal(t, slpb.Epoch(1), epoch) + require.True(t, supported) + + epoch, supported = sm.SupportFor(remoteStore) + require.Equal(t, slpb.Epoch(1), epoch) + require.True(t, supported) + + // Stop blocking writes. + engine.blockingCh <- struct{}{} + engine.setBlockOnWrite(false) + + // Ensure the heartbeat is unblocked and sent out. + ensureHeartbeats(t, sender, 1) +} + +func ensureHeartbeats(t *testing.T, sender *testMessageSender, expectedNum int) []slpb.Message { + var msgs []slpb.Message + testutils.SucceedsSoon( + t, func() error { + if sender.getNumSentMessages() < expectedNum { + return errors.New("not enough heartbeats") + } + msgs = sender.drainSentMessages() + require.Equal(t, slpb.MsgHeartbeat, msgs[0].Type) + return nil + }, + ) + return msgs +} + +func ensureNoHeartbeats( + t *testing.T, sender *testMessageSender, hbInterval time.Duration, slack int, +) { + sender.drainSentMessages() + err := testutils.SucceedsWithinError( + func() error { + if sender.getNumSentMessages() > slack { + return errors.New("heartbeats are sent") + } else { + return errors.New("no heartbeats") + } + }, hbInterval*10, + ) + require.Regexp(t, err, "no heartbeats") +} + +// testMessageSender implements the MessageSender interface and stores all sent +// messages in a slice. +type testMessageSender struct { + mu syncutil.Mutex + messages []slpb.Message +} + +func (tms *testMessageSender) SendAsync(msg slpb.Message) (sent bool) { + tms.mu.Lock() + defer tms.mu.Unlock() + tms.messages = append(tms.messages, msg) + return true +} + +func (tms *testMessageSender) drainSentMessages() []slpb.Message { + tms.mu.Lock() + defer tms.mu.Unlock() + msgs := tms.messages + tms.messages = nil + return msgs +} + +func (tms *testMessageSender) getNumSentMessages() int { + tms.mu.Lock() + defer tms.mu.Unlock() + return len(tms.messages) +} + +var _ MessageSender = (*testMessageSender)(nil) + +// testEngine is a wrapper around storage.Engine that helps simulate failed and +// stalled writes. 
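+// Writes block on blockingCh while blockOnWrite is set and fail while +// errorOnWrite is set; batches capture both flags when they are created.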
+type testEngine struct { + storage.Engine + mu syncutil.Mutex + blockingCh chan struct{} + blockOnWrite bool + errorOnWrite bool +} + +func (te *testEngine) NewBatch() storage.Batch { + return testBatch{ + Batch: te.Engine.NewBatch(), + blockingCh: te.blockingCh, + blockOnWrite: te.blockOnWrite, + errorOnWrite: te.errorOnWrite, + } +} + +func (te *testEngine) setBlockOnWrite(bow bool) { + te.mu.Lock() + defer te.mu.Unlock() + te.blockOnWrite = bow +} + +func (te *testEngine) PutUnversioned(key roachpb.Key, value []byte) error { + te.mu.Lock() + defer te.mu.Unlock() + if te.blockOnWrite { + <-te.blockingCh + } + if te.errorOnWrite { + return errors.New("error writing") + } + return te.Engine.PutUnversioned(key, value) +} + +type testBatch struct { + storage.Batch + blockingCh chan struct{} + blockOnWrite bool + errorOnWrite bool +} + +func (tb testBatch) Commit(sync bool) error { + if tb.blockOnWrite { + <-tb.blockingCh + } + if tb.errorOnWrite { + return errors.New("error committing batch") + } + return tb.Batch.Commit(sync) +} diff --git a/pkg/kv/kvserver/storeliveness/supporter_state.go b/pkg/kv/kvserver/storeliveness/supporter_state.go index 3bd3658eae8f..36a93e08bfc1 100644 --- a/pkg/kv/kvserver/storeliveness/supporter_state.go +++ b/pkg/kv/kvserver/storeliveness/supporter_state.go @@ -36,9 +36,11 @@ type supporterState struct { // - ssfu := checkOutUpdate() // ssfu.handleHeartbeat(msg slpb.Message) // checkInUpdate(ssfu) +// finishUpdate(ssfu) // - ssfu := checkOutUpdate() // ssfu.withdrawSupport(now hlc.ClockTimestamp) // checkInUpdate(ssfu) +// finishUpdate(ssfu) // // Only one update can be in progress to ensure that multiple mutation methods // are not run concurrently. @@ -149,19 +151,20 @@ func (ssfu *supporterStateForUpdate) reset() { } // write writes the supporter meta and supportFor to disk if they changed in -// this update. -func (ssfu *supporterStateForUpdate) write(ctx context.Context, rw storage.ReadWriter) error { +// this update. Accepts a batch to avoid potentially writing multiple support +// states separately. +func (ssfu *supporterStateForUpdate) write(ctx context.Context, b storage.Batch) error { if ssfu.inProgress.meta == (slpb.SupporterMeta{}) && len(ssfu.inProgress.supportFor) == 0 { return nil } if ssfu.inProgress.meta != (slpb.SupporterMeta{}) { ssfu.assertMeta() - if err := writeSupporterMeta(ctx, rw, ssfu.inProgress.meta); err != nil { + if err := writeSupporterMeta(ctx, b, ssfu.inProgress.meta); err != nil { return err } } for _, ss := range ssfu.inProgress.supportFor { - if err := writeSupportForState(ctx, rw, ss); err != nil { + if err := writeSupportForState(ctx, b, ss); err != nil { return err } } @@ -204,10 +207,6 @@ func (ssh *supporterStateHandler) checkOutUpdate() *supporterStateForUpdate { // updates from the inProgress view. It clears the inProgress view, and swaps it // back in supporterStateHandler.update to be checked out by future updates. func (ssh *supporterStateHandler) checkInUpdate(ssfu *supporterStateForUpdate) { - defer func() { - ssfu.reset() - ssh.update.Swap(ssfu) - }() if ssfu.inProgress.meta == (slpb.SupporterMeta{}) && len(ssfu.inProgress.supportFor) == 0 { return } @@ -222,12 +221,20 @@ func (ssh *supporterStateHandler) checkInUpdate(ssfu *supporterStateForUpdate) { } } +// finishUpdate performs cleanup after a successful or unsuccessful +// checkInUpdate. It resets the supporterStateForUpdate in-progress state and +// makes it available for future check out. 
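+// It is expected to be deferred right after checkOutUpdate, so the update is +// released even if a failed write causes checkInUpdate to be skipped.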
+func (ssh *supporterStateHandler) finishUpdate(ssfu *supporterStateForUpdate) { + ssfu.reset() + ssh.update.Swap(ssfu) +} + // Functions for handling heartbeats. // handleHeartbeat handles a single heartbeat message. It updates the inProgress // view of supporterStateForUpdate only if there are any changes, and returns // a heartbeat response message. -func (ssfu *supporterStateForUpdate) handleHeartbeat(msg slpb.Message) slpb.Message { +func (ssfu *supporterStateForUpdate) handleHeartbeat(msg *slpb.Message) slpb.Message { from := msg.From ss, ok := ssfu.getSupportFor(from) if !ok { @@ -248,7 +255,7 @@ func (ssfu *supporterStateForUpdate) handleHeartbeat(msg slpb.Message) slpb.Mess // handleHeartbeat contains the core logic for updating the epoch and expiration // of a support requester upon receiving a heartbeat. -func handleHeartbeat(ss slpb.SupportState, msg slpb.Message) slpb.SupportState { +func handleHeartbeat(ss slpb.SupportState, msg *slpb.Message) slpb.SupportState { if ss.Epoch == msg.Epoch { ss.Expiration.Forward(msg.Expiration) } else if ss.Epoch < msg.Epoch { diff --git a/pkg/kv/kvserver/storeliveness/testdata/basic b/pkg/kv/kvserver/storeliveness/testdata/basic index d67838761df3..65d18139916f 100644 --- a/pkg/kv/kvserver/storeliveness/testdata/basic +++ b/pkg/kv/kvserver/storeliveness/testdata/basic @@ -6,8 +6,9 @@ # - withdrawing support. # ------------------------------------------------------------- -add-store node-id=2 store-id=2 +support-from node-id=2 store-id=2 ---- +epoch: 0, expiration: 0,0, support provided: false send-heartbeats now=100 liveness-interval=10s ---- @@ -23,18 +24,18 @@ responses: support-from node-id=2 store-id=2 ---- -requester state: {Target:{NodeID:2 StoreID:2} Epoch:1 Expiration:110.000000000,0} +epoch: 1, expiration: 110.000000000,0, support provided: true support-for node-id=2 store-id=2 ---- -supporter state: {Target:{NodeID:2 StoreID:2} Epoch:2 Expiration:200.000000000,0} +epoch: 2, support provided: true withdraw-support now=201 ---- support-for node-id=2 store-id=2 ---- -supporter state: {Target:{NodeID:2 StoreID:2} Epoch:3 Expiration:0,0} +epoch: 0, support provided: false debug-requester-state ---- diff --git a/pkg/kv/kvserver/storeliveness/testdata/liveness_interval b/pkg/kv/kvserver/storeliveness/testdata/liveness_interval index 867256a9e4a5..5a3819e51246 100644 --- a/pkg/kv/kvserver/storeliveness/testdata/liveness_interval +++ b/pkg/kv/kvserver/storeliveness/testdata/liveness_interval @@ -3,8 +3,9 @@ # values for the liveness interval. 
# ------------------------------------------------------------- -add-store node-id=2 store-id=2 +support-from node-id=2 store-id=2 ---- +epoch: 0, expiration: 0,0, support provided: false # ------------------------------------------------------------- # Store (n1, s1) requests and receives support with @@ -22,7 +23,7 @@ handle-messages support-from node-id=2 store-id=2 ---- -requester state: {Target:{NodeID:2 StoreID:2} Epoch:1 Expiration:110.000000000,0} +epoch: 1, expiration: 110.000000000,0, support provided: true # ------------------------------------------------------------- @@ -41,7 +42,7 @@ handle-messages support-from node-id=2 store-id=2 ---- -requester state: {Target:{NodeID:2 StoreID:2} Epoch:1 Expiration:121.000000000,0} +epoch: 1, expiration: 121.000000000,0, support provided: true # ------------------------------------------------------------- @@ -60,4 +61,4 @@ handle-messages support-from node-id=2 store-id=2 ---- -requester state: {Target:{NodeID:2 StoreID:2} Epoch:1 Expiration:121.000000000,0} +epoch: 1, expiration: 121.000000000,0, support provided: true diff --git a/pkg/kv/kvserver/storeliveness/testdata/multi-store b/pkg/kv/kvserver/storeliveness/testdata/multi-store index 613b0e88ec53..877f93c78a43 100644 --- a/pkg/kv/kvserver/storeliveness/testdata/multi-store +++ b/pkg/kv/kvserver/storeliveness/testdata/multi-store @@ -5,14 +5,17 @@ # - withdrawing support. # ------------------------------------------------------------- -add-store node-id=1 store-id=2 +support-from node-id=1 store-id=2 ---- +epoch: 0, expiration: 0,0, support provided: false -add-store node-id=2 store-id=3 +support-from node-id=2 store-id=3 ---- +epoch: 0, expiration: 0,0, support provided: false -add-store node-id=2 store-id=4 +support-from node-id=2 store-id=4 ---- +epoch: 0, expiration: 0,0, support provided: false send-heartbeats now=100 liveness-interval=10s ---- @@ -72,3 +75,47 @@ support for: {Target:{NodeID:1 StoreID:2} Epoch:3 Expiration:0,0} {Target:{NodeID:2 StoreID:3} Epoch:4 Expiration:0,0} {Target:{NodeID:2 StoreID:4} Epoch:4 Expiration:104.000000000,0} + + +# ------------------------------------------------------------- +# Store (n1, s1) processes some messages that require updating +# both the requester and supporter states. The writes to disk +# fail; both states are expected to be reset to the pre-update +# values. 
+# ------------------------------------------------------------- + +error-on-write on=true +---- + +handle-messages + msg type=MsgHeartbeat from-node-id=2 from-store-id=3 epoch=4 expiration=200 + msg type=MsgHeartbeatResp from-node-id=2 from-store-id=4 epoch=1 expiration=200 +---- + +error-on-write on=false +---- + +debug-requester-state +---- +meta: +{MaxEpoch:2 MaxRequested:110.000000000,0} +support from: +{Target:{NodeID:1 StoreID:2} Epoch:1 Expiration:110.000000000,0} +{Target:{NodeID:2 StoreID:3} Epoch:2 Expiration:0,0} +{Target:{NodeID:2 StoreID:4} Epoch:1 Expiration:110.000000000,0} + +debug-supporter-state +---- +meta: +{MaxWithdrawn:103.000000000,0} +support for: +{Target:{NodeID:1 StoreID:2} Epoch:3 Expiration:0,0} +{Target:{NodeID:2 StoreID:3} Epoch:4 Expiration:0,0} +{Target:{NodeID:2 StoreID:4} Epoch:4 Expiration:104.000000000,0} + +send-heartbeats now=200 liveness-interval=10s +---- +heartbeats: +{Type:MsgHeartbeat From:{NodeID:1 StoreID:1} To:{NodeID:1 StoreID:2} Epoch:1 Expiration:210.000000000,0} +{Type:MsgHeartbeat From:{NodeID:1 StoreID:1} To:{NodeID:2 StoreID:3} Epoch:2 Expiration:210.000000000,0} +{Type:MsgHeartbeat From:{NodeID:1 StoreID:1} To:{NodeID:2 StoreID:4} Epoch:1 Expiration:210.000000000,0} diff --git a/pkg/kv/kvserver/storeliveness/testdata/requester_state b/pkg/kv/kvserver/storeliveness/testdata/requester_state index 0858cb2f9e7a..d54a59e4f460 100644 --- a/pkg/kv/kvserver/storeliveness/testdata/requester_state +++ b/pkg/kv/kvserver/storeliveness/testdata/requester_state @@ -3,8 +3,9 @@ # support from another store (n2, s2). # ------------------------------------------------------------- -add-store node-id=2 store-id=2 +support-from node-id=2 store-id=2 ---- +epoch: 0, expiration: 0,0, support provided: false # ------------------------------------------------------------- # Store (n1, s1) successfully establishes support. 
@@ -21,7 +22,7 @@ handle-messages support-from node-id=2 store-id=2 ---- -requester state: {Target:{NodeID:2 StoreID:2} Epoch:1 Expiration:110.000000000,0} +epoch: 1, expiration: 110.000000000,0, support provided: true debug-requester-state ---- @@ -46,7 +47,7 @@ handle-messages support-from node-id=2 store-id=2 ---- -requester state: {Target:{NodeID:2 StoreID:2} Epoch:1 Expiration:210.000000000,0} +epoch: 1, expiration: 210.000000000,0, support provided: true # ------------------------------------------------------------- @@ -64,7 +65,7 @@ handle-messages support-from node-id=2 store-id=2 ---- -requester state: {Target:{NodeID:2 StoreID:2} Epoch:2 Expiration:0,0} +epoch: 0, expiration: 0,0, support provided: false debug-requester-state ---- @@ -89,7 +90,7 @@ handle-messages support-from node-id=2 store-id=2 ---- -requester state: {Target:{NodeID:2 StoreID:2} Epoch:2 Expiration:410.000000000,0} +epoch: 2, expiration: 410.000000000,0, support provided: true # ------------------------------------------------------------- @@ -102,7 +103,7 @@ handle-messages support-from node-id=2 store-id=2 ---- -requester state: {Target:{NodeID:2 StoreID:2} Epoch:2 Expiration:410.000000000,0} +epoch: 2, expiration: 410.000000000,0, support provided: true handle-messages msg type=MsgHeartbeatResp from-node-id=2 from-store-id=2 epoch=1 expiration=0 @@ -110,7 +111,7 @@ handle-messages support-from node-id=2 store-id=2 ---- -requester state: {Target:{NodeID:2 StoreID:2} Epoch:2 Expiration:410.000000000,0} +epoch: 2, expiration: 410.000000000,0, support provided: true handle-messages msg type=MsgHeartbeatResp from-node-id=2 from-store-id=2 epoch=2 expiration=400 @@ -118,7 +119,7 @@ handle-messages support-from node-id=2 store-id=2 ---- -requester state: {Target:{NodeID:2 StoreID:2} Epoch:2 Expiration:410.000000000,0} +epoch: 2, expiration: 410.000000000,0, support provided: true # ------------------------------------------------------------- @@ -132,7 +133,27 @@ heartbeats: support-from node-id=2 store-id=2 ---- -requester state: {Target:{NodeID:2 StoreID:2} Epoch:2 Expiration:410.000000000,0} +epoch: 2, expiration: 410.000000000,0, support provided: true + +debug-requester-state +---- +meta: +{MaxEpoch:2 MaxRequested:510.000000000,0} +support from: +{Target:{NodeID:2 StoreID:2} Epoch:2 Expiration:410.000000000,0} + + +# ------------------------------------------------------------- +# Store (n1, s1) stops requesting support after no recent calls +# to SupportFrom for (n2, s2). +# +# SupportFrom was already called above, so the first call to +# mark-idle-stores will set recentlyQueried to inactive, and +# the second call to mark-idle-stores will set it to idle. 
+# ------------------------------------------------------------- + +mark-idle-stores +---- debug-requester-state ---- @@ -140,3 +161,58 @@ meta: {MaxEpoch:2 MaxRequested:510.000000000,0} support from: {Target:{NodeID:2 StoreID:2} Epoch:2 Expiration:410.000000000,0} + +send-heartbeats now=550 liveness-interval=10s +---- +heartbeats: +{Type:MsgHeartbeat From:{NodeID:1 StoreID:1} To:{NodeID:2 StoreID:2} Epoch:2 Expiration:560.000000000,0} + +mark-idle-stores +---- + +debug-requester-state +---- +meta: +{MaxEpoch:2 MaxRequested:560.000000000,0} +support from: +{Target:{NodeID:2 StoreID:2} Epoch:2 Expiration:410.000000000,0} + +send-heartbeats now=600 liveness-interval=10s +---- +heartbeats: + +support-from node-id=2 store-id=2 +---- +epoch: 2, expiration: 410.000000000,0, support provided: true + +send-heartbeats now=700 liveness-interval=10s +---- +heartbeats: +{Type:MsgHeartbeat From:{NodeID:1 StoreID:1} To:{NodeID:2 StoreID:2} Epoch:2 Expiration:710.000000000,0} + + +# ------------------------------------------------------------- +# Store (n1, s1) requests support but fails to write meta. +# ------------------------------------------------------------- + +error-on-write on=true +---- + +send-heartbeats now=800 liveness-interval=10s +---- +heartbeats: + +debug-requester-state +---- +meta: +{MaxEpoch:2 MaxRequested:710.000000000,0} +support from: +{Target:{NodeID:2 StoreID:2} Epoch:2 Expiration:410.000000000,0} + +error-on-write on=false +---- + +send-heartbeats now=900 liveness-interval=10s +---- +heartbeats: +{Type:MsgHeartbeat From:{NodeID:1 StoreID:1} To:{NodeID:2 StoreID:2} Epoch:2 Expiration:910.000000000,0} diff --git a/pkg/kv/kvserver/storeliveness/testdata/restart b/pkg/kv/kvserver/storeliveness/testdata/restart index 203e665e4f12..0b16f0ac5573 100644 --- a/pkg/kv/kvserver/storeliveness/testdata/restart +++ b/pkg/kv/kvserver/storeliveness/testdata/restart @@ -3,8 +3,9 @@ # records, but keeps all metadata and supportFor records. # ------------------------------------------------------------- -add-store node-id=2 store-id=2 +support-from node-id=2 store-id=2 ---- +epoch: 0, expiration: 0,0, support provided: false # ------------------------------------------------------------- # Store (n1, s1) establishes support for and from (n2, s2). @@ -49,9 +50,14 @@ support for: # ------------------------------------------------------------- # Store (n1, s1) restarts. +# When restarting, the clock should be at least MaxRequested; +# otherwise, the test will hang while trying to sleep until +# MaxRequested. This test uses a manual clock, so it will never +# progress on its own. Clock-related properties are tested in +# TestSupportManagerRestart. # ------------------------------------------------------------- -restart +restart now=300 grace-period=10s ---- debug-requester-state @@ -67,23 +73,69 @@ meta: support for: {Target:{NodeID:2 StoreID:2} Epoch:3 Expiration:300.000000000,0} +# ------------------------------------------------------------- +# Store (n1, s1) receives an old heartbeat response from store +# (n2, s2) after restarting. It's ignored. +# ------------------------------------------------------------- + +handle-messages + msg type=MsgHeartbeatResp from-node-id=2 from-store-id=2 epoch=4 expiration=200 +---- + +debug-requester-state +---- +meta: +{MaxEpoch:3 MaxRequested:110.000000000,0} +support from: + + +# ------------------------------------------------------------- +# Store (n1, s1) attempts to withdraw support before the grace +# period has elapsed.
+# ------------------------------------------------------------- + +withdraw-support now=310 +---- + +debug-supporter-state +---- +meta: +{MaxWithdrawn:201.000000000,0} +support for: +{Target:{NodeID:2 StoreID:2} Epoch:3 Expiration:300.000000000,0} + +# ------------------------------------------------------------- +# Store (n1, s1) withdraws support after the grace period. +# ------------------------------------------------------------- + +withdraw-support now=311 +---- + +debug-supporter-state +---- +meta: +{MaxWithdrawn:311.000000000,0} +support for: +{Target:{NodeID:2 StoreID:2} Epoch:4 Expiration:0,0} + # ------------------------------------------------------------- # Store (n1, s1) sends heartbeats but it forgot about support # from (n2, s2), so it doesn't send any heartbeats. # ------------------------------------------------------------- -send-heartbeats now=200 liveness-interval=10s +send-heartbeats now=400 liveness-interval=10s ---- heartbeats: -add-store node-id=2 store-id=2 +support-from node-id=2 store-id=2 ---- +epoch: 0, expiration: 0,0, support provided: false # ------------------------------------------------------------- # Store (n1, s1) sends heartbeats with an incremented epoch. # ------------------------------------------------------------- -send-heartbeats now=200 liveness-interval=10s +send-heartbeats now=500 liveness-interval=10s ---- heartbeats: -{Type:MsgHeartbeat From:{NodeID:1 StoreID:1} To:{NodeID:2 StoreID:2} Epoch:3 Expiration:210.000000000,0} +{Type:MsgHeartbeat From:{NodeID:1 StoreID:1} To:{NodeID:2 StoreID:2} Epoch:3 Expiration:510.000000000,0} diff --git a/pkg/kv/kvserver/storeliveness/testdata/supporter_state b/pkg/kv/kvserver/storeliveness/testdata/supporter_state index c60c349255b8..6184e8e96343 100644 --- a/pkg/kv/kvserver/storeliveness/testdata/supporter_state +++ b/pkg/kv/kvserver/storeliveness/testdata/supporter_state @@ -15,7 +15,7 @@ responses: support-for node-id=2 store-id=2 ---- -supporter state: {Target:{NodeID:2 StoreID:2} Epoch:1 Expiration:100.000000000,0} +epoch: 1, support provided: true # ------------------------------------------------------------- @@ -30,7 +30,7 @@ responses: support-for node-id=2 store-id=2 ---- -supporter state: {Target:{NodeID:2 StoreID:2} Epoch:1 Expiration:200.000000000,0} +epoch: 1, support provided: true # ------------------------------------------------------------- @@ -42,7 +42,7 @@ withdraw-support now=199 support-for node-id=2 store-id=2 ---- -supporter state: {Target:{NodeID:2 StoreID:2} Epoch:1 Expiration:200.000000000,0} +epoch: 1, support provided: true # ------------------------------------------------------------- @@ -54,7 +54,7 @@ withdraw-support now=201 support-for node-id=2 store-id=2 ---- -supporter state: {Target:{NodeID:2 StoreID:2} Epoch:2 Expiration:0,0} +epoch: 0, support provided: false debug-supporter-state ---- @@ -66,9 +66,6 @@ support for: # ------------------------------------------------------------- # Store (n1, s1) provides support at a higher epoch. -# TODO(mira): the expiration can regress here wrt the previous -# supported expiration but it won't be possible after clocks are -# integrated into this test. 
# ------------------------------------------------------------- handle-messages @@ -79,7 +76,7 @@ responses: support-for node-id=2 store-id=2 ---- -supporter state: {Target:{NodeID:2 StoreID:2} Epoch:2 Expiration:300.000000000,0} +epoch: 2, support provided: true # ------------------------------------------------------------- @@ -94,7 +91,7 @@ responses: support-for node-id=2 store-id=2 ---- -supporter state: {Target:{NodeID:2 StoreID:2} Epoch:2 Expiration:300.000000000,0} +epoch: 2, support provided: true handle-messages msg type=MsgHeartbeat from-node-id=2 from-store-id=2 epoch=2 expiration=299 @@ -104,4 +101,29 @@ responses: support-for node-id=2 store-id=2 ---- -supporter state: {Target:{NodeID:2 StoreID:2} Epoch:2 Expiration:300.000000000,0} +epoch: 2, support provided: true + + +# ------------------------------------------------------------- +# Store (n1, s1) fails to write the support state. +# ------------------------------------------------------------- + +error-on-write on=true +---- + +handle-messages + msg type=MsgHeartbeat from-node-id=2 from-store-id=2 epoch=3 expiration=400 +---- + +withdraw-support now=400 +---- + +error-on-write on=false +---- + +debug-supporter-state +---- +meta: +{MaxWithdrawn:201.000000000,0} +support for: +{Target:{NodeID:2 StoreID:2} Epoch:2 Expiration:300.000000000,0} diff --git a/pkg/kv/kvserver/storeliveness/transport.go b/pkg/kv/kvserver/storeliveness/transport.go index 855f076acb61..9bb84107fc3d 100644 --- a/pkg/kv/kvserver/storeliveness/transport.go +++ b/pkg/kv/kvserver/storeliveness/transport.go @@ -53,7 +53,7 @@ type MessageHandler interface { // block (e.g. do a synchronous disk write) to prevent a single store with a // problem (e.g. a stalled disk) from affecting message receipt by other // stores on the same node. - HandleMessage(ctx context.Context, msg *slpb.Message) + HandleMessage(msg *slpb.Message) } // sendQueue is a queue of outgoing Messages. @@ -81,6 +81,8 @@ type Transport struct { handlers syncutil.Map[roachpb.StoreID, MessageHandler] } +var _ MessageSender = (*Transport)(nil) + // NewTransport creates a new Store Liveness Transport. func NewTransport( ambient log.AmbientContext, @@ -158,12 +160,12 @@ func (t *Transport) handleMessage(ctx context.Context, msg *slpb.Message) { return } - (*handler).HandleMessage(ctx, msg) + (*handler).HandleMessage(msg) } -// SendAsync sends a message to the recipient specified in the request. It -// returns false if the outgoing queue is full or the node dialer's circuit -// breaker has tripped. +// SendAsync implements the MessageSender interface. It sends a message to the +// recipient specified in the request, and returns false if the outgoing queue +// is full or the node dialer's circuit breaker has tripped. // // The returned bool may be a false positive but will never be a false negative; // if sent is true the message may or may not actually be sent but if it's false diff --git a/pkg/kv/kvserver/storeliveness/transport_test.go b/pkg/kv/kvserver/storeliveness/transport_test.go index 03bb4eecbd08..654655e5579d 100644 --- a/pkg/kv/kvserver/storeliveness/transport_test.go +++ b/pkg/kv/kvserver/storeliveness/transport_test.go @@ -49,7 +49,7 @@ func newMessageHandler(size int) testMessageHandler { } } -func (tmh *testMessageHandler) HandleMessage(_ context.Context, msg *slpb.Message) { +func (tmh *testMessageHandler) HandleMessage(msg *slpb.Message) { // Simulate a message handling delay. 
time.Sleep(time.Duration(rand.Int63n(int64(maxDelay)))) tmh.messages <- msg diff --git a/pkg/kv/kvserver/tenantrate/BUILD.bazel b/pkg/kv/kvserver/tenantrate/BUILD.bazel index 975d0ae9fcce..f3011f9b7b06 100644 --- a/pkg/kv/kvserver/tenantrate/BUILD.bazel +++ b/pkg/kv/kvserver/tenantrate/BUILD.bazel @@ -47,6 +47,7 @@ go_test( "//pkg/testutils/metrictestutils", "//pkg/util/leaktest", "//pkg/util/metric", + "//pkg/util/quotapool", "//pkg/util/stop", "//pkg/util/timeutil", "@com_github_cockroachdb_datadriven//:datadriven", diff --git a/pkg/kv/kvserver/tenantrate/factory.go b/pkg/kv/kvserver/tenantrate/factory.go index 9a35d54b46dc..438016473cdc 100644 --- a/pkg/kv/kvserver/tenantrate/factory.go +++ b/pkg/kv/kvserver/tenantrate/factory.go @@ -19,13 +19,12 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/quotapool" "github.com/cockroachdb/cockroach/pkg/util/syncutil" - "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/errors" ) // TestingKnobs configures a LimiterFactory for testing. type TestingKnobs struct { - TimeSource timeutil.TimeSource + QuotaPoolOptions []quotapool.Option // Authorizer, if set, replaces the authorizer in the RPCContext. Authorizer tenantcapabilities.Authorizer @@ -95,9 +94,7 @@ func (rl *LimiterFactory) GetTenant( rcLim, ok := rl.mu.tenants[tenantID] if !ok { var options []quotapool.Option - if rl.knobs.TimeSource != nil { - options = append(options, quotapool.WithTimeSource(rl.knobs.TimeSource)) - } + options = append(options, rl.knobs.QuotaPoolOptions...) if closer != nil { options = append(options, quotapool.WithCloser(closer)) } diff --git a/pkg/kv/kvserver/tenantrate/limiter_test.go b/pkg/kv/kvserver/tenantrate/limiter_test.go index f92adab99483..e4029155c76a 100644 --- a/pkg/kv/kvserver/tenantrate/limiter_test.go +++ b/pkg/kv/kvserver/tenantrate/limiter_test.go @@ -33,6 +33,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/testutils/metrictestutils" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/metric" + "github.com/cockroachdb/cockroach/pkg/util/quotapool" "github.com/cockroachdb/cockroach/pkg/util/stop" "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/datadriven" @@ -52,7 +53,7 @@ func TestCloser(t *testing.T) { start := timeutil.Now() timeSource := timeutil.NewManualTime(start) factory := tenantrate.NewLimiterFactory(&st.SV, &tenantrate.TestingKnobs{ - TimeSource: timeSource, + QuotaPoolOptions: []quotapool.Option{quotapool.WithTimeSource(timeSource)}, }, fakeAuthorizer{}) tenant := roachpb.MustMakeTenantID(2) closer := make(chan struct{}) @@ -220,7 +221,7 @@ func (ts *testState) init(t *testing.T, d *datadriven.TestData) string { parseSettings(t, d, &ts.config, ts.capabilities) ts.rl = tenantrate.NewLimiterFactory(&ts.settings.SV, &tenantrate.TestingKnobs{ - TimeSource: ts.clock, + QuotaPoolOptions: []quotapool.Option{quotapool.WithTimeSource(ts.clock)}, }, ts) ts.rl.UpdateConfig(ts.config) ts.m = metric.NewRegistry() @@ -667,6 +668,10 @@ func (ts *testState) BindReader(tenantcapabilities.Reader) {} var _ tenantcapabilities.Authorizer = &testState{} +func (ts *testState) HasCrossTenantRead(ctx context.Context, tenID roachpb.TenantID) bool { + return false +} + func (ts *testState) HasProcessDebugCapability(ctx context.Context, tenID roachpb.TenantID) error { if ts.capabilities[tenID].CanDebugProcess { return 
nil @@ -785,6 +790,10 @@ type fakeAuthorizer struct{} var _ tenantcapabilities.Authorizer = &fakeAuthorizer{} +func (fakeAuthorizer) HasCrossTenantRead(ctx context.Context, tenID roachpb.TenantID) bool { + return false +} + func (fakeAuthorizer) HasNodeStatusCapability(_ context.Context, tenID roachpb.TenantID) error { return nil } diff --git a/pkg/kv/kvserver/testdata/flow_control_integration/admission_post_split_merge b/pkg/kv/kvserver/testdata/flow_control_integration/admission_post_split_merge index 87cf883bbef6..1fc1a9c3409a 100644 --- a/pkg/kv/kvserver/testdata/flow_control_integration/admission_post_split_merge +++ b/pkg/kv/kvserver/testdata/flow_control_integration/admission_post_split_merge @@ -50,8 +50,8 @@ ORDER BY streams DESC; range_id | stream_count -----------+--------------- - 69 | 3 70 | 3 + 71 | 3 -- (Merging ranges.) @@ -81,7 +81,7 @@ ORDER BY streams DESC; range_id | stream_count -----------+--------------- - 69 | 3 + 70 | 3 -- (Allow below-raft admission to proceed.) diff --git a/pkg/kv/kvserver/testdata/flow_control_integration/basic b/pkg/kv/kvserver/testdata/flow_control_integration/basic index a87990ecdd61..be7cd7632686 100644 --- a/pkg/kv/kvserver/testdata/flow_control_integration/basic +++ b/pkg/kv/kvserver/testdata/flow_control_integration/basic @@ -42,7 +42,7 @@ ORDER BY streams DESC; range_id | stream_count -----------+--------------- - 69 | 3 + 70 | 3 -- Flow token metrics from n1 after issuing the regular 1MiB replicated write, diff --git a/pkg/kv/kvserver/testdata/flow_control_integration/blocked_admission b/pkg/kv/kvserver/testdata/flow_control_integration/blocked_admission index 266191847de4..11c881e3214f 100644 --- a/pkg/kv/kvserver/testdata/flow_control_integration/blocked_admission +++ b/pkg/kv/kvserver/testdata/flow_control_integration/blocked_admission @@ -28,9 +28,9 @@ SELECT range_id, store_id, crdb_internal.humanize_bytes(total_tracked_tokens::IN range_id | store_id | total_tracked_tokens -----------+----------+----------------------- - 69 | 1 | 5.0 MiB - 69 | 2 | 5.0 MiB - 69 | 3 | 5.0 MiB + 70 | 1 | 5.0 MiB + 70 | 2 | 5.0 MiB + 70 | 3 | 5.0 MiB -- Observe the individual tracked tokens per-stream on the scratch range. @@ -39,21 +39,21 @@ SELECT range_id, store_id, priority, crdb_internal.humanize_bytes(tokens::INT8) range_id | store_id | priority | tokens -----------+----------+------------+---------- - 69 | 1 | normal-pri | 1.0 MiB - 69 | 1 | normal-pri | 1.0 MiB - 69 | 1 | normal-pri | 1.0 MiB - 69 | 1 | normal-pri | 1.0 MiB - 69 | 1 | normal-pri | 1.0 MiB - 69 | 2 | normal-pri | 1.0 MiB - 69 | 2 | normal-pri | 1.0 MiB - 69 | 2 | normal-pri | 1.0 MiB - 69 | 2 | normal-pri | 1.0 MiB - 69 | 2 | normal-pri | 1.0 MiB - 69 | 3 | normal-pri | 1.0 MiB - 69 | 3 | normal-pri | 1.0 MiB - 69 | 3 | normal-pri | 1.0 MiB - 69 | 3 | normal-pri | 1.0 MiB - 69 | 3 | normal-pri | 1.0 MiB + 70 | 1 | normal-pri | 1.0 MiB + 70 | 1 | normal-pri | 1.0 MiB + 70 | 1 | normal-pri | 1.0 MiB + 70 | 1 | normal-pri | 1.0 MiB + 70 | 1 | normal-pri | 1.0 MiB + 70 | 2 | normal-pri | 1.0 MiB + 70 | 2 | normal-pri | 1.0 MiB + 70 | 2 | normal-pri | 1.0 MiB + 70 | 2 | normal-pri | 1.0 MiB + 70 | 2 | normal-pri | 1.0 MiB + 70 | 3 | normal-pri | 1.0 MiB + 70 | 3 | normal-pri | 1.0 MiB + 70 | 3 | normal-pri | 1.0 MiB + 70 | 3 | normal-pri | 1.0 MiB + 70 | 3 | normal-pri | 1.0 MiB -- (Allow below-raft admission to proceed.) 
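A brief aside on the tenantrate change a few hunks up, before the remaining flow-control testdata renumbering: the testing knob was generalized from a single TimeSource field to an arbitrary list of quotapool options. Below is a minimal sketch of how a test now wires in a manual clock, using only identifiers that appear in this diff; the package placement, the factory's return type, and reuse of fakeAuthorizer from limiter_test.go are assumptions, not part of the patch.

```go
package tenantrate_test // hypothetical placement, for illustration only

import (
	"github.com/cockroachdb/cockroach/pkg/kv/kvserver/tenantrate"
	"github.com/cockroachdb/cockroach/pkg/settings/cluster"
	"github.com/cockroachdb/cockroach/pkg/util/quotapool"
	"github.com/cockroachdb/cockroach/pkg/util/timeutil"
)

// newManualTimeFactory builds a LimiterFactory on a manual clock. Where tests
// previously set TestingKnobs.TimeSource, they now inject
// quotapool.WithTimeSource (or any other quotapool.Option) via
// QuotaPoolOptions, which is the point of generalizing the knob.
// fakeAuthorizer is the test double defined in limiter_test.go above.
func newManualTimeFactory() *tenantrate.LimiterFactory {
	st := cluster.MakeTestingClusterSettings()
	timeSource := timeutil.NewManualTime(timeutil.Now())
	return tenantrate.NewLimiterFactory(&st.SV, &tenantrate.TestingKnobs{
		QuotaPoolOptions: []quotapool.Option{quotapool.WithTimeSource(timeSource)},
	}, fakeAuthorizer{})
}
```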
diff --git a/pkg/kv/kvserver/testdata/flow_control_integration/crashed_node b/pkg/kv/kvserver/testdata/flow_control_integration/crashed_node index cd656f2531b5..fdf9870913fe 100644 --- a/pkg/kv/kvserver/testdata/flow_control_integration/crashed_node +++ b/pkg/kv/kvserver/testdata/flow_control_integration/crashed_node @@ -28,8 +28,8 @@ SELECT range_id, store_id, crdb_internal.humanize_bytes(total_tracked_tokens::IN range_id | store_id | total_tracked_tokens -----------+----------+----------------------- - 69 | 1 | 5.0 MiB - 69 | 2 | 5.0 MiB + 70 | 1 | 5.0 MiB + 70 | 2 | 5.0 MiB -- (Crashing n2 but disabling the raft-transport-break token return mechanism.) @@ -42,7 +42,7 @@ SELECT range_id, store_id, crdb_internal.humanize_bytes(total_tracked_tokens::IN range_id | store_id | total_tracked_tokens -----------+----------+----------------------- - 69 | 1 | 5.0 MiB + 70 | 1 | 5.0 MiB -- Flow token metrics from n1 after n2 crashed. Observe that we've returned the diff --git a/pkg/kv/kvserver/testdata/flow_control_integration/granter_admit_one_by_one b/pkg/kv/kvserver/testdata/flow_control_integration/granter_admit_one_by_one index ecaf22eb8e58..c8e4297b348b 100644 --- a/pkg/kv/kvserver/testdata/flow_control_integration/granter_admit_one_by_one +++ b/pkg/kv/kvserver/testdata/flow_control_integration/granter_admit_one_by_one @@ -28,9 +28,9 @@ SELECT range_id, store_id, crdb_internal.humanize_bytes(total_tracked_tokens::IN range_id | store_id | total_tracked_tokens -----------+----------+----------------------- - 69 | 1 | 1.0 MiB - 69 | 2 | 1.0 MiB - 69 | 3 | 1.0 MiB + 70 | 1 | 1.0 MiB + 70 | 2 | 1.0 MiB + 70 | 3 | 1.0 MiB -- (Allow below-raft admission to proceed.) diff --git a/pkg/kv/kvserver/testdata/flow_control_integration/raft_membership b/pkg/kv/kvserver/testdata/flow_control_integration/raft_membership index 437e878744e2..8f0f923f8923 100644 --- a/pkg/kv/kvserver/testdata/flow_control_integration/raft_membership +++ b/pkg/kv/kvserver/testdata/flow_control_integration/raft_membership @@ -28,10 +28,10 @@ SELECT range_id, store_id, crdb_internal.humanize_bytes(total_tracked_tokens::IN range_id | store_id | total_tracked_tokens -----------+----------+----------------------- - 69 | 1 | 1.0 MiB - 69 | 2 | 1.0 MiB - 69 | 3 | 1.0 MiB - 69 | 4 | 0 B + 70 | 1 | 1.0 MiB + 70 | 2 | 1.0 MiB + 70 | 3 | 1.0 MiB + 70 | 4 | 0 B -- (Issuing 1x1MiB, 4x replicated write that's not admitted.) @@ -45,10 +45,10 @@ SELECT range_id, store_id, crdb_internal.humanize_bytes(total_tracked_tokens::IN range_id | store_id | total_tracked_tokens -----------+----------+----------------------- - 69 | 1 | 2.0 MiB - 69 | 2 | 2.0 MiB - 69 | 3 | 2.0 MiB - 69 | 4 | 1.0 MiB + 70 | 1 | 2.0 MiB + 70 | 2 | 2.0 MiB + 70 | 3 | 2.0 MiB + 70 | 4 | 1.0 MiB -- (Removing voting replica from n3.) @@ -69,10 +69,10 @@ SELECT range_id, store_id, crdb_internal.humanize_bytes(total_tracked_tokens::IN range_id | store_id | total_tracked_tokens -----------+----------+----------------------- - 69 | 1 | 3.0 MiB - 69 | 2 | 3.0 MiB - 69 | 4 | 2.0 MiB - 69 | 5 | 1.0 MiB + 70 | 1 | 3.0 MiB + 70 | 2 | 3.0 MiB + 70 | 4 | 2.0 MiB + 70 | 5 | 1.0 MiB -- (Allow below-raft admission to proceed.) @@ -84,10 +84,10 @@ SELECT range_id, store_id, crdb_internal.humanize_bytes(total_tracked_tokens::IN range_id | store_id | total_tracked_tokens -----------+----------+----------------------- - 69 | 1 | 0 B - 69 | 2 | 0 B - 69 | 4 | 0 B - 69 | 5 | 0 B + 70 | 1 | 0 B + 70 | 2 | 0 B + 70 | 4 | 0 B + 70 | 5 | 0 B -- Flow token metrics from n1 after work gets admitted. 
All {regular,elastic} diff --git a/pkg/kv/kvserver/testdata/flow_control_integration/raft_snapshot b/pkg/kv/kvserver/testdata/flow_control_integration/raft_snapshot index e124e4a627e9..acbc2ebf1b55 100644 --- a/pkg/kv/kvserver/testdata/flow_control_integration/raft_snapshot +++ b/pkg/kv/kvserver/testdata/flow_control_integration/raft_snapshot @@ -25,11 +25,11 @@ SELECT range_id, store_id, crdb_internal.humanize_bytes(total_tracked_tokens::IN range_id | store_id | total_tracked_tokens -----------+----------+----------------------- - 69 | 1 | 1.0 MiB - 69 | 2 | 1.0 MiB - 69 | 3 | 1.0 MiB - 69 | 4 | 1.0 MiB - 69 | 5 | 1.0 MiB + 70 | 1 | 1.0 MiB + 70 | 2 | 1.0 MiB + 70 | 3 | 1.0 MiB + 70 | 4 | 1.0 MiB + 70 | 5 | 1.0 MiB -- (Killing n2 and n3, but preventing their tokens from being returned + @@ -45,11 +45,11 @@ SELECT range_id, store_id, crdb_internal.humanize_bytes(total_tracked_tokens::IN range_id | store_id | total_tracked_tokens -----------+----------+----------------------- - 69 | 1 | 1.0 MiB - 69 | 2 | 1.0 MiB - 69 | 3 | 1.0 MiB - 69 | 4 | 1.0 MiB - 69 | 5 | 1.0 MiB + 70 | 1 | 1.0 MiB + 70 | 2 | 1.0 MiB + 70 | 3 | 1.0 MiB + 70 | 4 | 1.0 MiB + 70 | 5 | 1.0 MiB -- (Issuing another 1MiB of 5x replicated writes while n2 and n3 are down and @@ -84,11 +84,11 @@ SELECT range_id, store_id, crdb_internal.humanize_bytes(total_tracked_tokens::IN range_id | store_id | total_tracked_tokens -----------+----------+----------------------- - 69 | 1 | 2.0 MiB - 69 | 2 | 2.0 MiB - 69 | 3 | 2.0 MiB - 69 | 4 | 2.0 MiB - 69 | 5 | 2.0 MiB + 70 | 1 | 2.0 MiB + 70 | 2 | 2.0 MiB + 70 | 3 | 2.0 MiB + 70 | 4 | 2.0 MiB + 70 | 5 | 2.0 MiB -- (Truncating raft log.) @@ -124,9 +124,9 @@ SELECT range_id, store_id, crdb_internal.humanize_bytes(total_tracked_tokens::IN range_id | store_id | total_tracked_tokens -----------+----------+----------------------- - 69 | 1 | 2.0 MiB - 69 | 4 | 2.0 MiB - 69 | 5 | 2.0 MiB + 70 | 1 | 2.0 MiB + 70 | 4 | 2.0 MiB + 70 | 5 | 2.0 MiB -- (Allow below-raft admission to proceed.) @@ -164,11 +164,11 @@ SELECT range_id, store_id, crdb_internal.humanize_bytes(total_tracked_tokens::IN range_id | store_id | total_tracked_tokens -----------+----------+----------------------- - 69 | 1 | 0 B - 69 | 2 | 0 B - 69 | 3 | 0 B - 69 | 4 | 0 B - 69 | 5 | 0 B + 70 | 1 | 0 B + 70 | 2 | 0 B + 70 | 3 | 0 B + 70 | 4 | 0 B + 70 | 5 | 0 B -- Another view of tokens, using /inspectz-backed vtables. diff --git a/pkg/kv/kvserver/testdata/flow_control_integration/raft_transport_break b/pkg/kv/kvserver/testdata/flow_control_integration/raft_transport_break index f1bf6a3ae583..5673745ed7e3 100644 --- a/pkg/kv/kvserver/testdata/flow_control_integration/raft_transport_break +++ b/pkg/kv/kvserver/testdata/flow_control_integration/raft_transport_break @@ -28,9 +28,9 @@ SELECT range_id, store_id, crdb_internal.humanize_bytes(total_tracked_tokens::IN range_id | store_id | total_tracked_tokens -----------+----------+----------------------- - 69 | 1 | 5.0 MiB - 69 | 2 | 5.0 MiB - 69 | 3 | 5.0 MiB + 70 | 1 | 5.0 MiB + 70 | 2 | 5.0 MiB + 70 | 3 | 5.0 MiB -- (Crashing n2 but disabling the last-updated token return mechanism.) @@ -44,8 +44,8 @@ SELECT range_id, store_id, crdb_internal.humanize_bytes(total_tracked_tokens::IN range_id | store_id | total_tracked_tokens -----------+----------+----------------------- - 69 | 1 | 5.0 MiB - 69 | 3 | 5.0 MiB + 70 | 1 | 5.0 MiB + 70 | 3 | 5.0 MiB -- Flow token metrics from n1 after n2 crashed. 
Observe that we've returned the diff --git a/pkg/kv/kvserver/testdata/flow_control_integration/raft_transport_culled b/pkg/kv/kvserver/testdata/flow_control_integration/raft_transport_culled index 0050cd6e7aee..28d85e9325f9 100644 --- a/pkg/kv/kvserver/testdata/flow_control_integration/raft_transport_culled +++ b/pkg/kv/kvserver/testdata/flow_control_integration/raft_transport_culled @@ -29,9 +29,9 @@ SELECT range_id, store_id, crdb_internal.humanize_bytes(total_tracked_tokens::IN range_id | store_id | total_tracked_tokens -----------+----------+----------------------- - 69 | 1 | 5.0 MiB - 69 | 2 | 5.0 MiB - 69 | 3 | 5.0 MiB + 70 | 1 | 5.0 MiB + 70 | 2 | 5.0 MiB + 70 | 3 | 5.0 MiB -- (Marking n2->n1 raft transport gRPC stream as idle.) @@ -46,8 +46,8 @@ SELECT range_id, store_id, crdb_internal.humanize_bytes(total_tracked_tokens::IN range_id | store_id | total_tracked_tokens -----------+----------+----------------------- - 69 | 1 | 5.0 MiB - 69 | 3 | 5.0 MiB + 70 | 1 | 5.0 MiB + 70 | 3 | 5.0 MiB -- Flow token metrics from n1 after n2->n1 raft transport stream is culled. diff --git a/pkg/kv/kvserver/testdata/flow_control_integration/split_merge b/pkg/kv/kvserver/testdata/flow_control_integration/split_merge index cbefd0d820cd..bb7b0645b083 100644 --- a/pkg/kv/kvserver/testdata/flow_control_integration/split_merge +++ b/pkg/kv/kvserver/testdata/flow_control_integration/split_merge @@ -49,8 +49,8 @@ ORDER BY streams DESC; range_id | stream_count -----------+--------------- - 69 | 3 70 | 3 + 71 | 3 -- (Merging ranges.) @@ -82,7 +82,7 @@ ORDER BY streams DESC; range_id | stream_count -----------+--------------- - 69 | 3 + 70 | 3 ---- ---- diff --git a/pkg/kv/kvserver/txnwait/queue.go b/pkg/kv/kvserver/txnwait/queue.go index c2583b8e107a..cc6da592aa12 100644 --- a/pkg/kv/kvserver/txnwait/queue.go +++ b/pkg/kv/kvserver/txnwait/queue.go @@ -1005,13 +1005,7 @@ func (q *Queue) queryTxnStatus( } br := b.RawResponse() resp := br.Responses[0].GetInner().(*kvpb.QueryTxnResponse) - // ID can be nil if no HeartbeatTxn has been sent yet and we're talking to a - // 2.1 node. - // TODO(nvanbenschoten): Remove this in 2.3. 
- if updatedTxn := &resp.QueriedTxn; updatedTxn.ID != (uuid.UUID{}) { - return updatedTxn, resp.WaitingTxns, nil - } - return nil, nil, nil + return &resp.QueriedTxn, resp.WaitingTxns, nil } // forcePushAbort upgrades the PushTxn request to a "forced" push abort, which diff --git a/pkg/multitenant/mtinfo/BUILD.bazel b/pkg/multitenant/mtinfo/BUILD.bazel index 05cb5cc90452..ced1c1c3983c 100644 --- a/pkg/multitenant/mtinfo/BUILD.bazel +++ b/pkg/multitenant/mtinfo/BUILD.bazel @@ -10,6 +10,7 @@ go_library( "//pkg/multitenant/tenantcapabilities", "//pkg/roachpb", "//pkg/sql/sem/tree", + "//pkg/util/hlc", "//pkg/util/protoutil", "@com_github_cockroachdb_errors//:errors", ], diff --git a/pkg/multitenant/mtinfo/info.go b/pkg/multitenant/mtinfo/info.go index 389180cfdd19..c562d4c10d0d 100644 --- a/pkg/multitenant/mtinfo/info.go +++ b/pkg/multitenant/mtinfo/info.go @@ -11,14 +11,23 @@ package mtinfo import ( + "context" + "github.com/cockroachdb/cockroach/pkg/multitenant/mtinfopb" "github.com/cockroachdb/cockroach/pkg/multitenant/tenantcapabilities" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" + "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/protoutil" "github.com/cockroachdb/errors" ) +// ReadFromTenantInfoAccessor exposes the tenant, if any, from which this +// tenant should serve read-only requests, along with an associated timestamp. +type ReadFromTenantInfoAccessor interface { + ReadFromTenantInfo(context.Context) (roachpb.TenantID, hlc.Timestamp, error) +} + // GetTenantInfoFromSQLRow synthetizes a TenantInfo from a SQL row // extracted from system.tenants. The caller is responsible for // passing a []tree.Datum of length 2 or more (ID, info, name*, data diff --git a/pkg/multitenant/mtinfopb/info.proto b/pkg/multitenant/mtinfopb/info.proto index 41dacee02e68..bc8d0a5b88fc 100644 --- a/pkg/multitenant/mtinfopb/info.proto +++ b/pkg/multitenant/mtinfopb/info.proto @@ -83,7 +83,12 @@ message ProtoInfo { // VIRTUAL CLUSTER FROM REPLICATION STREAM. optional util.hlc.Timestamp last_revert_tenant_timestamp = 8 [(gogoproto.nullable) = false]; - // Next ID: 9 + // ReadFromTenant is the ID, if any, of another tenant from which this tenant + // should be configured to serve read-only SQL requests (by configuring and + // maintaining its catalog accordingly). + optional roachpb.TenantID read_from_tenant = 9; + + // Next ID: 10. } message PreviousSourceTenant { diff --git a/pkg/multitenant/tenantcapabilities/interfaces.go b/pkg/multitenant/tenantcapabilities/interfaces.go index a9ad0bca42f8..4565c28de0c2 100644 --- a/pkg/multitenant/tenantcapabilities/interfaces.go +++ b/pkg/multitenant/tenantcapabilities/interfaces.go @@ -41,6 +41,9 @@ type Reader interface { // signals other than just the tenant capability state. For example, request // usage pattern over a timespan. type Authorizer interface { + // HasCrossTenantRead returns true if a tenant can read other tenant spans. + HasCrossTenantRead(ctx context.Context, tenID roachpb.TenantID) bool + // HasCapabilityForBatch returns an error if a tenant, referenced by its ID, // is not allowed to execute the supplied batch request given the capabilities // it possesses.
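The hunks that follow add implementations of the new method to each concrete Authorizer. As a minimal sketch of what the interface change demands of implementers, here is a hypothetical constant-returning test double, mirroring the fakeAuthorizer and testState changes earlier in this diff; the type and package names are invented, and the remaining Authorizer methods are omitted.

```go
package tenantcapabilitiestest // hypothetical package, for illustration only

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/roachpb"
)

// denyCrossTenantReads stubs out the new interface method: every
// tenantcapabilities.Authorizer implementation must now answer
// HasCrossTenantRead, and a test double can do so with a constant.
type denyCrossTenantReads struct{}

func (denyCrossTenantReads) HasCrossTenantRead(
	ctx context.Context, tenID roachpb.TenantID,
) bool {
	return false // never allow reads of other tenants' spans
}
```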
diff --git a/pkg/multitenant/tenantcapabilities/tenantcapabilitiesauthorizer/allow_everything.go b/pkg/multitenant/tenantcapabilities/tenantcapabilitiesauthorizer/allow_everything.go index d327ea0512b4..51d02e67f1cb 100644 --- a/pkg/multitenant/tenantcapabilities/tenantcapabilitiesauthorizer/allow_everything.go +++ b/pkg/multitenant/tenantcapabilities/tenantcapabilitiesauthorizer/allow_everything.go @@ -29,6 +29,13 @@ func NewAllowEverythingAuthorizer() *AllowEverythingAuthorizer { return &AllowEverythingAuthorizer{} } +// HasCrossTenantRead implements the tenantcapabilities.Authorizer interface. +func (n *AllowEverythingAuthorizer) HasCrossTenantRead( + ctx context.Context, tenID roachpb.TenantID, +) bool { + return true +} + // HasCapabilityForBatch implements the tenantcapabilities.Authorizer interface. func (n *AllowEverythingAuthorizer) HasCapabilityForBatch( context.Context, roachpb.TenantID, *kvpb.BatchRequest, diff --git a/pkg/multitenant/tenantcapabilities/tenantcapabilitiesauthorizer/allow_nothing.go b/pkg/multitenant/tenantcapabilities/tenantcapabilitiesauthorizer/allow_nothing.go index 7c1f18e6a032..2af86695ecaf 100644 --- a/pkg/multitenant/tenantcapabilities/tenantcapabilitiesauthorizer/allow_nothing.go +++ b/pkg/multitenant/tenantcapabilities/tenantcapabilitiesauthorizer/allow_nothing.go @@ -30,6 +30,13 @@ func NewAllowNothingAuthorizer() *AllowNothingAuthorizer { return &AllowNothingAuthorizer{} } +// HasCrossTenantRead implements the tenantcapabilities.Authorizer interface. +func (n *AllowNothingAuthorizer) HasCrossTenantRead( + ctx context.Context, tenID roachpb.TenantID, +) bool { + return false +} + // HasCapabilityForBatch implements the tenantcapabilities.Authorizer interface. func (n *AllowNothingAuthorizer) HasCapabilityForBatch( context.Context, roachpb.TenantID, *kvpb.BatchRequest, diff --git a/pkg/multitenant/tenantcapabilities/tenantcapabilitiesauthorizer/authorizer.go b/pkg/multitenant/tenantcapabilities/tenantcapabilitiesauthorizer/authorizer.go index 8283da33e194..41c214909167 100644 --- a/pkg/multitenant/tenantcapabilities/tenantcapabilitiesauthorizer/authorizer.go +++ b/pkg/multitenant/tenantcapabilities/tenantcapabilitiesauthorizer/authorizer.go @@ -97,6 +97,25 @@ func New(settings *cluster.Settings, knobs *tenantcapabilities.TestingKnobs) *Au return a } +// HasCrossTenantRead implements the tenantcapabilities.Authorizer interface. +func (a *Authorizer) HasCrossTenantRead(ctx context.Context, tenID roachpb.TenantID) bool { + if tenID.IsSystem() { + // The system tenant can always read across tenants. + return true + } + _, mode := a.getMode(ctx, tenID) + switch mode { + case authorizerModeOn, authorizerModeV222: + return false + case authorizerModeAllowAll: + return true + default: + err := errors.AssertionFailedf("unknown authorizer mode: %d", mode) + logcrash.ReportOrPanic(ctx, &a.settings.SV, "%v", err) + return false + } +} + // HasCapabilityForBatch implements the tenantcapabilities.Authorizer interface. func (a *Authorizer) HasCapabilityForBatch( ctx context.Context, tenID roachpb.TenantID, ba *kvpb.BatchRequest, diff --git a/pkg/protos.bzl b/pkg/protos.bzl new file mode 100644 index 000000000000..cb5f5d59900a --- /dev/null +++ b/pkg/protos.bzl @@ -0,0 +1,156 @@ +# Code generated by generate-bazel-extra, DO NOT EDIT. + +# This lists all the direct and indirect proto_library dependencies of +# //pkg/server/serverpb:serverpb_proto.
+SERVER_PROTOS = [ + "//pkg/build:build_proto", + "//pkg/clusterversion:clusterversion_proto", + "//pkg/config/zonepb:zonepb_proto", + "//pkg/geo/geopb:geopb_proto", + "//pkg/gossip:gossip_proto", + "//pkg/jobs/jobspb:jobspb_proto", + "//pkg/kv/kvpb:kvpb_proto", + "//pkg/kv/kvserver/concurrency/isolation:isolation_proto", + "//pkg/kv/kvserver/concurrency/lock:lock_proto", + "//pkg/kv/kvserver/kvflowcontrol/kvflowcontrolpb:kvflowcontrolpb_proto", + "//pkg/kv/kvserver/kvserverpb:kvserverpb_proto", + "//pkg/kv/kvserver/liveness/livenesspb:livenesspb_proto", + "//pkg/kv/kvserver/loqrecovery/loqrecoverypb:loqrecoverypb_proto", + "//pkg/kv/kvserver/readsummary/rspb:rspb_proto", + "//pkg/multitenant/mtinfopb:mtinfopb_proto", + "//pkg/multitenant/tenantcapabilities/tenantcapabilitiespb:tenantcapabilitiespb_proto", + "//pkg/raft/raftpb:raftpb_proto", + "//pkg/roachpb:roachpb_proto", + "//pkg/rpc/rpcpb:rpcpb_proto", + "//pkg/server/diagnostics/diagnosticspb:diagnosticspb_proto", + "//pkg/server/serverpb:serverpb_proto", + "//pkg/server/status/statuspb:statuspb_proto", + "//pkg/settings:settings_proto", + "//pkg/sql/appstatspb:appstatspb_proto", + "//pkg/sql/catalog/catenumpb:catenumpb_proto", + "//pkg/sql/catalog/catpb:catpb_proto", + "//pkg/sql/catalog/descpb:descpb_proto", + "//pkg/sql/catalog/fetchpb:fetchpb_proto", + "//pkg/sql/contentionpb:contentionpb_proto", + "//pkg/sql/lex:lex_proto", + "//pkg/sql/schemachanger/scpb:scpb_proto", + "//pkg/sql/sem/semenumpb:semenumpb_proto", + "//pkg/sql/sessiondatapb:sessiondatapb_proto", + "//pkg/sql/sqlstats/insights:insights_proto", + "//pkg/sql/types:types_proto", + "//pkg/storage/enginepb:enginepb_proto", + "//pkg/ts/catalog:catalog_proto", + "//pkg/ts/tspb:tspb_proto", + "//pkg/util/admission/admissionpb:admissionpb_proto", + "//pkg/util/duration:duration_proto", + "//pkg/util/hlc:hlc_proto", + "//pkg/util/log/logpb:logpb_proto", + "//pkg/util/metric:metric_proto", + "//pkg/util/timeutil/pgdate:pgdate_proto", + "//pkg/util/tracing/tracingpb:tracingpb_proto", + "//pkg/util:util_proto", + "@com_github_cockroachdb_errors//errorspb:errorspb_proto", + "@com_github_gogo_protobuf//gogoproto:gogo_proto", + "@com_github_prometheus_client_model//io/prometheus/client:io_prometheus_client_proto", + "@com_google_protobuf//:any_proto", + "@com_google_protobuf//:descriptor_proto", + "@com_google_protobuf//:duration_proto", + "@com_google_protobuf//:timestamp_proto", + "@go_googleapis//google/api:annotations_proto", + "@go_googleapis//google/api:http_proto", +] + +# This lists all the in-tree .proto files required to build serverpb_proto. 
+PROTO_FILES = [ + "//pkg/build:info.proto", + "//pkg/clusterversion:cluster_version.proto", + "//pkg/config/zonepb:zone.proto", + "//pkg/geo/geopb:config.proto", + "//pkg/geo/geopb:geopb.proto", + "//pkg/gossip:gossip.proto", + "//pkg/jobs/jobspb:jobs.proto", + "//pkg/jobs/jobspb:schedule.proto", + "//pkg/kv/kvpb:api.proto", + "//pkg/kv/kvpb:errors.proto", + "//pkg/kv/kvserver/concurrency/isolation:levels.proto", + "//pkg/kv/kvserver/concurrency/lock:lock_waiter.proto", + "//pkg/kv/kvserver/concurrency/lock:locking.proto", + "//pkg/kv/kvserver/kvflowcontrol/kvflowcontrolpb:kvflowcontrol.proto", + "//pkg/kv/kvserver/kvserverpb:internal_raft.proto", + "//pkg/kv/kvserver/kvserverpb:lease_status.proto", + "//pkg/kv/kvserver/kvserverpb:proposer_kv.proto", + "//pkg/kv/kvserver/kvserverpb:raft.proto", + "//pkg/kv/kvserver/kvserverpb:range_log.proto", + "//pkg/kv/kvserver/kvserverpb:state.proto", + "//pkg/kv/kvserver/liveness/livenesspb:liveness.proto", + "//pkg/kv/kvserver/loqrecovery/loqrecoverypb:recovery.proto", + "//pkg/kv/kvserver/readsummary/rspb:summary.proto", + "//pkg/multitenant/mtinfopb:info.proto", + "//pkg/multitenant/tenantcapabilities/tenantcapabilitiespb:capabilities.proto", + "//pkg/raft/raftpb:raft.proto", + "//pkg/roachpb:data.proto", + "//pkg/roachpb:index_usage_stats.proto", + "//pkg/roachpb:internal.proto", + "//pkg/roachpb:io-formats.proto", + "//pkg/roachpb:metadata.proto", + "//pkg/roachpb:span_config.proto", + "//pkg/roachpb:span_stats.proto", + "//pkg/rpc/rpcpb:rpc.proto", + "//pkg/server/diagnostics/diagnosticspb:diagnostics.proto", + "//pkg/server/serverpb:admin.proto", + "//pkg/server/serverpb:authentication.proto", + "//pkg/server/serverpb:index_recommendations.proto", + "//pkg/server/serverpb:init.proto", + "//pkg/server/serverpb:migration.proto", + "//pkg/server/serverpb:status.proto", + "//pkg/server/status/statuspb:status.proto", + "//pkg/settings:encoding.proto", + "//pkg/sql/appstatspb:app_stats.proto", + "//pkg/sql/catalog/catenumpb:encoded_datum.proto", + "//pkg/sql/catalog/catenumpb:index.proto", + "//pkg/sql/catalog/catpb:catalog.proto", + "//pkg/sql/catalog/catpb:enum.proto", + "//pkg/sql/catalog/catpb:function.proto", + "//pkg/sql/catalog/catpb:privilege.proto", + "//pkg/sql/catalog/descpb:join_type.proto", + "//pkg/sql/catalog/descpb:lease.proto", + "//pkg/sql/catalog/descpb:locking.proto", + "//pkg/sql/catalog/descpb:structured.proto", + "//pkg/sql/catalog/fetchpb:index_fetch.proto", + "//pkg/sql/contentionpb:contention.proto", + "//pkg/sql/lex:encode.proto", + "//pkg/sql/schemachanger/scpb:elements.proto", + "//pkg/sql/schemachanger/scpb:scpb.proto", + "//pkg/sql/sem/semenumpb:constraint.proto", + "//pkg/sql/sessiondatapb:local_only_session_data.proto", + "//pkg/sql/sessiondatapb:session_data.proto", + "//pkg/sql/sessiondatapb:session_migration.proto", + "//pkg/sql/sessiondatapb:session_revival_token.proto", + "//pkg/sql/sqlstats/insights:insights.proto", + "//pkg/sql/types:types.proto", + "//pkg/storage/enginepb:engine.proto", + "//pkg/storage/enginepb:file_registry.proto", + "//pkg/storage/enginepb:mvcc.proto", + "//pkg/storage/enginepb:mvcc3.proto", + "//pkg/storage/enginepb:rocksdb.proto", + "//pkg/ts/catalog:chart_catalog.proto", + "//pkg/ts/tspb:timeseries.proto", + "//pkg/util/admission/admissionpb:admission_stats.proto", + "//pkg/util/admission/admissionpb:io_threshold.proto", + "//pkg/util/duration:duration.proto", + "//pkg/util/hlc:legacy_timestamp.proto", + "//pkg/util/hlc:timestamp.proto", + "//pkg/util/log/logpb:event.proto", + 
"//pkg/util/log/logpb:log.proto", + "//pkg/util/metric:metric.proto", + "//pkg/util/timeutil/pgdate:pgdate.proto", + "//pkg/util/tracing/tracingpb:recorded_span.proto", + "//pkg/util/tracing/tracingpb:tracing.proto", + "//pkg/util:unresolved_addr.proto", + "@com_github_cockroachdb_errors//errorspb:errors.proto", + "@com_github_cockroachdb_errors//errorspb:hintdetail.proto", + "@com_github_cockroachdb_errors//errorspb:markers.proto", + "@com_github_cockroachdb_errors//errorspb:tags.proto", + "@com_github_cockroachdb_errors//errorspb:testing.proto", + "@com_github_prometheus_client_model//io/prometheus/client:metrics.proto", +] diff --git a/pkg/raft/BUILD.bazel b/pkg/raft/BUILD.bazel index 7b4dbb17adc5..4061909348c0 100644 --- a/pkg/raft/BUILD.bazel +++ b/pkg/raft/BUILD.bazel @@ -19,6 +19,7 @@ go_library( importpath = "github.com/cockroachdb/cockroach/pkg/raft", visibility = ["//visibility:public"], deps = [ + "//pkg/clusterversion", "//pkg/raft/confchange", "//pkg/raft/quorum", "//pkg/raft/raftpb", @@ -56,6 +57,7 @@ go_test( "//pkg/raft/raftstoreliveness", "//pkg/raft/rafttest", "//pkg/raft/tracker", + "//pkg/settings/cluster", "@com_github_cockroachdb_datadriven//:datadriven", "@com_github_stretchr_testify//assert", "@com_github_stretchr_testify//require", diff --git a/pkg/raft/confchange/confchange.go b/pkg/raft/confchange/confchange.go index 8e204b4ffc3d..313290c8505c 100644 --- a/pkg/raft/confchange/confchange.go +++ b/pkg/raft/confchange/confchange.go @@ -271,10 +271,11 @@ func (c Changer) initProgress( // at all (and will thus likely need a snapshot), though the app may // have applied a snapshot out of band before adding the replica (thus // making the first index the better choice). - Match: 0, - Next: max(c.LastIndex, 1), // invariant: Match < Next - Inflights: tracker.NewInflights(c.MaxInflight, c.MaxInflightBytes), - IsLearner: isLearner, + Match: 0, + MatchCommit: 0, + Next: max(c.LastIndex, 1), // invariant: Match < Next + Inflights: tracker.NewInflights(c.MaxInflight, c.MaxInflightBytes), + IsLearner: isLearner, // When a node is first added, we should mark it as recently active. // Otherwise, CheckQuorum may cause us to step down if it is invoked // before the added node has had a chance to communicate with us. diff --git a/pkg/raft/confchange/testdata/joint_autoleave.txt b/pkg/raft/confchange/testdata/joint_autoleave.txt index be138df3cfaa..cea89288bdcb 100644 --- a/pkg/raft/confchange/testdata/joint_autoleave.txt +++ b/pkg/raft/confchange/testdata/joint_autoleave.txt @@ -5,16 +5,16 @@ simple v1 ---- voters=(1) -1: StateProbe match=0 next=1 +1: StateProbe match=0 next=1 sentCommit=0 matchCommit=0 # Autoleave is reflected in the config. enter-joint autoleave=true v2 v3 ---- voters=(1 2 3)&&(1) autoleave -1: StateProbe match=0 next=1 -2: StateProbe match=0 next=1 -3: StateProbe match=0 next=1 +1: StateProbe match=0 next=1 sentCommit=0 matchCommit=0 +2: StateProbe match=0 next=1 sentCommit=0 matchCommit=0 +3: StateProbe match=0 next=1 sentCommit=0 matchCommit=0 # Can't enter-joint twice, even if autoleave changes. 
enter-joint autoleave=false @@ -24,6 +24,6 @@ config is already joint leave-joint ---- voters=(1 2 3) -1: StateProbe match=0 next=1 -2: StateProbe match=0 next=1 -3: StateProbe match=0 next=1 +1: StateProbe match=0 next=1 sentCommit=0 matchCommit=0 +2: StateProbe match=0 next=1 sentCommit=0 matchCommit=0 +3: StateProbe match=0 next=1 sentCommit=0 matchCommit=0 diff --git a/pkg/raft/confchange/testdata/joint_idempotency.txt b/pkg/raft/confchange/testdata/joint_idempotency.txt index a47f3a662c88..5851e66294f3 100644 --- a/pkg/raft/confchange/testdata/joint_idempotency.txt +++ b/pkg/raft/confchange/testdata/joint_idempotency.txt @@ -5,19 +5,19 @@ simple v1 ---- voters=(1) -1: StateProbe match=0 next=1 +1: StateProbe match=0 next=1 sentCommit=0 matchCommit=0 enter-joint r1 r2 r9 v2 v3 v4 v2 v3 v4 l2 l2 r4 r4 l1 l1 ---- voters=(3)&&(1) learners=(2) learners_next=(1) -1: StateProbe match=0 next=1 -2: StateProbe match=0 next=1 learner -3: StateProbe match=0 next=1 +1: StateProbe match=0 next=1 sentCommit=0 matchCommit=0 +2: StateProbe match=0 next=1 sentCommit=0 matchCommit=0 learner +3: StateProbe match=0 next=1 sentCommit=0 matchCommit=0 leave-joint ---- voters=(3) learners=(1 2) -1: StateProbe match=0 next=1 learner -2: StateProbe match=0 next=1 learner -3: StateProbe match=0 next=1 +1: StateProbe match=0 next=1 sentCommit=0 matchCommit=0 learner +2: StateProbe match=0 next=1 sentCommit=0 matchCommit=0 learner +3: StateProbe match=0 next=1 sentCommit=0 matchCommit=0 diff --git a/pkg/raft/confchange/testdata/joint_learners_next.txt b/pkg/raft/confchange/testdata/joint_learners_next.txt index 6faddfe7e973..a90d02127abd 100644 --- a/pkg/raft/confchange/testdata/joint_learners_next.txt +++ b/pkg/raft/confchange/testdata/joint_learners_next.txt @@ -8,17 +8,17 @@ simple v1 ---- voters=(1) -1: StateProbe match=0 next=1 +1: StateProbe match=0 next=1 sentCommit=0 matchCommit=0 enter-joint v2 l1 ---- voters=(2)&&(1) learners_next=(1) -1: StateProbe match=0 next=1 -2: StateProbe match=0 next=1 +1: StateProbe match=0 next=1 sentCommit=0 matchCommit=0 +2: StateProbe match=0 next=1 sentCommit=0 matchCommit=0 leave-joint ---- voters=(2) learners=(1) -1: StateProbe match=0 next=1 learner -2: StateProbe match=0 next=1 +1: StateProbe match=0 next=1 sentCommit=0 matchCommit=0 learner +2: StateProbe match=0 next=1 sentCommit=0 matchCommit=0 diff --git a/pkg/raft/confchange/testdata/joint_safety.txt b/pkg/raft/confchange/testdata/joint_safety.txt index 75d11b199e02..84ba199d82f4 100644 --- a/pkg/raft/confchange/testdata/joint_safety.txt +++ b/pkg/raft/confchange/testdata/joint_safety.txt @@ -15,7 +15,7 @@ simple v1 ---- voters=(1) -1: StateProbe match=0 next=3 +1: StateProbe match=0 next=3 sentCommit=0 matchCommit=0 leave-joint ---- @@ -25,7 +25,7 @@ can't leave a non-joint config enter-joint ---- voters=(1)&&(1) -1: StateProbe match=0 next=3 +1: StateProbe match=0 next=3 sentCommit=0 matchCommit=0 enter-joint ---- @@ -34,7 +34,7 @@ config is already joint leave-joint ---- voters=(1) -1: StateProbe match=0 next=3 +1: StateProbe match=0 next=3 sentCommit=0 matchCommit=0 leave-joint ---- @@ -45,10 +45,10 @@ enter-joint r1 v2 v3 l4 ---- voters=(2 3)&&(1) learners=(4) -1: StateProbe match=0 next=3 -2: StateProbe match=0 next=9 -3: StateProbe match=0 next=9 -4: StateProbe match=0 next=9 learner +1: StateProbe match=0 next=3 sentCommit=0 matchCommit=0 +2: StateProbe match=0 next=9 sentCommit=0 matchCommit=0 +3: StateProbe match=0 next=9 sentCommit=0 matchCommit=0 +4: StateProbe match=0 next=9 sentCommit=0 matchCommit=0 
learner enter-joint ---- @@ -67,15 +67,15 @@ can't apply simple config change in joint config leave-joint ---- voters=(2 3) learners=(4) -2: StateProbe match=0 next=9 -3: StateProbe match=0 next=9 -4: StateProbe match=0 next=9 learner +2: StateProbe match=0 next=9 sentCommit=0 matchCommit=0 +3: StateProbe match=0 next=9 sentCommit=0 matchCommit=0 +4: StateProbe match=0 next=9 sentCommit=0 matchCommit=0 learner simple l9 ---- voters=(2 3) learners=(4 9) -2: StateProbe match=0 next=9 -3: StateProbe match=0 next=9 -4: StateProbe match=0 next=9 learner -9: StateProbe match=0 next=14 learner +2: StateProbe match=0 next=9 sentCommit=0 matchCommit=0 +3: StateProbe match=0 next=9 sentCommit=0 matchCommit=0 +4: StateProbe match=0 next=9 sentCommit=0 matchCommit=0 learner +9: StateProbe match=0 next=14 sentCommit=0 matchCommit=0 learner diff --git a/pkg/raft/confchange/testdata/simple_idempotency.txt b/pkg/raft/confchange/testdata/simple_idempotency.txt index e31c43b7ffff..da7c95cabf73 100644 --- a/pkg/raft/confchange/testdata/simple_idempotency.txt +++ b/pkg/raft/confchange/testdata/simple_idempotency.txt @@ -2,68 +2,68 @@ simple v1 ---- voters=(1) -1: StateProbe match=0 next=1 +1: StateProbe match=0 next=1 sentCommit=0 matchCommit=0 simple v1 ---- voters=(1) -1: StateProbe match=0 next=1 +1: StateProbe match=0 next=1 sentCommit=0 matchCommit=0 simple v2 ---- voters=(1 2) -1: StateProbe match=0 next=1 -2: StateProbe match=0 next=2 +1: StateProbe match=0 next=1 sentCommit=0 matchCommit=0 +2: StateProbe match=0 next=2 sentCommit=0 matchCommit=0 simple l1 ---- voters=(2) learners=(1) -1: StateProbe match=0 next=1 learner -2: StateProbe match=0 next=2 +1: StateProbe match=0 next=1 sentCommit=0 matchCommit=0 learner +2: StateProbe match=0 next=2 sentCommit=0 matchCommit=0 simple l1 ---- voters=(2) learners=(1) -1: StateProbe match=0 next=1 learner -2: StateProbe match=0 next=2 +1: StateProbe match=0 next=1 sentCommit=0 matchCommit=0 learner +2: StateProbe match=0 next=2 sentCommit=0 matchCommit=0 simple r1 ---- voters=(2) -2: StateProbe match=0 next=2 +2: StateProbe match=0 next=2 sentCommit=0 matchCommit=0 simple r1 ---- voters=(2) -2: StateProbe match=0 next=2 +2: StateProbe match=0 next=2 sentCommit=0 matchCommit=0 simple v3 ---- voters=(2 3) -2: StateProbe match=0 next=2 -3: StateProbe match=0 next=7 +2: StateProbe match=0 next=2 sentCommit=0 matchCommit=0 +3: StateProbe match=0 next=7 sentCommit=0 matchCommit=0 simple r3 ---- voters=(2) -2: StateProbe match=0 next=2 +2: StateProbe match=0 next=2 sentCommit=0 matchCommit=0 simple r3 ---- voters=(2) -2: StateProbe match=0 next=2 +2: StateProbe match=0 next=2 sentCommit=0 matchCommit=0 simple r4 ---- voters=(2) -2: StateProbe match=0 next=2 +2: StateProbe match=0 next=2 sentCommit=0 matchCommit=0 diff --git a/pkg/raft/confchange/testdata/simple_promote_demote.txt b/pkg/raft/confchange/testdata/simple_promote_demote.txt index b4b770de7c56..ebe3afa32d96 100644 --- a/pkg/raft/confchange/testdata/simple_promote_demote.txt +++ b/pkg/raft/confchange/testdata/simple_promote_demote.txt @@ -4,22 +4,22 @@ simple v1 ---- voters=(1) -1: StateProbe match=0 next=1 +1: StateProbe match=0 next=1 sentCommit=0 matchCommit=0 simple v2 ---- voters=(1 2) -1: StateProbe match=0 next=1 -2: StateProbe match=0 next=1 +1: StateProbe match=0 next=1 sentCommit=0 matchCommit=0 +2: StateProbe match=0 next=1 sentCommit=0 matchCommit=0 simple v3 ---- voters=(1 2 3) -1: StateProbe match=0 next=1 -2: StateProbe match=0 next=1 -3: StateProbe match=0 next=2 +1: StateProbe match=0 next=1 
sentCommit=0 matchCommit=0 +2: StateProbe match=0 next=1 sentCommit=0 matchCommit=0 +3: StateProbe match=0 next=2 sentCommit=0 matchCommit=0 # Can atomically demote and promote without a hitch. # This is pointless, but possible. @@ -27,18 +27,18 @@ simple l1 v1 ---- voters=(1 2 3) -1: StateProbe match=0 next=1 -2: StateProbe match=0 next=1 -3: StateProbe match=0 next=2 +1: StateProbe match=0 next=1 sentCommit=0 matchCommit=0 +2: StateProbe match=0 next=1 sentCommit=0 matchCommit=0 +3: StateProbe match=0 next=2 sentCommit=0 matchCommit=0 # Can demote a voter. simple l2 ---- voters=(1 3) learners=(2) -1: StateProbe match=0 next=1 -2: StateProbe match=0 next=1 learner -3: StateProbe match=0 next=2 +1: StateProbe match=0 next=1 sentCommit=0 matchCommit=0 +2: StateProbe match=0 next=1 sentCommit=0 matchCommit=0 learner +3: StateProbe match=0 next=2 sentCommit=0 matchCommit=0 # Can atomically promote and demote the same voter. # This is pointless, but possible. @@ -46,15 +46,15 @@ simple v2 l2 ---- voters=(1 3) learners=(2) -1: StateProbe match=0 next=1 -2: StateProbe match=0 next=1 learner -3: StateProbe match=0 next=2 +1: StateProbe match=0 next=1 sentCommit=0 matchCommit=0 +2: StateProbe match=0 next=1 sentCommit=0 matchCommit=0 learner +3: StateProbe match=0 next=2 sentCommit=0 matchCommit=0 # Can promote a voter. simple v2 ---- voters=(1 2 3) -1: StateProbe match=0 next=1 -2: StateProbe match=0 next=1 -3: StateProbe match=0 next=2 +1: StateProbe match=0 next=1 sentCommit=0 matchCommit=0 +2: StateProbe match=0 next=1 sentCommit=0 matchCommit=0 +3: StateProbe match=0 next=2 sentCommit=0 matchCommit=0 diff --git a/pkg/raft/confchange/testdata/simple_safety.txt b/pkg/raft/confchange/testdata/simple_safety.txt index 6566c5fccf7d..6d53f1335acc 100644 --- a/pkg/raft/confchange/testdata/simple_safety.txt +++ b/pkg/raft/confchange/testdata/simple_safety.txt @@ -7,15 +7,15 @@ simple v1 ---- voters=(1) -1: StateProbe match=0 next=1 +1: StateProbe match=0 next=1 sentCommit=0 matchCommit=0 simple v2 l3 ---- voters=(1 2) learners=(3) -1: StateProbe match=0 next=1 -2: StateProbe match=0 next=2 -3: StateProbe match=0 next=2 learner +1: StateProbe match=0 next=1 sentCommit=0 matchCommit=0 +2: StateProbe match=0 next=2 sentCommit=0 matchCommit=0 +3: StateProbe match=0 next=2 sentCommit=0 matchCommit=0 learner simple r1 v5 @@ -46,11 +46,11 @@ simple l2 l3 l4 l5 ---- voters=(1) learners=(2 3 4 5) -1: StateProbe match=0 next=1 -2: StateProbe match=0 next=2 learner -3: StateProbe match=0 next=2 learner -4: StateProbe match=0 next=8 learner -5: StateProbe match=0 next=8 learner +1: StateProbe match=0 next=1 sentCommit=0 matchCommit=0 +2: StateProbe match=0 next=2 sentCommit=0 matchCommit=0 learner +3: StateProbe match=0 next=2 sentCommit=0 matchCommit=0 learner +4: StateProbe match=0 next=8 sentCommit=0 matchCommit=0 learner +5: StateProbe match=0 next=8 sentCommit=0 matchCommit=0 learner simple r1 @@ -61,4 +61,4 @@ simple r2 r3 r4 r5 ---- voters=(1) -1: StateProbe match=0 next=1 +1: StateProbe match=0 next=1 sentCommit=0 matchCommit=0 diff --git a/pkg/raft/confchange/testdata/update.txt b/pkg/raft/confchange/testdata/update.txt index ac47bf3e8cdf..3dcd88c0b6e9 100644 --- a/pkg/raft/confchange/testdata/update.txt +++ b/pkg/raft/confchange/testdata/update.txt @@ -6,18 +6,18 @@ simple v1 ---- voters=(1) -1: StateProbe match=0 next=1 +1: StateProbe match=0 next=1 sentCommit=0 matchCommit=0 simple v2 u1 ---- voters=(1 2) -1: StateProbe match=0 next=1 -2: StateProbe match=0 next=1 +1: StateProbe match=0 next=1 
sentCommit=0 matchCommit=0 +2: StateProbe match=0 next=1 sentCommit=0 matchCommit=0 simple u1 u2 u3 u1 u2 u3 ---- voters=(1 2) -1: StateProbe match=0 next=1 -2: StateProbe match=0 next=1 +1: StateProbe match=0 next=1 sentCommit=0 matchCommit=0 +2: StateProbe match=0 next=1 sentCommit=0 matchCommit=0 diff --git a/pkg/raft/confchange/testdata/zero.txt b/pkg/raft/confchange/testdata/zero.txt index 226ade088323..067e8223d197 100644 --- a/pkg/raft/confchange/testdata/zero.txt +++ b/pkg/raft/confchange/testdata/zero.txt @@ -3,4 +3,4 @@ simple v1 r0 v0 l0 ---- voters=(1) -1: StateProbe match=0 next=1 +1: StateProbe match=0 next=1 sentCommit=0 matchCommit=0 diff --git a/pkg/raft/log.go b/pkg/raft/log.go index 5707bd44d46c..ff0a02c3a9fe 100644 --- a/pkg/raft/log.go +++ b/pkg/raft/log.go @@ -349,7 +349,7 @@ func (l *raftLog) lastIndex() uint64 { // commitTo bumps the commit index to the given value if it is higher than the // current commit index. -func (l *raftLog) commitTo(mark logMark) { +func (l *raftLog) commitTo(mark LogMark) { // TODO(pav-kv): it is only safe to update the commit index if our log is // consistent with the mark.term leader. If the mark.term leader sees the // mark.index entry as committed, all future leaders have it in the log. It is @@ -357,11 +357,11 @@ func (l *raftLog) commitTo(mark logMark) { // accTerm >= mark.term. Do this once raftLog/unstable tracks the accTerm. // never decrease commit - if l.committed < mark.index { - if l.lastIndex() < mark.index { - l.logger.Panicf("tocommit(%d) is out of range [lastIndex(%d)]. Was the raft log corrupted, truncated, or lost?", mark.index, l.lastIndex()) + if l.committed < mark.Index { + if l.lastIndex() < mark.Index { + l.logger.Panicf("tocommit(%d) is out of range [lastIndex(%d)]. Was the raft log corrupted, truncated, or lost?", mark.Index, l.lastIndex()) } - l.committed = mark.index + l.committed = mark.Index } } @@ -400,7 +400,7 @@ func (l *raftLog) acceptApplying(i uint64, size entryEncodingSize, allowUnstable i < l.maxAppliableIndex(allowUnstable) } -func (l *raftLog) stableTo(mark logMark) { l.unstable.stableTo(mark) } +func (l *raftLog) stableTo(mark LogMark) { l.unstable.stableTo(mark) } func (l *raftLog) stableSnapTo(i uint64) { l.unstable.stableSnapTo(i) } diff --git a/pkg/raft/log_test.go b/pkg/raft/log_test.go index 5a44ebde337c..0ef9923c409a 100644 --- a/pkg/raft/log_test.go +++ b/pkg/raft/log_test.go @@ -336,8 +336,8 @@ func TestHasNextCommittedEnts(t *testing.T) { require.True(t, raftLog.append(init)) require.NoError(t, storage.Append(init.entries[:1])) - raftLog.stableTo(logMark{term: init.term, index: 4}) - raftLog.commitTo(logMark{term: init.term, index: 5}) + raftLog.stableTo(LogMark{Term: init.term, Index: 4}) + raftLog.commitTo(LogMark{Term: init.term, Index: 5}) raftLog.appliedTo(tt.applied, 0 /* size */) raftLog.acceptApplying(tt.applying, 0 /* size */, tt.allowUnstable) raftLog.applyingEntsPaused = tt.paused @@ -390,8 +390,8 @@ func TestNextCommittedEnts(t *testing.T) { require.True(t, raftLog.append(init)) require.NoError(t, storage.Append(init.entries[:1])) - raftLog.stableTo(logMark{term: init.term, index: 4}) - raftLog.commitTo(logMark{term: init.term, index: 5}) + raftLog.stableTo(LogMark{Term: init.term, Index: 4}) + raftLog.commitTo(LogMark{Term: init.term, Index: 5}) raftLog.appliedTo(tt.applied, 0 /* size */) raftLog.acceptApplying(tt.applying, 0 /* size */, tt.allowUnstable) raftLog.applyingEntsPaused = tt.paused @@ -444,8 +444,8 @@ func TestAcceptApplying(t *testing.T) { require.True(t, 
raftLog.append(init)) require.NoError(t, storage.Append(init.entries[:1])) - raftLog.stableTo(logMark{term: init.term, index: 4}) - raftLog.commitTo(logMark{term: init.term, index: 5}) + raftLog.stableTo(LogMark{Term: init.term, Index: 4}) + raftLog.commitTo(LogMark{Term: init.term, Index: 5}) raftLog.appliedTo(3, 0 /* size */) raftLog.acceptApplying(tt.index, tt.size, tt.allowUnstable) @@ -488,8 +488,8 @@ func TestAppliedTo(t *testing.T) { require.True(t, raftLog.append(init)) require.NoError(t, storage.Append(init.entries[:1])) - raftLog.stableTo(logMark{term: init.term, index: 4}) - raftLog.commitTo(logMark{term: init.term, index: 5}) + raftLog.stableTo(LogMark{Term: init.term, Index: 4}) + raftLog.commitTo(LogMark{Term: init.term, Index: 5}) raftLog.appliedTo(3, 0 /* size */) raftLog.acceptApplying(5, maxSize+overshoot, false /* allowUnstable */) @@ -536,13 +536,13 @@ func TestCommitTo(t *testing.T) { init := entryID{}.append(1, 2, 3) commit := uint64(2) for _, tt := range []struct { - commit logMark + commit LogMark want uint64 panic bool }{ - {commit: logMark{term: 3, index: 3}, want: 3}, - {commit: logMark{term: 3, index: 2}, want: 2}, // commit does not regress - {commit: logMark{term: 3, index: 4}, panic: true}, // commit out of range -> panic + {commit: LogMark{Term: 3, Index: 3}, want: 3}, + {commit: LogMark{Term: 3, Index: 2}, want: 2}, // commit does not regress + {commit: LogMark{Term: 3, Index: 4}, panic: true}, // commit out of range -> panic // TODO(pav-kv): add commit marks with a different term. } { t.Run("", func(t *testing.T) { @@ -563,18 +563,18 @@ func TestCommitTo(t *testing.T) { func TestStableTo(t *testing.T) { init := entryID{}.append(1, 2) for _, tt := range []struct { - mark logMark + mark LogMark want uint64 // prev.index }{ // out of bounds - {mark: logMark{term: 2, index: 0}, want: 0}, - {mark: logMark{term: 2, index: 3}, want: 0}, + {mark: LogMark{Term: 2, Index: 0}, want: 0}, + {mark: LogMark{Term: 2, Index: 3}, want: 0}, // outdated accepted term - {mark: logMark{term: 1, index: 1}, want: 0}, - {mark: logMark{term: 1, index: 2}, want: 0}, + {mark: LogMark{Term: 1, Index: 1}, want: 0}, + {mark: LogMark{Term: 1, Index: 2}, want: 0}, // successful acknowledgements - {mark: logMark{term: 2, index: 1}, want: 1}, - {mark: logMark{term: 2, index: 2}, want: 2}, + {mark: LogMark{Term: 2, Index: 1}, want: 1}, + {mark: LogMark{Term: 2, Index: 2}, want: 2}, } { t.Run("", func(t *testing.T) { raftLog := newLog(NewMemoryStorage(), discardLogger) @@ -590,24 +590,24 @@ func TestStableToWithSnap(t *testing.T) { snap := pb.Snapshot{Metadata: pb.SnapshotMetadata{Term: snapID.term, Index: snapID.index}} for _, tt := range []struct { sl logSlice - to logMark + to LogMark want uint64 // prev.index }{ // out of bounds - {sl: snapID.append(), to: logMark{term: 1, index: 2}, want: 5}, - {sl: snapID.append(), to: logMark{term: 2, index: 6}, want: 5}, - {sl: snapID.append(), to: logMark{term: 2, index: 7}, want: 5}, - {sl: snapID.append(6, 6, 8), to: logMark{term: 2, index: 4}, want: 5}, - {sl: snapID.append(6, 6, 8), to: logMark{term: 2, index: 10}, want: 5}, + {sl: snapID.append(), to: LogMark{Term: 1, Index: 2}, want: 5}, + {sl: snapID.append(), to: LogMark{Term: 2, Index: 6}, want: 5}, + {sl: snapID.append(), to: LogMark{Term: 2, Index: 7}, want: 5}, + {sl: snapID.append(6, 6, 8), to: LogMark{Term: 2, Index: 4}, want: 5}, + {sl: snapID.append(6, 6, 8), to: LogMark{Term: 2, Index: 10}, want: 5}, // successful acknowledgements - {sl: snapID.append(6, 6, 8), to: logMark{term: 8, 
index: 5}, want: 5}, - {sl: snapID.append(6, 6, 8), to: logMark{term: 8, index: 6}, want: 6}, - {sl: snapID.append(6, 6, 8), to: logMark{term: 8, index: 7}, want: 7}, - {sl: snapID.append(6, 6, 8), to: logMark{term: 8, index: 8}, want: 8}, + {sl: snapID.append(6, 6, 8), to: LogMark{Term: 8, Index: 5}, want: 5}, + {sl: snapID.append(6, 6, 8), to: LogMark{Term: 8, Index: 6}, want: 6}, + {sl: snapID.append(6, 6, 8), to: LogMark{Term: 8, Index: 7}, want: 7}, + {sl: snapID.append(6, 6, 8), to: LogMark{Term: 8, Index: 8}, want: 8}, // mismatching accepted term - {sl: snapID.append(6, 6, 8), to: logMark{term: 3, index: 6}, want: 5}, - {sl: snapID.append(6, 6, 8), to: logMark{term: 3, index: 7}, want: 5}, - {sl: snapID.append(6, 6, 8), to: logMark{term: 3, index: 8}, want: 5}, + {sl: snapID.append(6, 6, 8), to: LogMark{Term: 3, Index: 6}, want: 5}, + {sl: snapID.append(6, 6, 8), to: LogMark{Term: 3, Index: 7}, want: 5}, + {sl: snapID.append(6, 6, 8), to: LogMark{Term: 3, Index: 8}, want: 5}, } { t.Run("", func(t *testing.T) { s := NewMemoryStorage() diff --git a/pkg/raft/log_unstable.go b/pkg/raft/log_unstable.go index 28d02114cb31..cac77f55e7de 100644 --- a/pkg/raft/log_unstable.go +++ b/pkg/raft/log_unstable.go @@ -172,34 +172,34 @@ func (u *unstable) acceptInProgress() { // // The method makes sure the entries can not be overwritten by an in-progress // log append. See the related comment in newStorageAppendRespMsg. -func (u *unstable) stableTo(mark logMark) { - if mark.term != u.term { +func (u *unstable) stableTo(mark LogMark) { + if mark.Term != u.term { // The last accepted term has changed. Ignore. This is possible if part or // all of the unstable log was replaced between that time that a set of // entries started to be written to stable storage and when they finished. u.logger.Infof("mark (term,index)=(%d,%d) mismatched the last accepted "+ - "term %d in unstable log; ignoring ", mark.term, mark.index, u.term) + "term %d in unstable log; ignoring ", mark.Term, mark.Index, u.term) return } - if u.snapshot != nil && mark.index == u.snapshot.Metadata.Index { + if u.snapshot != nil && mark.Index == u.snapshot.Metadata.Index { // Index matched unstable snapshot, not unstable entry. Ignore. - u.logger.Infof("entry at index %d matched unstable snapshot; ignoring", mark.index) + u.logger.Infof("entry at index %d matched unstable snapshot; ignoring", mark.Index) return } - if mark.index <= u.prev.index || mark.index > u.lastIndex() { + if mark.Index <= u.prev.index || mark.Index > u.lastIndex() { // Unstable entry missing. Ignore. - u.logger.Infof("entry at index %d missing from unstable log; ignoring", mark.index) + u.logger.Infof("entry at index %d missing from unstable log; ignoring", mark.Index) return } if u.snapshot != nil { u.logger.Panicf("mark %+v acked earlier than the snapshot(in-progress=%t): %s", mark, u.snapshotInProgress, DescribeSnapshot(*u.snapshot)) } - u.logSlice = u.forward(mark.index) + u.logSlice = u.forward(mark.Index) // TODO(pav-kv): why can mark.index overtake u.entryInProgress? Probably bugs // in tests using the log writes incorrectly, e.g. TestLeaderStartReplication // takes nextUnstableEnts() without acceptInProgress(). 
- u.entryInProgress = max(u.entryInProgress, mark.index) + u.entryInProgress = max(u.entryInProgress, mark.Index) u.shrinkEntriesArray() } diff --git a/pkg/raft/log_unstable_test.go b/pkg/raft/log_unstable_test.go index 38e50dd2dd18..175d749d5cd2 100644 --- a/pkg/raft/log_unstable_test.go +++ b/pkg/raft/log_unstable_test.go @@ -472,7 +472,7 @@ func TestUnstableStableTo(t *testing.T) { u.stableSnapTo(u.snapshot.Metadata.Index) } u.checkInvariants(t) - u.stableTo(logMark{term: tt.term, index: tt.index}) + u.stableTo(LogMark{Term: tt.term, Index: tt.index}) u.checkInvariants(t) require.Equal(t, tt.wprev, u.prev.index) require.Equal(t, tt.wentryInProgress, u.entryInProgress) diff --git a/pkg/raft/node_test.go b/pkg/raft/node_test.go index 69299ac96ef0..7fc88435f193 100644 --- a/pkg/raft/node_test.go +++ b/pkg/raft/node_test.go @@ -27,6 +27,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/raft/raftpb" "github.com/cockroachdb/cockroach/pkg/raft/raftstoreliveness" + "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -161,9 +162,10 @@ func TestNodePropose(t *testing.T) { n.Propose(context.TODO(), []byte("somedata")) n.Stop() - require.Len(t, msgs, 1) - assert.Equal(t, raftpb.MsgProp, msgs[0].Type) - assert.Equal(t, []byte("somedata"), msgs[0].Entries[0].Data) + require.Len(t, msgs, 2) + assert.Equal(t, raftpb.MsgFortifyLeaderResp, msgs[0].Type) + assert.Equal(t, raftpb.MsgProp, msgs[1].Type) + assert.Equal(t, []byte("somedata"), msgs[1].Entries[0].Data) } // TestDisableProposalForwarding ensures that proposals are not forwarded to @@ -229,9 +231,10 @@ func TestNodeProposeConfig(t *testing.T) { n.ProposeConfChange(context.TODO(), cc) n.Stop() - require.Len(t, msgs, 1) - assert.Equal(t, raftpb.MsgProp, msgs[0].Type) - assert.Equal(t, ccdata, msgs[0].Entries[0].Data) + require.Len(t, msgs, 2) + assert.Equal(t, raftpb.MsgFortifyLeaderResp, msgs[0].Type) + assert.Equal(t, raftpb.MsgProp, msgs[1].Type) + assert.Equal(t, ccdata, msgs[1].Entries[0].Data) } // TestNodeProposeAddDuplicateNode ensures that two proposes to add the same node should @@ -374,14 +377,11 @@ func TestNodeProposeWaitDropped(t *testing.T) { } n.Advance() } - proposalTimeout := time.Millisecond * 100 - ctx, cancel := context.WithTimeout(context.Background(), proposalTimeout) - // propose with cancel should be cancelled earyly if dropped - assert.Equal(t, ErrProposalDropped, n.Propose(ctx, droppingMsg)) - cancel() + assert.Equal(t, ErrProposalDropped, n.Propose(context.Background(), droppingMsg)) n.Stop() - require.Empty(t, msgs) + require.Len(t, msgs, 1) + assert.Equal(t, raftpb.MsgFortifyLeaderResp, msgs[0].Type) } // TestNodeTick ensures that node.Tick() will increase the @@ -462,7 +462,7 @@ func TestNodeStart(t *testing.T) { HardState: raftpb.HardState{Term: 2, Commit: 3, Vote: 1, Lead: 1, LeadEpoch: 1}, Entries: nil, CommittedEntries: []raftpb.Entry{{Term: 2, Index: 3, Data: []byte("foo")}}, - MustSync: false, + MustSync: true, }, } storage := NewMemoryStorage() @@ -474,6 +474,7 @@ func TestNodeStart(t *testing.T) { MaxSizePerMsg: noLimit, MaxInflightMsgs: 256, StoreLiveness: raftstoreliveness.AlwaysLive{}, + CRDBVersion: cluster.MakeTestingClusterSettings().Version, } StartNode(c, []Peer{{ID: 1}}) ctx, cancel, n := newNodeTestHarness(context.Background(), t, c, Peer{ID: 1}) @@ -548,6 +549,7 @@ func TestNodeRestart(t *testing.T) { MaxSizePerMsg: noLimit, MaxInflightMsgs: 256, StoreLiveness: 
raftstoreliveness.AlwaysLive{}, + CRDBVersion: cluster.MakeTestingClusterSettings().Version, } n := RestartNode(c) defer n.Stop() @@ -597,6 +599,7 @@ func TestNodeRestartFromSnapshot(t *testing.T) { MaxSizePerMsg: noLimit, MaxInflightMsgs: 256, StoreLiveness: raftstoreliveness.AlwaysLive{}, + CRDBVersion: cluster.MakeTestingClusterSettings().Version, } n := RestartNode(c) defer n.Stop() @@ -621,6 +624,7 @@ func TestNodeAdvance(t *testing.T) { MaxSizePerMsg: noLimit, MaxInflightMsgs: 256, StoreLiveness: raftstoreliveness.AlwaysLive{}, + CRDBVersion: cluster.MakeTestingClusterSettings().Version, } ctx, cancel, n := newNodeTestHarness(context.Background(), t, c) defer cancel() diff --git a/pkg/raft/quorum/BUILD.bazel b/pkg/raft/quorum/BUILD.bazel index 662c5aaea5c6..ddbdb606687c 100644 --- a/pkg/raft/quorum/BUILD.bazel +++ b/pkg/raft/quorum/BUILD.bazel @@ -12,7 +12,10 @@ go_library( ], importpath = "github.com/cockroachdb/cockroach/pkg/raft/quorum", visibility = ["//visibility:public"], - deps = ["//pkg/raft/raftpb"], + deps = [ + "//pkg/raft/raftpb", + "//pkg/util/hlc", + ], ) go_test( name = "quorum_test", srcs = [ "bench_test.go", "datadriven_test.go", "quick_test.go", + "quorum_test.go", ], data = glob(["testdata/**"]), embed = [":quorum"], deps = [ "//pkg/raft/raftpb", + "//pkg/util/hlc", + "//pkg/util/leaktest", + "//pkg/util/log", "@com_github_cockroachdb_datadriven//:datadriven", + "@com_github_stretchr_testify//require", ], ) diff --git a/pkg/raft/quorum/joint.go b/pkg/raft/quorum/joint.go index 7d04a6b63da3..3aa33fb168ed 100644 --- a/pkg/raft/quorum/joint.go +++ b/pkg/raft/quorum/joint.go @@ -17,7 +17,10 @@ package quorum -import pb "github.com/cockroachdb/cockroach/pkg/raft/raftpb" +import ( + pb "github.com/cockroachdb/cockroach/pkg/raft/raftpb" + "github.com/cockroachdb/cockroach/pkg/util/hlc" +) // JointConfig is a configuration of two groups of (possibly overlapping) // majority configurations. Decisions require the support of both majorities. @@ -78,3 +81,12 @@ func (c JointConfig) VoteResult(votes map[pb.PeerID]bool) VoteResult { // One side won, the other one is pending, so the whole outcome is. return VotePending } + +// LeadSupportExpiration takes a mapping of the timestamps until which peers +// have promised the leader support, and returns the timestamp until which the +// leader is guaranteed support (the minimum of the two majority configs' +// expirations). +func (c JointConfig) LeadSupportExpiration(supported map[pb.PeerID]hlc.Timestamp) hlc.Timestamp { + qse := c[0].LeadSupportExpiration(supported) + qse.Backward(c[1].LeadSupportExpiration(supported)) + return qse +} diff --git a/pkg/raft/quorum/majority.go b/pkg/raft/quorum/majority.go index 6f143795e033..e923304e4abc 100644 --- a/pkg/raft/quorum/majority.go +++ b/pkg/raft/quorum/majority.go @@ -25,6 +25,7 @@ import ( "strings" pb "github.com/cockroachdb/cockroach/pkg/raft/raftpb" + "github.com/cockroachdb/cockroach/pkg/util/hlc" ) // MajorityConfig is a set of IDs that uses majority quorums to make decisions. @@ -118,6 +119,11 @@ func (c MajorityConfig) Slice() []pb.PeerID { return sl } +// NB: A lot of logic in CommittedIndex, VoteResult, and LeadSupportExpiration +// can be de-duplicated by using generics. This was attempted in +// https://github.com/cockroachdb/cockroach/pull/128054, but eventually +// abandoned because of microbenchmark regressions. + // CommittedIndex computes the committed index from those supplied via the // provided AckedIndexer (for the active config).
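// For example (an illustrative worked case, not part of this change): in a // three-peer config where the acked indexes are 4, 5, and 5, a majority (two // of three) has acked index 5, so the committed index is 5.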
func (c MajorityConfig) CommittedIndex(l AckedIndexer) Index { @@ -177,7 +183,7 @@ func (c MajorityConfig) VoteResult(votes map[pb.PeerID]bool) VoteResult { return VoteWon } - var votedCnt int //vote counts for yes. + var votedCnt int // vote counts for yes. var missing int for id := range c { v, ok := votes[id] @@ -199,3 +205,57 @@ func (c MajorityConfig) VoteResult(votes map[pb.PeerID]bool) VoteResult { } return VoteLost } + +// LeadSupportExpiration takes a mapping of the timestamps until which peers +// have promised a fortified leader support, and returns the timestamp until +// which the leader is guaranteed support by a quorum. +func (c MajorityConfig) LeadSupportExpiration(supported map[pb.PeerID]hlc.Timestamp) hlc.Timestamp { + if len(c) == 0 { + // There are no peers in the config, and therefore no leader, so we return + // MaxTimestamp as a sentinel value. This also plays well with joint quorums + // when one half is the zero MajorityConfig. In such cases, the joint config + // should behave like the other half. + return hlc.MaxTimestamp + } + + n := len(c) + + // Use an on-stack slice whenever n <= 7 (otherwise we alloc). The assumption + // is that running with a replication factor of >7 is rare, and in cases in + // which it happens, performance is less of a concern (it's not like + // performance implications of an allocation here are drastic). + var stk [7]hlc.Timestamp + var srt []hlc.Timestamp + if len(stk) >= n { + srt = stk[:n] + } else { + srt = make([]hlc.Timestamp, n) + } + + { + // Fill the slice with Timestamps for peers in the configuration. Any unused + // slots will be left as empty Timestamps for our calculation. We fill from + // the right (since the zeros will end up on the left after sorting anyway). + i := n - 1 + for id := range c { + if e, ok := supported[id]; ok { + srt[i] = e + i-- + } + } + } + + slices.SortFunc(srt, func(a hlc.Timestamp, b hlc.Timestamp) int { + return a.Compare(b) + }) + + // We want the maximum timestamp that's supported by the quorum. The + // assumption is that if a timestamp is supported by a peer, so are all + // timestamps less than that timestamp. For this, we can simply consider the + // quorum formed by picking the highest value elements and pick the minimum + // from this. In other words, from our sorted (in increasing order) array srt, + // we want to move n/2 + 1 to the left from the end (accounting for + // zero-indexing). + pos := n - (n/2 + 1) + return srt[pos] +} diff --git a/pkg/raft/quorum/quorum_test.go b/pkg/raft/quorum/quorum_test.go new file mode 100644 index 000000000000..cd88dcab4e98 --- /dev/null +++ b/pkg/raft/quorum/quorum_test.go @@ -0,0 +1,141 @@ +// Copyright 2024 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package quorum + +import ( + "testing" + + pb "github.com/cockroachdb/cockroach/pkg/raft/raftpb" + "github.com/cockroachdb/cockroach/pkg/util/hlc" + "github.com/cockroachdb/cockroach/pkg/util/leaktest" + "github.com/cockroachdb/cockroach/pkg/util/log" + "github.com/stretchr/testify/require" +) + +// TestLeadSupportExpiration ensures that the leader's support expiration is +// correctly calculated.
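+// For example, if peers 1, 2, and 3 promise support until timestamps 10, 20, +// and 15 respectively, then a quorum (two of three) supports every timestamp +// up to 15, so the expiration is 15.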
+func TestLeadSupportExpiration(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + ts := func(ts int64) hlc.Timestamp { + return hlc.Timestamp{ + WallTime: ts, + } + } + + testCases := []struct { + ids []pb.PeerID + support map[pb.PeerID]hlc.Timestamp + exp hlc.Timestamp + }{ + { + ids: []pb.PeerID{1, 2, 3}, + support: map[pb.PeerID]hlc.Timestamp{1: ts(10), 2: ts(20), 3: ts(15)}, + exp: ts(15), + }, + { + ids: []pb.PeerID{1, 2, 3, 4}, + support: map[pb.PeerID]hlc.Timestamp{1: ts(10), 2: ts(20), 3: ts(15), 4: ts(20)}, + exp: ts(15), + }, + { + ids: []pb.PeerID{1, 2, 3, 4, 5}, + support: map[pb.PeerID]hlc.Timestamp{1: ts(10), 2: ts(20), 3: ts(15), 4: ts(20), 5: ts(20)}, + exp: ts(20), + }, + { + ids: []pb.PeerID{1, 2, 3}, + support: map[pb.PeerID]hlc.Timestamp{1: ts(10), 2: ts(20)}, + exp: ts(10), + }, + { + ids: []pb.PeerID{1, 2, 3}, + support: map[pb.PeerID]hlc.Timestamp{1: ts(10)}, + exp: hlc.Timestamp{}, + }, + { + ids: []pb.PeerID{}, + support: map[pb.PeerID]hlc.Timestamp{}, + exp: hlc.MaxTimestamp, + }, + } + + for _, tc := range testCases { + m := MajorityConfig{} + for _, id := range tc.ids { + m[id] = struct{}{} + } + + require.Equal(t, tc.exp, m.LeadSupportExpiration(tc.support)) + } +} + +// TestLeadSupportExpirationJointConfig ensures that the LeadSupportExpiration +// is calculated correctly for joint configurations. In particular, it's the +// minimum of the two majority configs. +func TestLeadSupportExpirationJointConfig(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + ts := func(ts int64) hlc.Timestamp { + return hlc.Timestamp{ + WallTime: ts, + } + } + + testCases := []struct { + cfg1 []pb.PeerID + cfg2 []pb.PeerID + support map[pb.PeerID]hlc.Timestamp + exp hlc.Timestamp + }{ + { + cfg1: []pb.PeerID{1, 2, 3}, + cfg2: []pb.PeerID{}, + support: map[pb.PeerID]hlc.Timestamp{1: ts(10), 2: ts(20), 3: ts(15)}, + exp: ts(15), // cfg2 is empty, should behave like the (cfg1) majority config case + }, + { + cfg1: []pb.PeerID{}, + cfg2: []pb.PeerID{1, 2, 3}, + support: map[pb.PeerID]hlc.Timestamp{1: ts(10), 2: ts(20), 3: ts(15)}, + exp: ts(15), // cfg1 is empty, should behave like the (cfg2) majority config case + }, + { + cfg1: []pb.PeerID{3, 4, 5}, + cfg2: []pb.PeerID{1, 2, 3}, + support: map[pb.PeerID]hlc.Timestamp{1: ts(10), 2: ts(20), 3: ts(15), 4: ts(20), 5: ts(25)}, + exp: ts(15), // lower of the two + }, + { + cfg1: []pb.PeerID{3, 4, 5}, + cfg2: []pb.PeerID{1, 2, 3}, + support: map[pb.PeerID]hlc.Timestamp{1: ts(10), 2: ts(20), 3: ts(15), 4: ts(10), 5: ts(10)}, + exp: ts(10), // lower of the two; this time, cfg2 has the lower expiration + }, + } + + for _, tc := range testCases { + j := JointConfig{ + MajorityConfig{}, + MajorityConfig{}, + } + for _, id := range tc.cfg1 { + j[0][id] = struct{}{} + } + for _, id := range tc.cfg2 { + j[1][id] = struct{}{} + } + + require.Equal(t, tc.exp, j.LeadSupportExpiration(tc.support)) + } +} diff --git a/pkg/raft/raft.go b/pkg/raft/raft.go index 63a375445790..0a361724a162 100644 --- a/pkg/raft/raft.go +++ b/pkg/raft/raft.go @@ -19,6 +19,7 @@ package raft import ( "bytes" + "context" "crypto/rand" "errors" "fmt" @@ -28,6 +29,7 @@ import ( "strings" "sync" + "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/raft/confchange" "github.com/cockroachdb/cockroach/pkg/raft/quorum" pb "github.com/cockroachdb/cockroach/pkg/raft/raftpb" @@ -174,6 +176,8 @@ type Config struct { // threads are not responsible for 
understanding the response messages, only // for delivering them to the correct target after performing the storage // write. + // TODO(#129411): deprecate !AsyncStorageWrites mode as it's not used in + // CRDB. AsyncStorageWrites bool // MaxSizePerMsg limits the max byte size of each append message. Smaller @@ -265,6 +269,10 @@ type Config struct { // StoreLiveness is a reference to the store liveness fabric. StoreLiveness raftstoreliveness.StoreLiveness + + // CRDBVersion exposes the active version to Raft. This helps with + // version-gating features. + CRDBVersion clusterversion.Handle } func (c *Config) validate() error { @@ -328,6 +336,7 @@ type raft struct { config quorum.Config trk tracker.ProgressTracker electionTracker tracker.ElectionTracker + supportTracker tracker.SupportTracker state StateType @@ -359,8 +368,28 @@ type raft struct { // // TODO(arul): This should be populated when responding to a MsgFortify. leadEpoch pb.Epoch - // leadTransferee is id of the leader transfer target when its value is not zero. - // Follow the procedure defined in raft thesis 3.10. + // leadTransferee, if set, is the id of the leader transfer target during a + // pending leadership transfer. The value is set while the outgoing leader + // (this node) is catching the target up on its log. During this time, the + // leader will drop incoming proposals to give the transfer target time to + // catch up. Once the transfer target is caught up, the leader will send it + // a MsgTimeoutNow to encourage it to campaign immediately while bypassing + // pre-vote and leader support safeguards. As soon as the MsgTimeoutNow is + // sent, the leader will step down to a follower, as it has irrevocably + // compromised its leadership term by giving the target permission to + // overthrow it. + // + // For cases where the transfer target is already caught up on the log at the + // time that the leader receives a MsgTransferLeader, the MsgTimeoutNow will + // be sent immediately and the leader will step down to a follower without + // ever setting this field. + // + // In either case, if the transfer fails after the MsgTimeoutNow has been + // sent, the leader (who has stepped down to a follower) must call a new + // election at a new term in order to reestablish leadership. + // + // This roughly follows the procedure defined in the raft thesis, section + // 3.10: Leadership transfer extension. leadTransferee pb.PeerID // Only one conf change may be pending (in the log, but not yet // applied) at a time.
This is enforced via pendingConfIndex, which @@ -406,6 +435,7 @@ type raft struct { logger Logger storeLiveness raftstoreliveness.StoreLiveness + crdbVersion clusterversion.Handle } func newRaft(c *Config) *raft { @@ -436,10 +466,12 @@ func newRaft(c *Config) *raft { disableConfChangeValidation: c.DisableConfChangeValidation, stepDownOnRemoval: c.StepDownOnRemoval, storeLiveness: c.StoreLiveness, + crdbVersion: c.CRDBVersion, } lastID := r.raftLog.lastEntryID() - r.electionTracker = tracker.MakeVoteTracker(&r.config) + r.electionTracker = tracker.MakeElectionTracker(&r.config) + r.supportTracker = tracker.MakeSupportTracker(&r.config, r.storeLiveness) cfg, progressMap, err := confchange.Restore(confchange.Changer{ Config: quorum.MakeEmptyConfig(), @@ -593,7 +625,7 @@ func (r *raft) maybeSendAppend(to pb.PeerID) bool { pr := r.trk.Progress(to) last, commit := r.raftLog.lastIndex(), r.raftLog.committed - if !pr.ShouldSendMsgApp(last, commit) { + if !pr.ShouldSendMsgApp(last, commit, r.advanceCommitViaMsgAppOnly()) { return false } @@ -624,7 +656,7 @@ func (r *raft) maybeSendAppend(to pb.PeerID) bool { Match: pr.Match, }) pr.SentEntries(len(entries), uint64(payloadsSize(entries))) - pr.SentCommit(commit) + pr.MaybeUpdateSentCommit(commit) return true } @@ -666,14 +698,21 @@ func (r *raft) sendHeartbeat(to pb.PeerID) { // or it might not have all the committed entries. // The leader MUST NOT forward the follower's commit to // an unmatched index. - commit := min(pr.Match, r.raftLog.committed) + // NOTE: Starting from V24_3_AdvanceCommitIndexViaMsgApps, heartbeats do not + // advance the commit index. Instead, MsgApps are used for that purpose. + // TODO(iskettaneh): Remove the commit from the heartbeat message in versions + // >= 25.1. + var commit uint64 + if !r.advanceCommitViaMsgAppOnly() { + commit = min(pr.Match, r.raftLog.committed) + } r.send(pb.Message{ To: to, Type: pb.MsgHeartbeat, Commit: commit, Match: pr.Match, }) - pr.SentCommit(commit) + pr.MaybeUpdateSentCommit(commit) } // sendFortify sends a fortification RPC to the given peer. @@ -689,8 +728,14 @@ func (r *raft) sendFortify(to pb.PeerID) { epoch, live := r.storeLiveness.SupportFor(r.lead) if live { r.leadEpoch = epoch - // TODO(arul): For now, we're not recording any support on the leader. Do - // this once we implement handleFortifyResp correctly. + // The leader needs to persist the LeadEpoch durably before it can start + // supporting itself. We do so by sending a self-addressed + // MsgFortifyLeaderResp message so that it is added to the msgsAfterAppend + // slice and delivered back to this node only after LeadEpoch is + // persisted. At that point, this node can record support without + // discriminating based on who is providing it (itself vs. another + // follower). + r.send(pb.Message{To: r.id, Type: pb.MsgFortifyLeaderResp, LeadEpoch: epoch}) } else { r.logger.Infof( "%x leader at term %d does not support itself in the liveness fabric", r.id, r.Term, ) @@ -706,6 +751,10 @@ func (r *raft) bcastAppend() { r.trk.Visit(func(id pb.PeerID, _ *tracker.Progress) { if id == r.id { + // NB: the leader doesn't send MsgAppResp to itself here. This means that + // the leader will not have a chance to update its own + // MatchCommit/SentCommit. That is fine because the leader doesn't use + // MatchCommit/SentCommit for itself. It only uses the followers' values.
return } r.maybeSendAppend(id) }) @@ -786,19 +835,30 @@ func (r *raft) maybeCommit() bool { if !r.raftLog.matchTerm(entryID{term: r.Term, index: index}) { return false } - r.raftLog.commitTo(logMark{term: r.Term, index: index}) + r.raftLog.commitTo(LogMark{Term: r.Term, Index: index}) return true } func (r *raft) reset(term uint64) { if r.Term != term { + // NB: There are state transitions where reset may be called on a follower + // that supports a fortified leader. One example is when a follower is + // supporting the old leader, but the quorum isn't, so a new leader is + // elected. In this case, the follower that's supporting the old leader will + // eventually hear from the new leader and call reset with the new leader's + // term. Naively, this would trip this assertion -- however, in this + // case[*], we expect a call to deFortify first. + // + // [*] this case, and other cases where a state transition implies + // de-fortification. + assertTrue(!r.supportingFortifiedLeader() || r.lead == r.id, + "should not be changing terms when supporting a fortified leader; leader exempted") r.Term = term r.Vote = None + r.lead = None + r.leadEpoch = 0 } - // TODO(arul): we should only reset this if the term has changed. - r.lead = None - r.electionElapsed = 0 r.heartbeatElapsed = 0 r.resetRandomizedElectionTimeout() @@ -806,12 +866,14 @@ r.abortLeaderTransfer() r.electionTracker.ResetVotes() + r.supportTracker.Reset() r.trk.Visit(func(id pb.PeerID, pr *tracker.Progress) { *pr = tracker.Progress{ - Match: 0, - Next: r.raftLog.lastIndex() + 1, - Inflights: tracker.NewInflights(r.maxInflight, r.maxInflightBytes), - IsLearner: pr.IsLearner, + Match: 0, + MatchCommit: 0, + Next: r.raftLog.lastIndex() + 1, + Inflights: tracker.NewInflights(r.maxInflight, r.maxInflightBytes), + IsLearner: pr.IsLearner, } if id == r.id { pr.Match = r.raftLog.lastIndex() @@ -865,6 +927,37 @@ func (r *raft) appendEntry(es ...pb.Entry) (accepted bool) { // tickElection is run by followers and candidates after r.electionTimeout. func (r *raft) tickElection() { + assertTrue(r.state != StateLeader, "tickElection called by leader") + + if r.leadEpoch != 0 { + if r.supportingFortifiedLeader() { + // There's a fortified leader and we're supporting it. Reset the + // electionElapsed ticker and early return. + r.electionElapsed = 0 + return + } + // We're no longer supporting the fortified leader. Let's make this + // de-fortification explicit. Doing so ensures that we don't enter this + // conditional again unless the term changes or the follower is + // re-fortified, which means we'll only ever skip the initial part of the + // election timeout once per fortified -> no longer fortified transition. + r.deFortify(r.id, r.Term) + if r.electionElapsed == 0 { + // NB: The peer was supporting a leader who had fortified until the last + // tick, but that support has now expired. As a result: + // 1. We don't want to wait out an entire election timeout before + // campaigning. + // 2. But we do want to take advantage of randomized election timeouts + // built into raft to prevent hung elections. + // We achieve both of these goals by "forwarding" electionElapsed to begin + // at r.electionTimeout. Also see pastElectionTimeout. + r.logger.Debugf( + "%d setting election elapsed to start from %d ticks after store liveness support expired", + r.id, r.electionTimeout, + ) + r.electionElapsed = r.electionTimeout - 1 // -1 because we'll add one below.
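+ // For example (illustrative numbers): with electionTimeout = 10, the + // ticker resumes at 9, and the increment below brings electionElapsed to + // 10, so the follower may campaign as soon as its randomized timeout (in + // [electionTimeout, 2*electionTimeout)) allows, rather than waiting out a + // full timeout from zero.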
+ } + } r.electionElapsed++ if r.promotable() && r.pastElectionTimeout() { @@ -877,6 +970,8 @@ func (r *raft) tickElection() { // tickHeartbeat is run by leaders to send a MsgBeat after r.heartbeatTimeout. func (r *raft) tickHeartbeat() { + assertTrue(r.state == StateLeader, "tickHeartbeat called by non-leader") + r.heartbeatElapsed++ r.electionElapsed++ @@ -885,18 +980,17 @@ func (r *raft) tickHeartbeat() { if r.checkQuorum { if err := r.Step(pb.Message{From: r.id, Type: pb.MsgCheckQuorum}); err != nil { r.logger.Debugf("error occurred during checking sending heartbeat: %v", err) + } else if r.state != StateLeader { + return // stepped down } } - // If current leader cannot transfer leadership in electionTimeout, it becomes leader again. - if r.state == StateLeader && r.leadTransferee != None { + // If current leader cannot transfer leadership in electionTimeout, it stops + // trying and begins accepting new proposals again. + if r.leadTransferee != None { r.abortLeaderTransfer() } } - if r.state != StateLeader { - return - } - if r.heartbeatElapsed >= r.heartbeatTimeout { r.heartbeatElapsed = 0 if err := r.Step(pb.Message{From: r.id, Type: pb.MsgBeat}); err != nil { @@ -937,6 +1031,9 @@ func (r *raft) becomePreCandidate() { if r.state == StateLeader { panic("invalid transition [leader -> pre-candidate]") } + assertTrue(!r.supportingFortifiedLeader() || r.lead == r.id, + "should not be supporting a fortified leader when becoming pre-candidate; leader exempted", + ) // Becoming a pre-candidate changes our step functions and state, // but doesn't change anything else. In particular it does not increase // r.Term or change r.Vote. @@ -948,6 +1045,7 @@ func (r *raft) becomePreCandidate() { // revoked StoreLiveness support for the leader's store to begin with. It's // a bit weird from the perspective of raft though. See if we can avoid this. r.lead = None + r.leadEpoch = 0 r.state = StatePreCandidate r.logger.Infof("%x became pre-candidate at term %d", r.id, r.Term) } @@ -996,16 +1094,20 @@ func (r *raft) hup(t CampaignType) { r.logger.Debugf("%x ignoring MsgHup because already leader", r.id) return } - // TODO(arul): we will eventually want some kind of logic like this. - // - //if r.supportingFortifiedLeader() && t != campaignTransfer { - // r.logger.Debugf("%x ignoring MsgHup due to leader fortification", r.id) - // return - //} if !r.promotable() { r.logger.Warningf("%x is unpromotable and can not campaign", r.id) return } + // NB: The leader is allowed to bump its term by calling an election. Note that + // we must take care to ensure the leader's support expiration doesn't regress. + // + // TODO(arul): add special handling for the r.lead == r.id case with an + // assertion to ensure the LeaderSupportExpiration is in the past before + // campaigning. + if r.supportingFortifiedLeader() && r.lead != r.id { + r.logger.Debugf("%x ignoring MsgHup due to leader fortification", r.id) + return + } if r.hasUnappliedConfChanges() { r.logger.Warningf("%x cannot campaign at term %d since there are still pending configuration changes to apply", r.id, r.Term) return @@ -1019,15 +1121,14 @@ func (r *raft) hup(t CampaignType) { // support to a leader. When a peer is providing support to a leader, it must // not campaign or vote to disrupt that leader's term, unless specifically asked // to do so by the leader. -// TODO(arul): this is a placeholder implementation. Move it around as you see -// fit. 
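+// For example, if this peer recorded leadEpoch 3 when it fortified the leader, +// but its store's liveness support for the leader's store has since advanced +// to epoch 4, the original fortification promise has lapsed and this returns +// false.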
func (r *raft) supportingFortifiedLeader() bool { if r.leadEpoch == 0 { return false // not supporting any leader } - assertTrue(r.lead != None, "leader epoch is set but leader is not") - epoch, ok := r.storeLiveness.SupportFor(r.lead) - return ok && epoch == r.leadEpoch + assertTrue(r.lead != None, "lead epoch is set but leader is not") + epoch, live := r.storeLiveness.SupportFor(r.lead) + assertTrue(epoch >= r.leadEpoch, "epochs in store liveness shouldn't regress") + return live && epoch == r.leadEpoch } // errBreak is a sentinel error used to break a callback-based loop. @@ -1133,17 +1234,47 @@ func (r *raft) Step(m pb.Message) error { case m.Term > r.Term: if m.Type == pb.MsgVote || m.Type == pb.MsgPreVote { force := bytes.Equal(m.Context, []byte(campaignTransfer)) - inLease := r.checkQuorum && r.lead != None && r.electionElapsed < r.electionTimeout - if !force && inLease { - // If a server receives a RequestVote request within the minimum election timeout - // of hearing from a current leader, it does not update its term or grant its vote - last := r.raftLog.lastEntryID() - // TODO(pav-kv): it should be ok to simply print the %+v of the lastEntryID. - r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] ignored %s from %x [logterm: %d, index: %d] at term %d: lease is not expired (remaining ticks: %d)", - r.id, last.term, last.index, r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term, r.electionTimeout-r.electionElapsed) - return nil + inHeartbeatLease := r.checkQuorum && r.lead != None && r.electionElapsed < r.electionTimeout + // NB: A fortified leader is allowed to bump its term. It'll need to + // re-fortify once if it gets elected at the higher term though, so the + // leader must take care to not regress its supported expiration. However, + // at the follower, we grant the fortified leader our vote at the higher + // term. + inFortifyLease := r.supportingFortifiedLeader() && r.lead != m.From + if !force && (inHeartbeatLease || inFortifyLease) { + // If a server receives a Request{,Pre}Vote message but is still + // supporting a fortified leader, it does not update its term or grant + // its vote. Similarly, if a server receives a Request{,Pre}Vote message + // within the minimum election timeout of hearing from the current + // leader it does not update its term or grant its vote. + { + // Log why we're ignoring the Request{,Pre}Vote. + var inHeartbeatLeaseMsg string + var inFortifyLeaseMsg string + var sep string + if inHeartbeatLease { + inHeartbeatLeaseMsg = fmt.Sprintf("recently received communication from leader (remaining ticks: %d)", r.electionTimeout-r.electionElapsed) + } + if inFortifyLease { + inFortifyLeaseMsg = fmt.Sprintf("supporting fortified leader %d at epoch %d", r.lead, r.leadEpoch) + } + if inFortifyLease && inHeartbeatLease { + sep = " and " + } + last := r.raftLog.lastEntryID() + // TODO(pav-kv): it should be ok to simply print the %+v of the + // lastEntryID. + r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] ignored %s from %x [logterm: %d, index: %d] at term %d: %s%s%s", + r.id, last.term, last.index, r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term, inHeartbeatLeaseMsg, sep, inFortifyLeaseMsg) + } + return nil // don't update term/grant vote; early return } + // If we're willing to vote in this election at a higher term, then make + // sure we have withdrawn our support for the current leader, if we're + // still providing it support. 
+ r.deFortify(m.From, m.Term) } + switch { case m.Type == pb.MsgPreVote: // Never change our term in response to a PreVote @@ -1156,7 +1287,11 @@ func (r *raft) Step(m pb.Message) error { default: r.logger.Infof("%x [term: %d] received a %s message with higher term from %x [term: %d]", r.id, r.Term, m.Type, m.From, m.Term) - if m.Type == pb.MsgApp || m.Type == pb.MsgHeartbeat || m.Type == pb.MsgSnap { + if IsMsgFromLeader(m.Type) { + // We've just received a message from the new leader which was elected + // at a higher term. The old leader's fortification support has expired, + // so it's safe to defortify at this point. + r.deFortify(m.From, m.Term) r.becomeFollower(m.Term, m.From) } else { r.becomeFollower(m.Term, None) @@ -1219,7 +1354,7 @@ func (r *raft) Step(m pb.Message) error { r.appliedSnap(m.Snapshot) } if m.Index != 0 { - r.raftLog.stableTo(logMark{term: m.LogTerm, index: m.Index}) + r.raftLog.stableTo(LogMark{Term: m.LogTerm, Index: m.Index}) } case pb.MsgStorageApplyResp: @@ -1405,7 +1540,7 @@ func stepLeader(r *raft, m pb.Message) error { // an MsgAppResp to acknowledge the appended entries in the last Ready. pr.RecentActive = true - + pr.MaybeUpdateMatchCommit(m.Commit) if m.Reject { // RejectHint is the suggested next base entry for appending (i.e. // we try to append entry RejectHint+1 next), and LogTerm is the @@ -1580,7 +1715,7 @@ func stepLeader(r *raft, m pb.Message) error { // Transfer leadership is in progress. if m.From == r.leadTransferee && pr.Match == r.raftLog.lastIndex() { r.logger.Infof("%x sent MsgTimeoutNow to %x after received MsgAppResp", r.id, m.From) - r.sendTimeoutNow(m.From) + r.transferLeader(m.From) } } } @@ -1636,13 +1771,23 @@ func stepLeader(r *raft, m pb.Message) error { } // Transfer leadership to third party. r.logger.Infof("%x [term %d] starts to transfer leadership to %x", r.id, r.Term, leadTransferee) - // Transfer leadership should be finished in one electionTimeout, so reset r.electionElapsed. + // Transfer leadership should be finished in one electionTimeout, so reset + // r.electionElapsed. If the transfer target is not caught up on its log by + // then, the transfer is aborted and the leader can resume normal operation. + // See raft.abortLeaderTransfer. r.electionElapsed = 0 r.leadTransferee = leadTransferee if pr.Match == r.raftLog.lastIndex() { - r.sendTimeoutNow(leadTransferee) r.logger.Infof("%x sends MsgTimeoutNow to %x immediately as %x already has up-to-date log", r.id, leadTransferee, leadTransferee) + r.transferLeader(leadTransferee) } else { + // If the transfer target is not initially caught up on its log, we don't + // send it a MsgTimeoutNow immediately. Instead, we eagerly try to catch + // it up on its log so that it will be able to win the election when it + // campaigns (recall that a candidate can only win an election if its log + // is up-to-date). If we are able to successfully catch it up in time, + // before the electionElapsed timeout fires, we call raft.transferLeader + // in response to receiving the final MsgAppResp. pr.MsgAppProbesPaused = false r.maybeSendAppend(leadTransferee) } @@ -1761,6 +1906,7 @@ func stepFollower(r *raft, m pb.Message) error { } r.logger.Infof("%x forgetting leader %x at term %d", r.id, r.lead, r.Term) r.lead = None + r.leadEpoch = 0 case pb.MsgTimeoutNow: // TODO(nvanbenschoten): we will eventually want some kind of logic like // this. 
However, even this may not be enough, because we're calling a @@ -1776,6 +1922,12 @@ func stepFollower(r *raft, m pb.Message) error { // Leadership transfers never use pre-vote even if r.preVote is true; we // know we are not recovering from a partition so there is no need for the // extra round trip. + // TODO(nvanbenschoten): Once the TODO above is addressed, and assuming its + // handled by ensuring MsgTimeoutNow only comes from the leader, we should + // be able to replace this leadEpoch assignment with a call to deFortify. + // Currently, it may panic because only the leader should be able to + // de-fortify without bumping the term. + r.leadEpoch = 0 r.hup(campaignTransfer) } return nil @@ -1815,7 +1967,7 @@ func (r *raft) handleAppendEntries(m pb.Message) { // committed entries at m.Term (by raft invariants), so it is safe to bump // the commit index even if the MsgApp is stale. lastIndex := a.lastIndex() - r.raftLog.commitTo(logMark{term: m.Term, index: min(m.Commit, lastIndex)}) + r.raftLog.commitTo(LogMark{Term: m.Term, Index: min(m.Commit, lastIndex)}) r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: lastIndex, Commit: r.raftLog.committed}) return @@ -1893,8 +2045,8 @@ func (r *raft) handleHeartbeat(m pb.Message) { // commit index if accTerm >= m.Term. // TODO(pav-kv): move this logic to raftLog.commitTo, once the accTerm has // migrated to raftLog/unstable. - mark := logMark{term: m.Term, index: min(m.Commit, r.raftLog.lastIndex())} - if mark.term == r.raftLog.accTerm() { + mark := LogMark{Term: m.Term, Index: min(m.Commit, r.raftLog.lastIndex())} + if mark.Term == r.raftLog.accTerm() { r.raftLog.commitTo(mark) } r.send(pb.Message{To: m.From, Type: pb.MsgHeartbeatResp}) @@ -1953,8 +2105,25 @@ func (r *raft) handleFortify(m pb.Message) { func (r *raft) handleFortifyResp(m pb.Message) { assertTrue(r.state == StateLeader, "only leaders should be handling fortification responses") - // TODO(arul): record support once - // https://github.com/cockroachdb/cockroach/issues/125264 lands. + if m.Reject { + // Couldn't successfully fortify the follower. Typically, this happens when + // the follower isn't supporting the leader's store in StoreLiveness or the + // follower is down. We'll try to fortify the follower again later in + // tickHeartbeat. + return + } + r.supportTracker.RecordSupport(m.From, m.LeadEpoch) +} + +// deFortify (conceptually) revokes previously provided fortification support to +// a leader. +func (r *raft) deFortify(from pb.PeerID, term uint64) { + assertTrue(term > r.Term || + (term == r.Term && from == r.lead) || + (term == r.Term && from == r.id && !r.supportingFortifiedLeader()), + "can only defortify at current term if told by the leader or if fortification support has expired", + ) + r.leadEpoch = 0 } // restore recovers the state machine from a snapshot. It restores the log and the @@ -2157,11 +2326,17 @@ func (r *raft) resetRandomizedElectionTimeout() { r.randomizedElectionTimeout = r.electionTimeout + globalRand.Intn(r.electionTimeout) } -func (r *raft) sendTimeoutNow(to pb.PeerID) { +func (r *raft) transferLeader(to pb.PeerID) { + assertTrue(r.state == StateLeader, "only the leader can transfer leadership") r.send(pb.Message{To: to, Type: pb.MsgTimeoutNow}) + r.becomeFollower(r.Term, r.lead) } func (r *raft) abortLeaderTransfer() { + // TODO(arul): we currently call this method regardless of whether we are the + // leader or not and regardless of whether there is an in-progress leadership + // transfer. 
We should consider limiting the cases where this can be called + and adding the appropriate preconditions as assertions. r.leadTransferee = None } @@ -2201,3 +2376,20 @@ func (r *raft) reduceUncommittedSize(s entryPayloadSize) { r.uncommittedSize -= s } } + +func (r *raft) testingStepDown() error { + if r.lead != r.id { + return errors.New("cannot step down if not the leader") + } + r.becomeFollower(r.Term, r.id) // mirror the logic in how we step down when CheckQuorum fails + return nil +} + +// advanceCommitViaMsgAppOnly returns true if the commit index is advanced on +// the followers using MsgApp only. This means that heartbeats are not used to +// advance the commit index. This function returns true only if all followers +// communicate their durable commit index back to the leader via MsgAppResp. +func (r *raft) advanceCommitViaMsgAppOnly() bool { + return r.crdbVersion.IsActive(context.Background(), + clusterversion.V24_3_AdvanceCommitIndexViaMsgApps) +} diff --git a/pkg/raft/raft_test.go b/pkg/raft/raft_test.go index c5d547a5fb86..487f3e5f3f68 100644 --- a/pkg/raft/raft_test.go +++ b/pkg/raft/raft_test.go @@ -27,6 +27,7 @@ import ( pb "github.com/cockroachdb/cockroach/pkg/raft/raftpb" "github.com/cockroachdb/cockroach/pkg/raft/raftstoreliveness" "github.com/cockroachdb/cockroach/pkg/raft/tracker" + "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -353,8 +354,9 @@ func TestLearnerElectionTimeout(t *testing.T) { // TestLearnerPromotion verifies that the learner should not campaign until // it is promoted to a normal peer. func TestLearnerPromotion(t *testing.T) { - n1 := newTestLearnerRaft(1, 10, 1, newTestMemoryStorage(withPeers(1), withLearners(2))) - n2 := newTestLearnerRaft(2, 10, 1, newTestMemoryStorage(withPeers(1), withLearners(2))) + n1 := newTestLearnerRaft(1, 10, 1, newTestMemoryStorage(withPeers(1), withLearners(2)), withFortificationDisabled()) + + n2 := newTestLearnerRaft(2, 10, 1, newTestMemoryStorage(withPeers(1), withLearners(2)), withFortificationDisabled()) n1.becomeFollower(1, None) n2.becomeFollower(1, None) @@ -420,9 +422,9 @@ func TestLeaderCyclePreVote(t *testing.T) { // pre-vote) work when not starting from a clean slate (as they do in // TestLeaderElection) func testLeaderCycle(t *testing.T, preVote bool) { - var cfg func(*Config) + cfg := fortificationDisabledConfig if preVote { - cfg = preVoteConfig + cfg = preVoteConfigWithFortificationDisabled } n := newNetworkWithConfig(cfg, nil, nil, nil) for campaignerID := pb.PeerID(1); campaignerID <= 3; campaignerID++ { @@ -570,14 +572,14 @@ func TestLogReplication(t *testing.T) { wcommitted uint64 }{ { - newNetwork(nil, nil, nil), + newNetworkWithConfig(fortificationDisabledConfig, nil, nil, nil), []pb.Message{ {From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("somedata")}}}, }, 2, }, { - newNetwork(nil, nil, nil), + newNetworkWithConfig(fortificationDisabledConfig, nil, nil, nil), []pb.Message{ {From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("somedata")}}}, {From: 1, To: 2, Type: pb.MsgHup}, @@ -671,7 +673,7 @@ func TestSingleNodeCommit(t *testing.T) { // when leader changes, no new proposal comes in and ChangeTerm proposal is // filtered.
func TestCannotCommitWithoutNewTermEntry(t *testing.T) { - tt := newNetwork(nil, nil, nil, nil, nil) + tt := newNetworkWithConfig(fortificationDisabledConfig, nil, nil, nil, nil, nil) tt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup}) // 0 cannot reach 2,3,4 @@ -709,7 +711,7 @@ func TestCannotCommitWithoutNewTermEntry(t *testing.T) { // TestCommitWithoutNewTermEntry tests the entries could be committed // when leader changes, no new proposal comes in. func TestCommitWithoutNewTermEntry(t *testing.T) { - tt := newNetwork(nil, nil, nil, nil, nil) + tt := newNetworkWithConfig(fortificationDisabledConfig, nil, nil, nil, nil, nil) tt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup}) // 0 cannot reach 3,4,5 @@ -738,9 +740,9 @@ func TestDuelingCandidates(t *testing.T) { s1 := newTestMemoryStorage(withPeers(1, 2, 3)) s2 := newTestMemoryStorage(withPeers(1, 2, 3)) s3 := newTestMemoryStorage(withPeers(1, 2, 3)) - a := newTestRaft(1, 10, 1, s1) - b := newTestRaft(2, 10, 1, s2) - c := newTestRaft(3, 10, 1, s3) + a := newTestRaft(1, 10, 1, s1, withFortificationDisabled()) + b := newTestRaft(2, 10, 1, s2, withFortificationDisabled()) + c := newTestRaft(3, 10, 1, s3, withFortificationDisabled()) nt := newNetwork(a, b, c) nt.cut(1, 3) @@ -783,9 +785,9 @@ func TestDuelingCandidates(t *testing.T) { } func TestDuelingPreCandidates(t *testing.T) { - cfgA := newTestConfig(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3))) - cfgB := newTestConfig(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3))) - cfgC := newTestConfig(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3))) + cfgA := newTestConfig(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)), withFortificationDisabled()) + cfgB := newTestConfig(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)), withFortificationDisabled()) + cfgC := newTestConfig(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)), withFortificationDisabled()) cfgA.PreVote = true cfgB.PreVote = true cfgC.PreVote = true @@ -884,7 +886,7 @@ func TestSingleNodePreCandidate(t *testing.T) { } func TestOldMessages(t *testing.T) { - tt := newNetwork(nil, nil, nil) + tt := newNetworkWithConfig(fortificationDisabledConfig, nil, nil, nil) // make 0 leader @ term 3 tt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup}) tt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup}) @@ -1132,7 +1134,7 @@ func TestHandleMsgApp(t *testing.T) { } } -// TestHandleHeartbeat ensures that the follower commits to the commit in the message. +// TestHandleHeartbeat ensures that the follower handles heartbeats properly. 
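+// NB: when the V24_3_AdvanceCommitIndexViaMsgApps gate is active, the leader +// no longer attaches a commit index to heartbeats (see sendHeartbeat); the +// commit index is advanced via MsgApp instead, which is why this test no +// longer asserts on the follower's commit index.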
func TestHandleHeartbeat(t *testing.T) { commit := uint64(2) tests := []struct { @@ -1157,9 +1159,8 @@ func TestHandleHeartbeat(t *testing.T) { require.NoError(t, storage.Append(init.entries)) sm := newTestRaft(1, 5, 1, storage) sm.becomeFollower(init.term, 2) - sm.raftLog.commitTo(logMark{term: init.term, index: commit}) + sm.raftLog.commitTo(LogMark{Term: init.term, Index: commit}) sm.handleHeartbeat(tt.m) - assert.Equal(t, tt.wCommit, sm.raftLog.committed, "#%d", i) m := sm.readMessages() require.Len(t, m, 1, "#%d", i) assert.Equal(t, pb.MsgHeartbeatResp, m[0].Type, "#%d", i) @@ -1174,7 +1175,7 @@ func TestHandleHeartbeatResp(t *testing.T) { sm := newTestRaft(1, 5, 1, storage) sm.becomeCandidate() sm.becomeLeader() - sm.raftLog.commitTo(logMark{term: 3, index: sm.raftLog.lastIndex()}) + sm.raftLog.commitTo(LogMark{Term: 3, Index: sm.raftLog.lastIndex()}) // A heartbeat response from a node that is behind; re-send MsgApp sm.Step(pb.Message{From: 2, Type: pb.MsgHeartbeatResp}) @@ -1190,9 +1191,10 @@ func TestHandleHeartbeatResp(t *testing.T) { // Once we have an MsgAppResp, heartbeats no longer send MsgApp. sm.Step(pb.Message{ - From: 2, - Type: pb.MsgAppResp, - Index: msgs[0].Index + uint64(len(msgs[0].Entries)), + From: 2, + Type: pb.MsgAppResp, + Index: msgs[0].Index + uint64(len(msgs[0].Entries)), + Commit: sm.raftLog.lastIndex(), }) // Consume the message sent in response to MsgAppResp sm.readMessages() @@ -1448,9 +1450,15 @@ func TestCandidateResetTermMsgApp(t *testing.T) { // MsgHeartbeat or MsgApp from leader, "Step" resets the term // with leader's and reverts back to follower. func testCandidateResetTerm(t *testing.T, mt pb.MessageType) { - a := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3))) - b := newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3))) - c := newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3))) + a := newTestRaft( + 1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)), withFortificationDisabled(), + ) + b := newTestRaft( + 2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)), withFortificationDisabled(), + ) + c := newTestRaft( + 3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)), withFortificationDisabled(), + ) nt := newNetwork(a, b, c) @@ -1614,9 +1622,14 @@ func TestLeaderStepdownWhenQuorumLost(t *testing.T) { } func TestLeaderSupersedingWithCheckQuorum(t *testing.T) { - a := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3))) - b := newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3))) - c := newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3))) + a := newTestRaft( + 1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)), withFortificationDisabled(), + ) + b := newTestRaft( + 2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)), withFortificationDisabled()) + c := newTestRaft( + 3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)), withFortificationDisabled(), + ) a.checkQuorum = true b.checkQuorum = true @@ -1648,9 +1661,15 @@ func TestLeaderSupersedingWithCheckQuorum(t *testing.T) { } func TestLeaderElectionWithCheckQuorum(t *testing.T) { - a := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3))) - b := newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3))) - c := newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3))) + a := newTestRaft( + 1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)), withFortificationDisabled(), + ) + b := newTestRaft( + 2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)), withFortificationDisabled(), + ) + c := newTestRaft( + 3, 10, 1, 
newTestMemoryStorage(withPeers(1, 2, 3)), withFortificationDisabled(), + ) a.checkQuorum = true b.checkQuorum = true @@ -1687,9 +1706,15 @@ // can disrupt the leader even if the leader still "officially" holds the lease. The // leader is expected to step down and adopt the candidate's term func TestFreeStuckCandidateWithCheckQuorum(t *testing.T) { - a := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3))) - b := newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3))) - c := newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3))) + a := newTestRaft( + 1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)), withFortificationDisabled(), + ) + b := newTestRaft( + 2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)), withFortificationDisabled(), + ) + c := newTestRaft( + 3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)), withFortificationDisabled(), + ) a.checkQuorum = true b.checkQuorum = true @@ -1760,9 +1785,15 @@ func TestNonPromotableVoterWithCheckQuorum(t *testing.T) { // candidate's response to late leader heartbeat forces the leader // to step down. func TestDisruptiveFollower(t *testing.T) { - n1 := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3))) - n2 := newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3))) - n3 := newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3))) + n1 := newTestRaft( + 1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)), withFortificationDisabled(), + ) + n2 := newTestRaft( + 2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)), withFortificationDisabled(), + ) + n3 := newTestRaft( + 3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)), withFortificationDisabled(), + ) n1.checkQuorum = true n2.checkQuorum = true @@ -1838,9 +1869,9 @@ func TestDisruptiveFollower(t *testing.T) { // Then pre-vote phase prevents this isolated node from forcing // current leader to step down, thus less disruptions.
func TestDisruptiveFollowerPreVote(t *testing.T) { - n1 := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3))) - n2 := newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3))) - n3 := newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3))) + n1 := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)), withFortificationDisabled()) + n2 := newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)), withFortificationDisabled()) + n3 := newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)), withFortificationDisabled()) n1.checkQuorum = true n2.checkQuorum = true @@ -1976,20 +2007,10 @@ func TestBcastBeat(t *testing.T) { msgs := sm.readMessages() require.Len(t, msgs, 2) - wantCommitMap := map[pb.PeerID]uint64{ - 2: min(sm.raftLog.committed, sm.trk.Progress(2).Match), - 3: min(sm.raftLog.committed, sm.trk.Progress(3).Match), - } for i, m := range msgs { require.Equal(t, pb.MsgHeartbeat, m.Type, "#%d", i) require.Zero(t, m.Index, "#%d", i) require.Zero(t, m.LogTerm, "#%d", i) - - commit, ok := wantCommitMap[m.To] - require.True(t, ok, "#%d", i) - require.Equal(t, commit, m.Commit, "#%d", i) - delete(wantCommitMap, m.To) - require.Empty(t, m.Entries, "#%d", i) } } @@ -2329,7 +2350,7 @@ func TestRestoreIgnoreSnapshot(t *testing.T) { storage := newTestMemoryStorage(withPeers(1, 2)) sm := newTestRaft(1, 10, 1, storage) require.True(t, sm.raftLog.append(init)) - sm.raftLog.commitTo(logMark{term: init.term, index: commit}) + sm.raftLog.commitTo(LogMark{Term: init.term, Index: commit}) s := snapshot{ term: 1, @@ -2726,8 +2747,9 @@ func TestCommitAfterRemoveNode(t *testing.T) { require.Equal(t, []byte("hello"), ents[0].Data) } -// TestLeaderTransferToUpToDateNode verifies transferring should succeed -// if the transferee has the most up-to-date log entries when transfer starts. +// TestLeaderTransferToUpToDateNode verifies transferring should start +// immediately if the transferee has the most up-to-date log entries when +// transfer is requested. func TestLeaderTransferToUpToDateNode(t *testing.T) { nt := newNetwork(nil, nil, nil) nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup}) @@ -2749,11 +2771,11 @@ func TestLeaderTransferToUpToDateNode(t *testing.T) { checkLeaderTransferState(t, lead, StateLeader, 1) } -// TestLeaderTransferToUpToDateNodeFromFollower verifies transferring should succeed -// if the transferee has the most up-to-date log entries when transfer starts. -// Not like TestLeaderTransferToUpToDateNode, where the leader transfer message -// is sent to the leader, in this test case every leader transfer message is sent -// to the follower. +// TestLeaderTransferToUpToDateNodeFromFollower verifies transferring should +// start immediately if the transferee has the most up-to-date log entries when +// transfer starts. Unlike TestLeaderTransferToUpToDateNode, where the leader +// transfer message is sent to the leader, in this test case every leader +// transfer message is sent to the follower and is redirected to the leader. 
func TestLeaderTransferToUpToDateNodeFromFollower(t *testing.T) { nt := newNetwork(nil, nil, nil) nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup}) @@ -2775,8 +2797,47 @@ func TestLeaderTransferToUpToDateNodeFromFollower(t *testing.T) { checkLeaderTransferState(t, lead, StateLeader, 1) } +// TestLeaderTransferLeaderStepsDownImmediately verifies that the outgoing +// leader steps down to a follower as soon as it sends a MsgTimeoutNow to the +// transfer target, even before (and regardless of whether) the target receives +// the MsgTimeoutNow and campaigns. +func TestLeaderTransferLeaderStepsDownImmediately(t *testing.T) { + nt := newNetwork(nil, nil, nil) + nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup}) + + // Isolate node 3. It is up-to-date, so the leadership transfer will be + // initiated immediately, but node 3 will never receive the MsgTimeoutNow and + // call an election. + nt.isolate(3) + + lead := nt.peers[1].(*raft) + require.Equal(t, uint64(1), lead.Term) + require.Equal(t, pb.PeerID(1), lead.lead) + + // Transfer leadership to 3. The leader steps down immediately in the same + // term, waiting for the transfer target to call an election. + nt.send(pb.Message{From: 3, To: 1, Type: pb.MsgTransferLeader}) + + require.Equal(t, uint64(1), lead.Term) + checkLeaderTransferState(t, lead, StateFollower, 1) + + // TODO(arul): a leader that steps down will currently never campaign due to + // the fortification promise that it made to itself. We'll need to fix this. + lead.deFortify(lead.lead, lead.Term) + + // Eventually, the previous leader gives up on waiting and calls an election + // to reestablish leadership at the next term. + for i := 0; i < lead.randomizedElectionTimeout; i++ { + lead.tick() + } + nt.send(lead.readMessages()...) + + require.Equal(t, uint64(2), lead.Term) + checkLeaderTransferState(t, lead, StateLeader, 1) +} + // TestLeaderTransferWithCheckQuorum ensures transferring leader still works -// even the current leader is still under its leader lease +// even if the current leader is still under its leader lease. func TestLeaderTransferWithCheckQuorum(t *testing.T) { nt := newNetwork(nil, nil, nil) for i := 1; i < 4; i++ { @@ -2811,18 +2872,23 @@ } func TestLeaderTransferToSlowFollower(t *testing.T) { - defaultLogger.EnableDebug() nt := newNetwork(nil, nil, nil) nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup}) + // Isolate node 3 and propose an entry on 1. This will cause node 3 to fall + // behind on its log, so that the leadership transfer won't be initiated + // immediately. nt.isolate(3) nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}}) - nt.recover() lead := nt.peers[1].(*raft) + require.Equal(t, uint64(2), lead.trk.Progress(1).Match) require.Equal(t, uint64(1), lead.trk.Progress(3).Match) - // Transfer leadership to 3 when node 3 is lack of log. + // Reconnect node 3 and initiate a transfer of leadership from node 1 to node + // 3. The leader (node 1) will catch it up on log entries using MsgApps before + // transferring it leadership using MsgTimeoutNow. + nt.recover() nt.send(pb.Message{From: 3, To: 1, Type: pb.MsgTransferLeader}) checkLeaderTransferState(t, lead, StateFollower, 3) @@ -2844,7 +2910,7 @@ func TestLeaderTransferAfterSnapshot(t *testing.T) { require.Equal(t, uint64(1), lead.trk.Progress(3).Match) filtered := pb.Message{} - // Snapshot needs to be applied before sending MsgAppResp + // Snapshot needs to be applied before sending MsgAppResp.
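+ // The hook below drops node 3's non-rejecting MsgAppResp messages, capturing + // the last one in filtered, so that the snapshot is applied before the + // leader learns of node 3's progress.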
nt.msgHook = func(m pb.Message) bool { if m.Type != pb.MsgAppResp || m.From != 3 || m.Reject { return true @@ -2852,12 +2918,12 @@ func TestLeaderTransferAfterSnapshot(t *testing.T) { filtered = m return false } - // Transfer leadership to 3 when node 3 is lack of snapshot. + // Transfer leadership to 3 when node 3 is missing a snapshot. nt.send(pb.Message{From: 3, To: 1, Type: pb.MsgTransferLeader}) require.Equal(t, StateLeader, lead.state) require.NotEqual(t, pb.Message{}, filtered) - // Apply snapshot and resume progress + // Apply snapshot and resume progress. follower := nt.peers[3].(*raft) snap := follower.raftLog.nextUnstableSnapshot() nt.storage[3].ApplySnapshot(*snap) @@ -2893,7 +2959,11 @@ func TestLeaderTransferTimeout(t *testing.T) { nt := newNetwork(nil, nil, nil) nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup}) + // Isolate node 3 and propose an entry on 1. This will cause node 3 to fall + // behind on its log, so that the leadership transfer won't be initiated + // immediately. If it were, we couldn't test the timeout. nt.isolate(3) + nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}}) lead := nt.peers[1].(*raft) @@ -2919,32 +2989,40 @@ func TestLeaderTransferIgnoreProposal(t *testing.T) { nt := newNetwork(r, nil, nil) nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup}) + // Isolate node 3 and propose an entry on 1. This will cause node 3 to fall + // behind on its log, so that the leadership transfer won't be initiated + // immediately. nt.isolate(3) + nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}}) - lead := nt.peers[1].(*raft) - - nextEnts(r, s) // handle empty entry - - // Transfer leadership to isolated node to let transfer pending, then send proposal. + // Transfer leadership to the isolated, behind node. This will leave the + // transfer in a pending state as the leader tries to catch up the target. nt.send(pb.Message{From: 3, To: 1, Type: pb.MsgTransferLeader}) + lead := nt.peers[1].(*raft) require.Equal(t, pb.PeerID(3), lead.leadTransferee) - nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}}) + // Then send proposal. This should be dropped. err := lead.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}}) require.Equal(t, ErrProposalDropped, err) + require.Equal(t, pb.PeerID(3), lead.leadTransferee) - require.Equal(t, uint64(1), lead.trk.Progress(1).Match) + require.Equal(t, uint64(2), lead.trk.Progress(1).Match) } func TestLeaderTransferReceiveHigherTermVote(t *testing.T) { - nt := newNetwork(nil, nil, nil) + nt := newNetworkWithConfig(fortificationDisabledConfig, nil, nil, nil) nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup}) + // Isolate node 3 and propose an entry on 1. This will cause node 3 to fall + // behind on its log, so that the leadership transfer won't be initiated + // immediately. nt.isolate(3) + nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}}) lead := nt.peers[1].(*raft) - // Transfer leadership to isolated node to let transfer pending. + // Transfer leadership to the isolated, behind node. This will leave the + // transfer in a pending state as the leader tries to catch up the target. 
nt.send(pb.Message{From: 3, To: 1, Type: pb.MsgTransferLeader}) require.Equal(t, pb.PeerID(3), lead.leadTransferee) @@ -2957,11 +3035,15 @@ func TestLeaderTransferRemoveNode(t *testing.T) { nt := newNetwork(nil, nil, nil) nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup}) - nt.ignore(pb.MsgTimeoutNow) + // Isolate node 3 and propose an entry on 1. This will cause node 3 to fall + // behind on its log, so that the leadership transfer won't be initiated + // immediately. + nt.isolate(3) + nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}}) lead := nt.peers[1].(*raft) - // The leadTransferee is removed when leadship transferring. + // The leadTransferee is removed with leadership transfer in progress. nt.send(pb.Message{From: 3, To: 1, Type: pb.MsgTransferLeader}) require.Equal(t, pb.PeerID(3), lead.leadTransferee) @@ -2974,11 +3056,15 @@ func TestLeaderTransferDemoteNode(t *testing.T) { nt := newNetwork(nil, nil, nil) nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup}) - nt.ignore(pb.MsgTimeoutNow) + // Isolate node 3 and propose an entry on 1. This will cause node 3 to fall + // behind on its log, so that the leadership transfer won't be initiated + // immediately. + nt.isolate(3) + nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}}) lead := nt.peers[1].(*raft) - // The leadTransferee is demoted when leadship transferring. + // The leadTransferee is demoted with leadership transfer in progress. nt.send(pb.Message{From: 3, To: 1, Type: pb.MsgTransferLeader}) require.Equal(t, pb.PeerID(3), lead.leadTransferee) @@ -3000,12 +3086,17 @@ func TestLeaderTransferDemoteNode(t *testing.T) { checkLeaderTransferState(t, lead, StateLeader, 1) } -// TestLeaderTransferBack verifies leadership can transfer back to self when last transfer is pending. +// TestLeaderTransferBack verifies leadership can transfer back to self when +// last transfer is pending, which cancels the transfer attempt. func TestLeaderTransferBack(t *testing.T) { nt := newNetwork(nil, nil, nil) nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup}) + // Isolate node 3 and propose an entry on 1. This will cause node 3 to fall + // behind on its log, so that the leadership transfer won't be initiated + // immediately. nt.isolate(3) + nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}}) lead := nt.peers[1].(*raft) @@ -3018,13 +3109,18 @@ func TestLeaderTransferBack(t *testing.T) { checkLeaderTransferState(t, lead, StateLeader, 1) } -// TestLeaderTransferSecondTransferToAnotherNode verifies leader can transfer to another node -// when last transfer is pending. +// TestLeaderTransferSecondTransferToAnotherNode verifies leader can transfer to +// another node when last transfer is pending, which cancels the previous +// transfer attempt and starts a new one. func TestLeaderTransferSecondTransferToAnotherNode(t *testing.T) { nt := newNetwork(nil, nil, nil) nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup}) + // Isolate node 3 and propose an entry on 1. This will cause node 3 to fall + // behind on its log, so that the leadership transfer won't be initiated + // immediately. 
nt.isolate(3) + nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}}) lead := nt.peers[1].(*raft) @@ -3037,13 +3133,18 @@ func TestLeaderTransferSecondTransferToAnotherNode(t *testing.T) { checkLeaderTransferState(t, lead, StateFollower, 2) } -// TestLeaderTransferSecondTransferToSameNode verifies second transfer leader request -// to the same node should not extend the timeout while the first one is pending. +// TestLeaderTransferSecondTransferToSameNode verifies second transfer leader +// request to the same node should not extend the timeout while the first one is +// pending. func TestLeaderTransferSecondTransferToSameNode(t *testing.T) { nt := newNetwork(nil, nil, nil) nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup}) + // Isolate node 3 and propose an entry on 1. This will cause node 3 to fall + // behind on its log, so that the leadership transfer won't be initiated + // immediately. nt.isolate(3) + nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}}) lead := nt.peers[1].(*raft) @@ -3069,11 +3170,10 @@ func checkLeaderTransferState(t *testing.T, r *raft, state StateType, lead pb.Pe require.Equal(t, None, r.leadTransferee) } -// TestTransferNonMember verifies that when a MsgTimeoutNow arrives at -// a node that has been removed from the group, nothing happens. -// (previously, if the node also got votes, it would panic as it -// transitioned to StateLeader) -func TestTransferNonMember(t *testing.T) { +// TestLeaderTransferNonMember verifies that when a MsgTimeoutNow arrives at a +// node that has been removed from the group, nothing happens. (previously, if +// the node also got votes, it would panic as it transitioned to StateLeader). +func TestLeaderTransferNonMember(t *testing.T) { r := newTestRaft(1, 5, 1, newTestMemoryStorage(withPeers(2, 3, 4))) r.Step(pb.Message{From: 2, To: 1, Type: pb.MsgTimeoutNow}) @@ -3082,15 +3182,119 @@ func TestTransferNonMember(t *testing.T) { require.Equal(t, StateFollower, r.state) } +// TestLeaderTransferDifferentTerms verifies that a MsgTimeoutNow will only be +// respected if it is from the current term or from a new term. +func TestLeaderTransferDifferentTerms(t *testing.T) { + nt := newNetwork(nil, nil, nil) + nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup}) + + // Transfer leadership to node 2, then 3, to drive up the term. + nt.send(pb.Message{From: 2, To: 1, Type: pb.MsgTransferLeader}) + nt.send(pb.Message{From: 3, To: 2, Type: pb.MsgTransferLeader}) + for i, p := range nt.peers { + r := p.(*raft) + expState := StateFollower + if i == 3 { + expState = StateLeader + } + require.Equal(t, expState, r.state) + require.Equal(t, uint64(3), r.Term) + } + + // Send a MsgTimeoutNow to node 1 from an old term. This should be ignored. + // This is important, as a MsgTimeoutNow allows a follower to call a "force" + // election, which bypasses pre-vote and leader support safeguards. We don't + // want a stale MsgTimeoutNow sent from an old leader giving a follower + // permission to overthrow a newer leader. + nt.send(pb.Message{From: 2, To: 1, Term: 2, Type: pb.MsgTimeoutNow}) + n1 := nt.peers[1].(*raft) + require.Equal(t, StateFollower, n1.state) + require.Equal(t, uint64(3), n1.Term) + + // Send a MsgTimeoutNow to node 1 from the current term. This should cause it + // to call an election for the _next_ term, which it will win. 
+ nt.send(pb.Message{From: 3, To: 1, Term: 3, Type: pb.MsgTimeoutNow}) + require.Equal(t, StateLeader, n1.state) + require.Equal(t, uint64(4), n1.Term) + + // Send a MsgTimeoutNow to node 2 from a new term. This should advance the + // term on node 2 and cause it to call an election for the _next_ term, which + // it will win. + nt.send(pb.Message{From: 1, To: 2, Term: 5, Type: pb.MsgTimeoutNow}) + n2 := nt.peers[2].(*raft) + require.Equal(t, StateLeader, n2.state) + require.Equal(t, uint64(6), n2.Term) +} + +// TestLeaderTransferStaleFollower verifies that a MsgTimeoutNow received by a +// stale follower (a follower still at an earlier term) will cause the follower +// to call an election which it cannot win. +func TestLeaderTransferStaleFollower(t *testing.T) { + nt := newNetwork(nil, nil, nil) + nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup}) + n1 := nt.peers[1].(*raft) + n2 := nt.peers[2].(*raft) + n3 := nt.peers[3].(*raft) + nodes := []*raft{n1, n2, n3} + + // Attempt to transfer leadership to node 3. The MsgTimeoutNow is sent + // immediately and node 1 steps down as leader, but node 3 does not receive + // the message due to a network partition. + nt.isolate(3) + nt.send(pb.Message{From: 3, To: 1, Type: pb.MsgTransferLeader}) + for _, n := range nodes { + require.Equal(t, StateFollower, n.state) + require.Equal(t, uint64(1), n.Term) + } + + // TODO(arul): a leader that steps down will currently never campaign due to + // the fortification promise that it made to itself. We'll need to fix this. + n1.deFortify(n1.lead, n1.Term) + + // Eventually, the previous leader gives up on waiting and calls an election + // to reestablish leadership at the next term. Node 3 does not hear about this + // either. + for i := 0; i < n1.randomizedElectionTimeout; i++ { + n1.tick() + } + nt.send(nt.filter(n1.readMessages())...) + for _, n := range nodes { + expState := StateFollower + if n == n1 { + expState = StateLeader + } + expTerm := uint64(2) + if n == n3 { + expTerm = 1 + } + require.Equal(t, expState, n.state) + require.Equal(t, expTerm, n.Term) + } + + // The network partition heals and n3 receives the lost MsgTimeoutNow that n1 + // had previously tried to send to it back in term 1. It calls an unsuccessful + // election, through which it learns about the new leadership term. + nt.recover() + nt.send(pb.Message{From: 1, To: 3, Term: 1, Type: pb.MsgTimeoutNow}) + for _, n := range nodes { + expState := StateFollower + if n == n1 { + expState = StateLeader + } + require.Equal(t, expState, n.state) + require.Equal(t, uint64(2), n.Term) + } +} + // TestNodeWithSmallerTermCanCompleteElection tests the scenario where a node // that has been partitioned away (and fallen behind) rejoins the cluster at // about the same time the leader node gets partitioned away. // Previously the cluster would come to a standstill when run with PreVote // enabled.
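[Editorial aside on the two MsgTimeoutNow tests just added: the rule they pin down is small enough to state as a predicate. The sketch below is illustrative only — honorTimeoutNow is an invented name, and the real check lives in the package's message-stepping code.]

// honorTimeoutNow captures the term rule exercised above: msgTerm 2 against
// curTerm 3 is dropped, msgTerm 3 triggers a campaign at term 4, and msgTerm
// 5 first advances the term and then campaigns at term 6.
func honorTimeoutNow(msgTerm, curTerm uint64) bool {
	// A stale MsgTimeoutNow must be ignored: it licenses a "force" election
	// that bypasses pre-vote and leader-support safeguards, and an old
	// leader must not hand that license out against a newer leader.
	return msgTerm >= curTerm
}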
func TestNodeWithSmallerTermCanCompleteElection(t *testing.T) { - n1 := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3))) - n2 := newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3))) - n3 := newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3))) + n1 := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)), withFortificationDisabled()) + n2 := newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)), withFortificationDisabled()) + n3 := newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)), withFortificationDisabled()) n1.becomeFollower(1, None) n2.becomeFollower(1, None) @@ -3155,9 +3359,9 @@ func TestNodeWithSmallerTermCanCompleteElection(t *testing.T) { // TestPreVoteWithSplitVote verifies that after split vote, cluster can complete // election in next round. func TestPreVoteWithSplitVote(t *testing.T) { - n1 := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3))) - n2 := newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3))) - n3 := newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3))) + n1 := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)), withFortificationDisabled()) + n2 := newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)), withFortificationDisabled()) + n3 := newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)), withFortificationDisabled()) n1.becomeFollower(1, None) n2.becomeFollower(1, None) @@ -3278,9 +3482,9 @@ func TestLearnerCampaign(t *testing.T) { // n2 is follower with term 2 // n3 is partitioned, with term 4 and less log, state is candidate func newPreVoteMigrationCluster(t *testing.T) *network { - n1 := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3))) - n2 := newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3))) - n3 := newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3))) + n1 := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)), withFortificationDisabled()) + n2 := newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)), withFortificationDisabled()) + n3 := newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)), withFortificationDisabled()) n1.becomeFollower(1, None) n2.becomeFollower(1, None) @@ -3377,7 +3581,7 @@ func TestPreVoteMigrationWithFreeStuckPreCandidate(t *testing.T) { } func testConfChangeCheckBeforeCampaign(t *testing.T, v2 bool) { - nt := newNetwork(nil, nil, nil) + nt := newNetworkWithConfig(fortificationDisabledConfig, nil, nil, nil) n1 := nt.peers[1].(*raft) n2 := nt.peers[2].(*raft) nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup}) @@ -3418,29 +3622,34 @@ func testConfChangeCheckBeforeCampaign(t *testing.T, v2 bool) { // Transfer leadership to peer 2. nt.send(pb.Message{From: 2, To: 1, Type: pb.MsgTransferLeader}) - assert.Equal(t, StateLeader, n1.state) - // It's still follower because committed conf change is not applied. + // The outgoing leader steps down immediately. + assert.Equal(t, StateFollower, n1.state) + // The transfer target does not campaign immediately because the committed + // conf change is not applied. assert.Equal(t, StateFollower, n2.state) - // Abort transfer leader - for i := 0; i < n1.electionTimeout; i++ { + // TODO(arul): a leader that steps down will currently never campaign due to + // the fortification promise that it made to itself. We'll need to fix this. + n1.deFortify(n1.lead, n1.Term) + + // Advance apply on node 1 and re-establish leadership. 
+ nextEnts(n1, nt.storage[1]) + for i := 0; i < n1.randomizedElectionTimeout; i++ { n1.tick() } + nt.send(n1.readMessages()...) + assert.Equal(t, StateLeader, n1.state) - // Advance apply + // Advance apply on node 2. nextEnts(n2, nt.storage[2]) // Transfer leadership to peer 2 again. nt.send(pb.Message{From: 2, To: 1, Type: pb.MsgTransferLeader}) + // The outgoing leader steps down immediately. assert.Equal(t, StateFollower, n1.state) + // The transfer target campaigns immediately now that the committed conf + // change is applied. assert.Equal(t, StateLeader, n2.state) - - nextEnts(n1, nt.storage[1]) - // Trigger campaign in node 2 - for i := 0; i < n1.randomizedElectionTimeout; i++ { - n1.tick() - } - assert.Equal(t, StateCandidate, n1.state) } // TestConfChangeCheckBeforeCampaign tests if unapplied ConfChange is checked before campaign. @@ -3785,6 +3994,15 @@ func preVoteConfig(c *Config) { c.PreVote = true } +func fortificationDisabledConfig(c *Config) { + c.StoreLiveness = raftstoreliveness.Disabled{} +} + +func preVoteConfigWithFortificationDisabled(c *Config) { + c.PreVote = true + c.StoreLiveness = raftstoreliveness.Disabled{} +} + func (nw *network) send(msgs ...pb.Message) { for len(msgs) > 0 { m := msgs[0] @@ -3885,7 +4103,35 @@ func SetRandomizedElectionTimeout(r *RawNode, v int) { setRandomizedElectionTimeout(r.raft, v) } -func newTestConfig(id pb.PeerID, election, heartbeat int, storage Storage) *Config { +// testConfigModifiers allows callers to optionally modify newTestConfig. +type testConfigModifiers struct { + testingDisableFortification bool +} + +// testConfigModifierOpt is the type of an optional parameter to newTestConfig +// that may be used to modify the config. +type testConfigModifierOpt func(*testConfigModifiers) + +// withFortificationDisabled disables raft fortification.
+func withFortificationDisabled() testConfigModifierOpt { + return func(modifier *testConfigModifiers) { + modifier.testingDisableFortification = true + } +} + +func newTestConfig( + id pb.PeerID, election, heartbeat int, storage Storage, opts ...testConfigModifierOpt, +) *Config { + modifiers := testConfigModifiers{} + for _, opt := range opts { + opt(&modifiers) + } + var storeLiveness raftstoreliveness.StoreLiveness + if modifiers.testingDisableFortification { + storeLiveness = raftstoreliveness.Disabled{} + } else { + storeLiveness = raftstoreliveness.AlwaysLive{} + } return &Config{ ID: id, ElectionTick: election, @@ -3893,7 +4139,8 @@ func newTestConfig(id pb.PeerID, election, heartbeat int, storage Storage) *Conf Storage: storage, MaxSizePerMsg: noLimit, MaxInflightMsgs: 256, - StoreLiveness: raftstoreliveness.AlwaysLive{}, + StoreLiveness: storeLiveness, + CRDBVersion: cluster.MakeTestingClusterSettings().Version, } } @@ -3919,12 +4166,16 @@ func newTestMemoryStorage(opts ...testMemoryStorageOptions) *MemoryStorage { return ms } -func newTestRaft(id pb.PeerID, election, heartbeat int, storage Storage) *raft { - return newRaft(newTestConfig(id, election, heartbeat, storage)) +func newTestRaft( + id pb.PeerID, election, heartbeat int, storage Storage, opts ...testConfigModifierOpt, +) *raft { + return newRaft(newTestConfig(id, election, heartbeat, storage, opts...)) } -func newTestLearnerRaft(id pb.PeerID, election, heartbeat int, storage Storage) *raft { - cfg := newTestConfig(id, election, heartbeat, storage) +func newTestLearnerRaft( + id pb.PeerID, election, heartbeat int, storage Storage, opts ...testConfigModifierOpt, +) *raft { + cfg := newTestConfig(id, election, heartbeat, storage, opts...) return newRaft(cfg) } diff --git a/pkg/raft/raftpb/confstate.go b/pkg/raft/raftpb/confstate.go index 121547f4b962..1d514d010e28 100644 --- a/pkg/raft/raftpb/confstate.go +++ b/pkg/raft/raftpb/confstate.go @@ -45,3 +45,10 @@ func (cs ConfState) Equivalent(cs2 ConfState) error { } return nil } + +func (cs ConfState) Describe() string { + return fmt.Sprintf( + "Voters:%v VotersOutgoing:%v Learners:%v LearnersNext:%v AutoLeave:%v", + cs.Voters, cs.VotersOutgoing, cs.Learners, cs.LearnersNext, cs.AutoLeave, + ) +} diff --git a/pkg/raft/raftstoreliveness/store_liveness.go b/pkg/raft/raftstoreliveness/store_liveness.go index c704f4e09fa1..edad788763b0 100644 --- a/pkg/raft/raftstoreliveness/store_liveness.go +++ b/pkg/raft/raftstoreliveness/store_liveness.go @@ -94,3 +94,29 @@ func (AlwaysLive) SupportFromEnabled() bool { func (AlwaysLive) SupportExpired(hlc.Timestamp) bool { return false } + +// Disabled is a mock implementation of StoreLiveness where store liveness +// is disabled. +type Disabled struct{} + +var _ StoreLiveness = Disabled{} + +// SupportFor implements the StoreLiveness interface. +func (Disabled) SupportFor(pb.PeerID) (pb.Epoch, bool) { + panic("unimplemented") +} + +// SupportFrom implements the StoreLiveness interface. +func (Disabled) SupportFrom(pb.PeerID) (pb.Epoch, hlc.Timestamp, bool) { + panic("should not be called without checking SupportFromEnabled") +} + +// SupportFromEnabled implements the StoreLiveness interface. +func (Disabled) SupportFromEnabled() bool { + return false // disabled +} + +// SupportExpired implements the StoreLiveness interface. 
+func (Disabled) SupportExpired(hlc.Timestamp) bool { + panic("unimplemented") +} diff --git a/pkg/raft/rafttest/BUILD.bazel b/pkg/raft/rafttest/BUILD.bazel index ec636a88a9ec..d7ebb0bacae1 100644 --- a/pkg/raft/rafttest/BUILD.bazel +++ b/pkg/raft/rafttest/BUILD.bazel @@ -34,10 +34,13 @@ go_library( importpath = "github.com/cockroachdb/cockroach/pkg/raft/rafttest", visibility = ["//visibility:public"], deps = [ + "//pkg/clusterversion", "//pkg/raft", "//pkg/raft/raftpb", "//pkg/raft/raftstoreliveness", "//pkg/raft/tracker", + "//pkg/roachpb", + "//pkg/settings/cluster", "//pkg/util/hlc", "@com_github_cockroachdb_datadriven//:datadriven", "@com_github_cockroachdb_errors//:errors", @@ -56,6 +59,5 @@ go_test( deps = [ "//pkg/raft", "//pkg/raft/raftpb", - "//pkg/testutils/skip", ], ) diff --git a/pkg/raft/rafttest/interaction_env_handler.go b/pkg/raft/rafttest/interaction_env_handler.go index 511385c75dc3..034f5b05a54b 100644 --- a/pkg/raft/rafttest/interaction_env_handler.go +++ b/pkg/raft/rafttest/interaction_env_handler.go @@ -47,7 +47,8 @@ func (env *InteractionEnv) Handle(t *testing.T, d datadriven.TestData) string { case "add-nodes": // Example: // - // add-nodes voters=(1 2 3) learners=(4 5) index=2 content=foo async-storage-writes=true + // add-nodes voters=(1 2 3) learners=(4 5) index=2 + // content=foo async-storage-writes=true crdb-version=24.3 err = env.handleAddNodes(t, d) case "campaign": // Example: @@ -159,6 +160,13 @@ func (env *InteractionEnv) Handle(t *testing.T, d datadriven.TestData) string { // // Example: send-snapshot 1 3 env.handleSendSnapshot(t, d) + case "step-down": + // Steps down as the leader. No-op if not the leader. + // + // Example: + // + // step-down 1 + err = env.handleStepDown(t, d) case "propose": // Propose an entry. // @@ -254,6 +262,17 @@ func (env *InteractionEnv) Handle(t *testing.T, d datadriven.TestData) string { // Explanation: // 1 (from_store) grants support for 2 (for_store) at a higher epoch. err = env.handleGrantSupport(t, d) + case "print-support-state": + // Prints the support state being tracked by a raft leader. Empty on a + // follower. + // + // print-support-state id + // Arguments are: + // id - id of the raft peer whose support map to print. 
+ // + // Example: + // print-support-state 1 + err = env.handlePrintSupportState(t, d) default: err = fmt.Errorf("unknown command") diff --git a/pkg/raft/rafttest/interaction_env_handler_add_nodes.go b/pkg/raft/rafttest/interaction_env_handler_add_nodes.go index 3f55a5eceb9c..fd4f157d1be6 100644 --- a/pkg/raft/rafttest/interaction_env_handler_add_nodes.go +++ b/pkg/raft/rafttest/interaction_env_handler_add_nodes.go @@ -23,8 +23,11 @@ import ( "reflect" "testing" + "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/raft" pb "github.com/cockroachdb/cockroach/pkg/raft/raftpb" + "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/datadriven" ) @@ -64,6 +67,17 @@ func (env *InteractionEnv) handleAddNodes(t *testing.T, d datadriven.TestData) e arg.Scan(t, i, &cfg.DisableConfChangeValidation) case "step-down-on-removal": arg.Scan(t, i, &cfg.StepDownOnRemoval) + case "crdb-version": + var key string + arg.Scan(t, i, &key) + version, err := roachpb.ParseVersion(key) + if err != nil { + return err + } + settings := cluster.MakeTestingClusterSettingsWithVersions(version, + clusterversion.RemoveDevOffset(clusterversion.MinSupported.Version()), + true /* initializeVersion */) + cfg.CRDBVersion = settings.Version } } } @@ -130,6 +144,12 @@ func (env *InteractionEnv) AddNodes(n int, cfg raft.Config, snap pb.Snapshot) er env.Fabric.addNode() cfg.StoreLiveness = newStoreLiveness(env.Fabric, id) + // If the add-nodes command hasn't specified the CRDBVersion, use the + // latest one. + if cfg.CRDBVersion == nil { + cfg.CRDBVersion = cluster.MakeTestingClusterSettings().Version + } + if env.Options.OnConfig != nil { env.Options.OnConfig(&cfg) if cfg.ID != id { diff --git a/pkg/raft/rafttest/interaction_env_handler_forget_leader.go b/pkg/raft/rafttest/interaction_env_handler_forget_leader.go index b239db75e1d2..bf0111ac13d6 100644 --- a/pkg/raft/rafttest/interaction_env_handler_forget_leader.go +++ b/pkg/raft/rafttest/interaction_env_handler_forget_leader.go @@ -1,3 +1,6 @@ +// This code has been modified from its original form by Cockroach Labs, Inc. +// All modifications are Copyright 2024 Cockroach Labs, Inc. +// // Copyright 2023 The etcd Authors // // Licensed under the Apache License, Version 2.0 (the "License"); @@ -30,3 +33,8 @@ func (env *InteractionEnv) handleForgetLeader(t *testing.T, d datadriven.TestDat func (env *InteractionEnv) ForgetLeader(idx int) { env.Nodes[idx].ForgetLeader() } + +func (env *InteractionEnv) handleStepDown(t *testing.T, d datadriven.TestData) error { + idx := firstAsNodeIdx(t, d) + return env.Nodes[idx].TestingStepDown() +} diff --git a/pkg/raft/rafttest/interaction_env_handler_raftstate.go b/pkg/raft/rafttest/interaction_env_handler_raftstate.go index 960eb144ce38..512b433e1fb5 100644 --- a/pkg/raft/rafttest/interaction_env_handler_raftstate.go +++ b/pkg/raft/rafttest/interaction_env_handler_raftstate.go @@ -19,9 +19,11 @@ package rafttest import ( "fmt" + "testing" "github.com/cockroachdb/cockroach/pkg/raft" "github.com/cockroachdb/cockroach/pkg/raft/raftpb" + "github.com/cockroachdb/datadriven" ) // isVoter checks whether node id is in the voter list within st. @@ -51,3 +53,11 @@ func (env *InteractionEnv) handleRaftState() error { } return nil } + +// handlePrintSupportState pretty-prints the support map being tracked by a raft +// peer.
+func (env *InteractionEnv) handlePrintSupportState(t *testing.T, d datadriven.TestData) error { + idx := firstAsNodeIdx(t, d) + fmt.Fprint(env.Output, env.Nodes[idx].TestingSupportStateString()) + return nil +} diff --git a/pkg/raft/rafttest/node.go b/pkg/raft/rafttest/node.go index 1c9dd4a00812..50468e14cbad 100644 --- a/pkg/raft/rafttest/node.go +++ b/pkg/raft/rafttest/node.go @@ -27,6 +27,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/raft" "github.com/cockroachdb/cockroach/pkg/raft/raftpb" "github.com/cockroachdb/cockroach/pkg/raft/raftstoreliveness" + "github.com/cockroachdb/cockroach/pkg/settings/cluster" ) type node struct { @@ -54,6 +55,7 @@ func startNode(id raftpb.PeerID, peers []raft.Peer, iface iface) *node { MaxInflightMsgs: 256, MaxUncommittedEntriesSize: 1 << 30, StoreLiveness: raftstoreliveness.AlwaysLive{}, + CRDBVersion: cluster.MakeTestingClusterSettings().Version, } rn := raft.StartNode(c, peers) n := &node{ @@ -145,6 +147,7 @@ func (n *node) restart() { MaxInflightMsgs: 256, MaxUncommittedEntriesSize: 1 << 30, StoreLiveness: raftstoreliveness.AlwaysLive{}, + CRDBVersion: cluster.MakeTestingClusterSettings().Version, } n.Node = raft.RestartNode(c) n.start() diff --git a/pkg/raft/rafttest/node_test.go b/pkg/raft/rafttest/node_test.go index 4323b76005b9..4a6ed4ec1e11 100644 --- a/pkg/raft/rafttest/node_test.go +++ b/pkg/raft/rafttest/node_test.go @@ -24,7 +24,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/raft" pb "github.com/cockroachdb/cockroach/pkg/raft/raftpb" - "github.com/cockroachdb/cockroach/pkg/testutils/skip" ) func TestBasicProgress(t *testing.T) { @@ -54,9 +53,6 @@ func TestBasicProgress(t *testing.T) { } func TestRestart(t *testing.T) { - // TODO(pav-kv): de-flake it. See https://github.com/etcd-io/raft/issues/181. - skip.UnderStress(t, "the test is flaky") - peers := []raft.Peer{{ID: 1, Context: nil}, {ID: 2, Context: nil}, {ID: 3, Context: nil}, {ID: 4, Context: nil}, {ID: 5, Context: nil}} nt := newRaftNetwork(1, 2, 3, 4, 5) @@ -136,15 +132,14 @@ func TestPause(t *testing.T) { } func waitLeader(ns []*node) int { - var l map[pb.PeerID]struct{} - var lindex int - + l := make(map[pb.PeerID]struct{}) for { - l = make(map[pb.PeerID]struct{}) + clear(l) + lindex := -1 for i, n := range ns { lead := n.Status().HardState.Lead - if lead != 0 { + if lead != raft.None { l[lead] = struct{}{} if n.id == lead { lindex = i @@ -152,7 +147,7 @@ func waitLeader(ns []*node) int { } } - if len(l) == 1 { + if len(l) == 1 && lindex != -1 { return lindex } } diff --git a/pkg/raft/rawnode.go b/pkg/raft/rawnode.go index e72f31bf9ac3..8e9b09798a2f 100644 --- a/pkg/raft/rawnode.go +++ b/pkg/raft/rawnode.go @@ -176,6 +176,7 @@ func (rn *RawNode) readyWithoutAccept() Ready { // MustSync returns true if the hard state and count of Raft entries indicate // that a synchronous write to persistent storage is required. +// NOTE: MustSync isn't used under AsyncStorageWrites mode. 
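[Editorial aside: the predicate change in the function below is the source of most of the MustSync=false to MustSync=true churn in the testdata later in this diff — advancing the commit index alone now forces a durable write. A minimal worked example; the wrapper function name is invented, while pb.HardState and MustSync are the real types and function from this file.]

// commitOnlyAdvanceRequiresSync shows the new behavior: no new entries, and
// only Commit differs between the previous and current hard state.
func commitOnlyAdvanceRequiresSync() bool {
	prev := pb.HardState{Term: 1, Vote: 1, Commit: 10, Lead: 1, LeadEpoch: 1}
	cur := pb.HardState{Term: 1, Vote: 1, Commit: 11, Lead: 1, LeadEpoch: 1}
	// Same Term/Vote/Lead/LeadEpoch, zero entries; only Commit moved.
	return MustSync(cur, prev, 0) // now true; it was false before this change
}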
func MustSync(st, prevst pb.HardState, entsnum int) bool { // Persistent state on all servers: // (Updated on stable storage before responding to RPCs) @@ -185,7 +186,7 @@ func MustSync(st, prevst pb.HardState, entsnum int) bool { // votedFor // log entries[] return entsnum != 0 || st.Vote != prevst.Vote || st.Term != prevst.Term || - st.Lead != prevst.Lead || st.LeadEpoch != prevst.LeadEpoch + st.Lead != prevst.Lead || st.LeadEpoch != prevst.LeadEpoch || st.Commit != prevst.Commit } func needStorageAppendMsg(r *raft, rd Ready) bool { @@ -217,6 +218,7 @@ func newStorageAppendMsg(r *raft, rd Ready) pb.Message { Entries: rd.Entries, } if ln := len(rd.Entries); ln != 0 { + // See comment in newStorageAppendRespMsg for why the accTerm is attached. m.LogTerm = r.raftLog.accTerm() m.Index = rd.Entries[ln-1].Index } @@ -237,6 +239,8 @@ func newStorageAppendMsg(r *raft, rd Ready) pb.Message { if !IsEmptySnap(rd.Snapshot) { snap := rd.Snapshot m.Snapshot = &snap + // See comment in newStorageAppendRespMsg for why the accTerm is attached. + m.LogTerm = r.raftLog.accTerm() } // Attach all messages in msgsAfterAppend as responses to be delivered after // the message is processed, along with a self-directed MsgStorageAppendResp @@ -248,6 +252,9 @@ func newStorageAppendMsg(r *raft, rd Ready) pb.Message { // handling to use a fast-path in r.raftLog.term() before the newly appended // entries are removed from the unstable log. m.Responses = r.msgsAfterAppend + // Warning: there is code outside raft package depending on the order of + // Responses, particularly MsgStorageAppendResp being last in this list. + // Change this with caution. if needStorageAppendRespMsg(rd) { m.Responses = append(m.Responses, newStorageAppendRespMsg(r, rd)) } @@ -321,6 +328,7 @@ func newStorageAppendRespMsg(r *raft, rd Ready) pb.Message { if !IsEmptySnap(rd.Snapshot) { snap := rd.Snapshot m.Snapshot = &snap + m.LogTerm = r.raftLog.accTerm() } return m } @@ -445,6 +453,27 @@ func (rn *RawNode) Advance(_ Ready) { rn.stepsOnAdvance = rn.stepsOnAdvance[:0] } +// Term returns the current in-memory term of this RawNode. This term may not +// yet have been persisted in storage. +func (rn *RawNode) Term() uint64 { + return rn.raft.Term +} + +// Lead returns the leader of Term(), or None if the leader is unknown. +func (rn *RawNode) Lead() pb.PeerID { + return rn.raft.lead +} + +// NextUnstableIndex returns the index of the next entry that will be sent to +// local storage, if there are any. All entries < this index are either stored, +// or have been sent to storage. +// +// NB: NextUnstableIndex can regress when the node accepts appends or snapshots +// from a newer leader. +func (rn *RawNode) NextUnstableIndex() uint64 { + return rn.raft.raftLog.unstable.entryInProgress + 1 +} + // Status returns the current status of the given group. This allocates, see // SparseStatus, BasicStatus and WithProgress for allocation-friendlier choices. func (rn *RawNode) Status() Status { @@ -470,9 +499,6 @@ func (rn *RawNode) LeadSupportStatus() LeadSupportStatus { return getLeadSupportStatus(rn.raft) } -// TODO(nvanbenschoten): remove this one the method is used. -var _ = (*RawNode).LeadSupportStatus - // ProgressType indicates the type of replica a Progress corresponds to. 
type ProgressType byte @@ -511,3 +537,11 @@ func (rn *RawNode) TransferLeader(transferee pb.PeerID) { func (rn *RawNode) ForgetLeader() error { return rn.raft.Step(pb.Message{Type: pb.MsgForgetLeader}) } + +func (rn *RawNode) TestingStepDown() error { + return rn.raft.testingStepDown() +} + +func (rn *RawNode) TestingSupportStateString() string { + return rn.raft.supportTracker.String() +} diff --git a/pkg/raft/rawnode_test.go b/pkg/raft/rawnode_test.go index 2a035ec8b4d4..84349e043057 100644 --- a/pkg/raft/rawnode_test.go +++ b/pkg/raft/rawnode_test.go @@ -350,7 +350,7 @@ func TestRawNodeJointAutoLeave(t *testing.T) { exp2Cs := pb.ConfState{Voters: []pb.PeerID{1}, Learners: []pb.PeerID{2}} s := newTestMemoryStorage(withPeers(1)) - rawNode, err := NewRawNode(newTestConfig(1, 10, 1, s)) + rawNode, err := NewRawNode(newTestConfig(1, 10, 1, s, withFortificationDisabled())) require.NoError(t, err) rawNode.Campaign() @@ -524,7 +524,7 @@ func TestRawNodeStart(t *testing.T) { HardState: pb.HardState{Term: 1, Commit: 3, Vote: 1, Lead: 1, LeadEpoch: 1}, Entries: nil, // emitted & checked in intermediate Ready cycle CommittedEntries: entries, - MustSync: false, // since we're only applying, not appending + MustSync: true, // because we are advancing the commit index } storage := NewMemoryStorage() @@ -596,7 +596,7 @@ func TestRawNodeStart(t *testing.T) { require.True(t, rawNode.HasReady()) rd = rawNode.Ready() require.Empty(t, rd.Entries) - require.False(t, rd.MustSync) + require.True(t, rd.MustSync) rawNode.Advance(rd) rd.SoftState, want.SoftState = nil, nil @@ -637,6 +637,9 @@ func TestRawNodeRestart(t *testing.T) { // Ensure we campaign after the election timeout has elapsed. for i := 0; i < rawNode.raft.randomizedElectionTimeout; i++ { + // TODO(arul): consider getting rid of this hack to reset the epoch so that + // we can call an election without panicking. + rawNode.raft.leadEpoch = 0 rawNode.raft.tick() } assert.Equal(t, StateCandidate, rawNode.raft.state) @@ -767,11 +770,13 @@ func TestRawNodeCommitPaginationAfterRestart(t *testing.T) { highestApplied = rd.CommittedEntries[n-1].Index rawNode.Advance(rd) rawNode.Step(pb.Message{ - Type: pb.MsgHeartbeat, - To: 1, - From: 2, // illegal, but we get away with it - Term: 1, - Commit: 11, + Type: pb.MsgApp, + To: 1, + From: 2, // illegal, but we get away with it + Term: 1, + LogTerm: 1, + Index: 11, + Commit: 11, }) } } diff --git a/pkg/raft/status.go b/pkg/raft/status.go index 2c96d9bf5f7e..0ec5ffb8ffc3 100644 --- a/pkg/raft/status.go +++ b/pkg/raft/status.go @@ -131,7 +131,7 @@ func getStatus(r *raft) Status { // NOTE: we assign to LeadSupportUntil even if RaftState is not currently // StateLeader. The replica may have been the leader and stepped down to a // follower before its lead support ran out. 
- s.LeadSupportUntil = hlc.Timestamp{} // TODO(arul): populate this field + s.LeadSupportUntil = r.supportTracker.LeadSupportUntil() return s } @@ -155,7 +155,7 @@ func getSparseStatus(r *raft) SparseStatus { func getLeadSupportStatus(r *raft) LeadSupportStatus { var s LeadSupportStatus s.BasicStatus = getBasicStatus(r) - s.LeadSupportUntil = hlc.Timestamp{} // TODO(arul): populate this field + s.LeadSupportUntil = r.supportTracker.LeadSupportUntil() return s } diff --git a/pkg/raft/testdata/async_storage_writes.txt b/pkg/raft/testdata/async_storage_writes.txt index 7c72cc870ac9..de45ee670897 100644 --- a/pkg/raft/testdata/async_storage_writes.txt +++ b/pkg/raft/testdata/async_storage_writes.txt @@ -94,6 +94,7 @@ stabilize 1->3 MsgApp Term:1 Log:1/10 Commit:10 Entries:[1/11 EntryNormal ""] 1->AppendThread MsgStorageAppend Term:1 Log:1/11 Commit:10 Vote:1 Lead:1 LeadEpoch:1 Entries:[1/11 EntryNormal ""] Responses:[ 1->1 MsgAppResp Term:1 Log:0/11 Commit:10 + 1->1 MsgFortifyLeaderResp Term:1 Log:0/0 LeadEpoch:1 AppendThread->1 MsgStorageAppendResp Term:0 Log:1/11 ] > 2 receiving messages @@ -107,6 +108,7 @@ stabilize 1->AppendThread MsgStorageAppend Term:1 Log:1/11 Commit:10 Vote:1 Lead:1 LeadEpoch:1 Entries:[1/11 EntryNormal ""] Responses: 1->1 MsgAppResp Term:1 Log:0/11 Commit:10 + 1->1 MsgFortifyLeaderResp Term:1 Log:0/0 LeadEpoch:1 AppendThread->1 MsgStorageAppendResp Term:0 Log:1/11 > 2 handling Ready Ready MustSync=true: @@ -132,6 +134,7 @@ stabilize ] > 1 receiving messages 1->1 MsgAppResp Term:1 Log:0/11 Commit:10 + 1->1 MsgFortifyLeaderResp Term:1 Log:0/0 LeadEpoch:1 AppendThread->1 MsgStorageAppendResp Term:0 Log:1/11 > 2 processing append thread Processing: @@ -157,7 +160,7 @@ stabilize > 3 receiving messages AppendThread->3 MsgStorageAppendResp Term:0 Log:1/11 > 1 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:1 Vote:1 Commit:11 Lead:1 LeadEpoch:1 CommittedEntries: 1/11 EntryNormal "" @@ -182,7 +185,7 @@ stabilize Responses: ApplyThread->1 MsgStorageApplyResp Term:0 Log:0/0 Entries:[1/11 EntryNormal ""] > 2 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:1 Vote:1 Commit:11 Lead:1 LeadEpoch:1 CommittedEntries: 1/11 EntryNormal "" @@ -194,7 +197,7 @@ stabilize ApplyThread->2 MsgStorageApplyResp Term:0 Log:0/0 Entries:[1/11 EntryNormal ""] ] > 3 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:1 Vote:1 Commit:11 Lead:1 LeadEpoch:1 CommittedEntries: 1/11 EntryNormal "" @@ -259,9 +262,9 @@ process-ready 1 2 3 status 1 ---- -1: StateReplicate match=11 next=13 -2: StateReplicate match=11 next=13 inflight=1 -3: StateReplicate match=11 next=13 inflight=1 +1: StateReplicate match=11 next=13 sentCommit=10 matchCommit=10 +2: StateReplicate match=11 next=13 sentCommit=11 matchCommit=11 inflight=1 +3: StateReplicate match=11 next=13 sentCommit=11 matchCommit=11 inflight=1 deliver-msgs 1 2 3 ---- @@ -373,9 +376,9 @@ AppendThread->3 MsgStorageAppendResp Term:0 Log:1/12 status 1 ---- -1: StateReplicate match=12 next=14 -2: StateReplicate match=12 next=14 inflight=1 -3: StateReplicate match=12 next=14 inflight=1 +1: StateReplicate match=12 next=14 sentCommit=11 matchCommit=11 +2: StateReplicate match=12 next=14 sentCommit=12 matchCommit=11 inflight=1 +3: StateReplicate match=12 next=14 sentCommit=12 matchCommit=11 inflight=1 propose 1 prop_3 ---- @@ -613,7 +616,7 @@ ApplyThread->3 MsgStorageApplyResp Term:0 Log:0/0 Entries:[1/12 EntryNormal "pro process-ready 1 2 3 ---- > 1 handling Ready - Ready MustSync=false: 
+ Ready MustSync=true: HardState Term:1 Vote:1 Commit:14 Lead:1 LeadEpoch:1 CommittedEntries: 1/14 EntryNormal "prop_3" @@ -639,7 +642,7 @@ process-ready 1 2 3 > 1 handling Ready > 2 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:1 Vote:1 Commit:14 Lead:1 LeadEpoch:1 CommittedEntries: 1/14 EntryNormal "prop_3" @@ -651,7 +654,7 @@ process-ready 1 2 3 ApplyThread->2 MsgStorageApplyResp Term:0 Log:0/0 Entries:[1/14 EntryNormal "prop_3"] ] > 3 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:1 Vote:1 Commit:14 Lead:1 LeadEpoch:1 CommittedEntries: 1/14 EntryNormal "prop_3" @@ -721,7 +724,7 @@ ApplyThread->3 MsgStorageApplyResp Term:0 Log:0/0 Entries:[1/13 EntryNormal "pro process-ready 1 2 3 ---- > 1 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:1 Vote:1 Commit:15 Lead:1 LeadEpoch:1 CommittedEntries: 1/15 EntryNormal "prop_4" @@ -747,7 +750,7 @@ process-ready 1 2 3 > 1 handling Ready > 2 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:1 Vote:1 Commit:15 Lead:1 LeadEpoch:1 CommittedEntries: 1/15 EntryNormal "prop_4" @@ -759,7 +762,7 @@ process-ready 1 2 3 ApplyThread->2 MsgStorageApplyResp Term:0 Log:0/0 Entries:[1/15 EntryNormal "prop_4"] ] > 3 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:1 Vote:1 Commit:15 Lead:1 LeadEpoch:1 CommittedEntries: 1/15 EntryNormal "prop_4" diff --git a/pkg/raft/testdata/async_storage_writes_append_aba_race.txt b/pkg/raft/testdata/async_storage_writes_append_aba_race.txt index ddc30b88b11e..c1f6e0a6ce79 100644 --- a/pkg/raft/testdata/async_storage_writes_append_aba_race.txt +++ b/pkg/raft/testdata/async_storage_writes_append_aba_race.txt @@ -71,6 +71,17 @@ Messages: # Step 4: node 3 becomes the leader after getting a vote from nodes 4, 5, and 6. 
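[Editorial note on the bump-epoch steps this diff adds to several testdata files before a new campaign: while a follower still supports the current leader's store-liveness epoch, it stays fortified — its HardState keeps a nonzero LeadEpoch — and won't freely vote or campaign. Bumping the old leader's epoch (here presumably store 2's) withdraws that support, which is why the HardStates below flip from LeadEpoch:1 to LeadEpoch:0; the printed matrix is each store's view of every store's epoch. A rough, hedged sketch of that reading — stillFortified is an invented predicate, not the package's code.]

// stillFortified approximates the behavior visible in the outputs: support
// pins a follower only while the epoch it promised support at is still the
// supporting store's current epoch.
func stillFortified(promisedEpoch, currentEpoch pb.Epoch) bool {
	return promisedEpoch != 0 && promisedEpoch == currentEpoch
}

The unit tests earlier in this diff reach the same state explicitly with lead.deFortify(lead.lead, lead.Term).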
+bump-epoch 2 +---- + 1 2 3 4 5 6 7 +1 1 2 1 1 1 1 1 +2 1 2 1 1 1 1 1 +3 1 2 1 1 1 1 1 +4 1 2 1 1 1 1 1 +5 1 2 1 1 1 1 1 +6 1 2 1 1 1 1 1 +7 1 2 1 1 1 1 1 + campaign 3 ---- INFO 3 is starting a new election at term 1 @@ -86,7 +97,7 @@ process-ready 3 ---- Ready MustSync=true: State:StateCandidate -HardState Term:2 Vote:3 Commit:11 Lead:0 LeadEpoch:1 +HardState Term:2 Vote:3 Commit:11 Lead:0 LeadEpoch:0 Messages: 3->1 MsgVote Term:2 Log:1/11 3->2 MsgVote Term:2 Log:1/11 @@ -94,7 +105,7 @@ Messages: 3->5 MsgVote Term:2 Log:1/11 3->6 MsgVote Term:2 Log:1/11 3->7 MsgVote Term:2 Log:1/11 -3->AppendThread MsgStorageAppend Term:2 Log:0/0 Commit:11 Vote:3 LeadEpoch:1 Responses:[ +3->AppendThread MsgStorageAppend Term:2 Log:0/0 Commit:11 Vote:3 Responses:[ 3->3 MsgVoteResp Term:2 Log:0/0 ] @@ -117,23 +128,23 @@ process-ready 4 5 6 ---- > 4 handling Ready Ready MustSync=true: - HardState Term:2 Vote:3 Commit:11 Lead:0 LeadEpoch:1 + HardState Term:2 Vote:3 Commit:11 Lead:0 LeadEpoch:0 Messages: - 4->AppendThread MsgStorageAppend Term:2 Log:0/0 Commit:11 Vote:3 LeadEpoch:1 Responses:[ + 4->AppendThread MsgStorageAppend Term:2 Log:0/0 Commit:11 Vote:3 Responses:[ 4->3 MsgVoteResp Term:2 Log:0/0 ] > 5 handling Ready Ready MustSync=true: - HardState Term:2 Vote:3 Commit:11 Lead:0 LeadEpoch:1 + HardState Term:2 Vote:3 Commit:11 Lead:0 LeadEpoch:0 Messages: - 5->AppendThread MsgStorageAppend Term:2 Log:0/0 Commit:11 Vote:3 LeadEpoch:1 Responses:[ + 5->AppendThread MsgStorageAppend Term:2 Log:0/0 Commit:11 Vote:3 Responses:[ 5->3 MsgVoteResp Term:2 Log:0/0 ] > 6 handling Ready Ready MustSync=true: - HardState Term:2 Vote:3 Commit:11 Lead:0 LeadEpoch:1 + HardState Term:2 Vote:3 Commit:11 Lead:0 LeadEpoch:0 Messages: - 6->AppendThread MsgStorageAppend Term:2 Log:0/0 Commit:11 Vote:3 LeadEpoch:1 Responses:[ + 6->AppendThread MsgStorageAppend Term:2 Log:0/0 Commit:11 Vote:3 Responses:[ 6->3 MsgVoteResp Term:2 Log:0/0 ] @@ -141,22 +152,22 @@ process-append-thread 3 4 5 6 ---- > 3 processing append thread Processing: - 3->AppendThread MsgStorageAppend Term:2 Log:0/0 Commit:11 Vote:3 LeadEpoch:1 + 3->AppendThread MsgStorageAppend Term:2 Log:0/0 Commit:11 Vote:3 Responses: 3->3 MsgVoteResp Term:2 Log:0/0 > 4 processing append thread Processing: - 4->AppendThread MsgStorageAppend Term:2 Log:0/0 Commit:11 Vote:3 LeadEpoch:1 + 4->AppendThread MsgStorageAppend Term:2 Log:0/0 Commit:11 Vote:3 Responses: 4->3 MsgVoteResp Term:2 Log:0/0 > 5 processing append thread Processing: - 5->AppendThread MsgStorageAppend Term:2 Log:0/0 Commit:11 Vote:3 LeadEpoch:1 + 5->AppendThread MsgStorageAppend Term:2 Log:0/0 Commit:11 Vote:3 Responses: 5->3 MsgVoteResp Term:2 Log:0/0 > 6 processing append thread Processing: - 6->AppendThread MsgStorageAppend Term:2 Log:0/0 Commit:11 Vote:3 LeadEpoch:1 + 6->AppendThread MsgStorageAppend Term:2 Log:0/0 Commit:11 Vote:3 Responses: 6->3 MsgVoteResp Term:2 Log:0/0 @@ -203,6 +214,7 @@ Messages: 3->7 MsgApp Term:2 Log:1/11 Commit:11 Entries:[2/12 EntryNormal ""] 3->AppendThread MsgStorageAppend Term:2 Log:2/12 Commit:11 Vote:3 Lead:3 LeadEpoch:1 Entries:[2/12 EntryNormal ""] Responses:[ 3->3 MsgAppResp Term:2 Log:0/12 Commit:11 + 3->3 MsgFortifyLeaderResp Term:2 Log:0/0 LeadEpoch:1 AppendThread->3 MsgStorageAppendResp Term:0 Log:2/12 ] @@ -260,7 +272,7 @@ process-ready 4 ---- Ready MustSync=true: State:StateCandidate -HardState Term:3 Vote:4 Commit:11 Lead:0 LeadEpoch:1 +HardState Term:3 Vote:4 Commit:11 Lead:0 LeadEpoch:0 Messages: 4->1 MsgVote Term:3 Log:1/11 4->2 MsgVote Term:3 Log:1/11 @@ -268,7 
+280,7 @@ Messages: 4->5 MsgVote Term:3 Log:1/11 4->6 MsgVote Term:3 Log:1/11 4->7 MsgVote Term:3 Log:1/11 -4->AppendThread MsgStorageAppend Term:3 Log:0/0 Commit:11 Vote:4 LeadEpoch:1 Responses:[ +4->AppendThread MsgStorageAppend Term:3 Log:0/0 Commit:11 Vote:4 Responses:[ 4->4 MsgVoteResp Term:3 Log:0/0 ] @@ -291,23 +303,23 @@ process-ready 5 6 7 ---- > 5 handling Ready Ready MustSync=true: - HardState Term:3 Vote:4 Commit:11 Lead:0 LeadEpoch:1 + HardState Term:3 Vote:4 Commit:11 Lead:0 LeadEpoch:0 Messages: - 5->AppendThread MsgStorageAppend Term:3 Log:0/0 Commit:11 Vote:4 LeadEpoch:1 Responses:[ + 5->AppendThread MsgStorageAppend Term:3 Log:0/0 Commit:11 Vote:4 Responses:[ 5->4 MsgVoteResp Term:3 Log:0/0 ] > 6 handling Ready Ready MustSync=true: - HardState Term:3 Vote:4 Commit:11 Lead:0 LeadEpoch:1 + HardState Term:3 Vote:4 Commit:11 Lead:0 LeadEpoch:0 Messages: - 6->AppendThread MsgStorageAppend Term:3 Log:0/0 Commit:11 Vote:4 LeadEpoch:1 Responses:[ + 6->AppendThread MsgStorageAppend Term:3 Log:0/0 Commit:11 Vote:4 Responses:[ 6->4 MsgVoteResp Term:3 Log:0/0 ] > 7 handling Ready Ready MustSync=true: - HardState Term:3 Vote:4 Commit:11 Lead:0 LeadEpoch:1 + HardState Term:3 Vote:4 Commit:11 Lead:0 LeadEpoch:0 Messages: - 7->AppendThread MsgStorageAppend Term:3 Log:0/0 Commit:11 Vote:4 LeadEpoch:1 Responses:[ + 7->AppendThread MsgStorageAppend Term:3 Log:0/0 Commit:11 Vote:4 Responses:[ 7->4 MsgVoteResp Term:3 Log:0/0 ] @@ -315,22 +327,22 @@ process-append-thread 4 5 6 7 ---- > 4 processing append thread Processing: - 4->AppendThread MsgStorageAppend Term:3 Log:0/0 Commit:11 Vote:4 LeadEpoch:1 + 4->AppendThread MsgStorageAppend Term:3 Log:0/0 Commit:11 Vote:4 Responses: 4->4 MsgVoteResp Term:3 Log:0/0 > 5 processing append thread Processing: - 5->AppendThread MsgStorageAppend Term:3 Log:0/0 Commit:11 Vote:4 LeadEpoch:1 + 5->AppendThread MsgStorageAppend Term:3 Log:0/0 Commit:11 Vote:4 Responses: 5->4 MsgVoteResp Term:3 Log:0/0 > 6 processing append thread Processing: - 6->AppendThread MsgStorageAppend Term:3 Log:0/0 Commit:11 Vote:4 LeadEpoch:1 + 6->AppendThread MsgStorageAppend Term:3 Log:0/0 Commit:11 Vote:4 Responses: 6->4 MsgVoteResp Term:3 Log:0/0 > 7 processing append thread Processing: - 7->AppendThread MsgStorageAppend Term:3 Log:0/0 Commit:11 Vote:4 LeadEpoch:1 + 7->AppendThread MsgStorageAppend Term:3 Log:0/0 Commit:11 Vote:4 Responses: 7->4 MsgVoteResp Term:3 Log:0/0 @@ -372,6 +384,7 @@ Messages: 4->7 MsgApp Term:3 Log:1/11 Commit:11 Entries:[3/12 EntryNormal ""] 4->AppendThread MsgStorageAppend Term:3 Log:3/12 Commit:11 Vote:4 Lead:4 LeadEpoch:1 Entries:[3/12 EntryNormal ""] Responses:[ 4->4 MsgAppResp Term:3 Log:0/12 Commit:11 + 4->4 MsgFortifyLeaderResp Term:3 Log:0/0 LeadEpoch:1 AppendThread->4 MsgStorageAppendResp Term:0 Log:3/12 ] @@ -411,10 +424,10 @@ INFO 1 became follower at term 3 process-ready 1 ---- Ready MustSync=true: -HardState Term:3 Commit:11 Lead:4 LeadEpoch:1 +HardState Term:3 Commit:11 Lead:4 LeadEpoch:0 Messages: 1->4 MsgHeartbeatResp Term:3 Log:0/0 -1->AppendThread MsgStorageAppend Term:3 Log:0/0 Commit:11 Lead:4 LeadEpoch:1 +1->AppendThread MsgStorageAppend Term:3 Log:0/0 Commit:11 Lead:4 deliver-msgs 4 ---- @@ -500,7 +513,7 @@ INFO mark (term,index)=(2,12) mismatched the last accepted term 3 in unstable lo process-append-thread 1 ---- Processing: -1->AppendThread MsgStorageAppend Term:3 Log:0/0 Commit:11 Lead:4 LeadEpoch:1 +1->AppendThread MsgStorageAppend Term:3 Log:0/0 Commit:11 Lead:4 Responses: raft-log 1 diff --git 
a/pkg/raft/testdata/campaign.txt b/pkg/raft/testdata/campaign.txt index 73d5d6f4b0ec..31e755931ee6 100644 --- a/pkg/raft/testdata/campaign.txt +++ b/pkg/raft/testdata/campaign.txt @@ -97,7 +97,7 @@ stabilize 3->1 MsgFortifyLeaderResp Term:1 Log:0/0 LeadEpoch:1 3->1 MsgAppResp Term:1 Log:0/3 Commit:2 > 1 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:1 Vote:1 Commit:3 Lead:1 LeadEpoch:1 CommittedEntries: 1/3 EntryNormal "" @@ -109,14 +109,14 @@ stabilize > 3 receiving messages 1->3 MsgApp Term:1 Log:1/3 Commit:3 > 2 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:1 Vote:1 Commit:3 Lead:1 LeadEpoch:1 CommittedEntries: 1/3 EntryNormal "" Messages: 2->1 MsgAppResp Term:1 Log:0/3 Commit:3 > 3 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:1 Vote:1 Commit:3 Lead:1 LeadEpoch:1 CommittedEntries: 1/3 EntryNormal "" diff --git a/pkg/raft/testdata/campaign_learner_must_vote.txt b/pkg/raft/testdata/campaign_learner_must_vote.txt index 3c6dbd1adce2..42a62f27813a 100644 --- a/pkg/raft/testdata/campaign_learner_must_vote.txt +++ b/pkg/raft/testdata/campaign_learner_must_vote.txt @@ -46,6 +46,10 @@ ok # We now pretend that n1 is dead, and n2 is trying to become leader. +bump-epoch 1 +---- +ok + log-level debug ---- ok @@ -62,7 +66,7 @@ process-ready 2 ---- Ready MustSync=true: State:StateCandidate -HardState Term:2 Vote:2 Commit:4 Lead:0 LeadEpoch:1 +HardState Term:2 Vote:2 Commit:4 Lead:0 LeadEpoch:0 Messages: 2->1 MsgVote Term:2 Log:1/4 2->3 MsgVote Term:2 Log:1/4 @@ -82,7 +86,7 @@ stabilize 3 INFO 3 [logterm: 1, index: 3, vote: 0] cast MsgVote for 2 [logterm: 1, index: 4] at term 2 > 3 handling Ready Ready MustSync=true: - HardState Term:2 Vote:2 Commit:3 Lead:0 LeadEpoch:1 + HardState Term:2 Vote:2 Commit:3 Lead:0 LeadEpoch:0 Messages: 3->2 MsgVoteResp Term:2 Log:0/0 @@ -118,7 +122,7 @@ stabilize 2 3 3->2 MsgFortifyLeaderResp Term:2 Log:0/0 LeadEpoch:1 3->2 MsgAppResp Term:2 Log:1/4 Rejected (Hint: 3) Commit:3 DEBUG 2 received MsgAppResp(rejected, hint: (index 3, term 1)) from 3 for index 4 - DEBUG 2 decreased progress of 3 to [StateProbe match=0 next=4] + DEBUG 2 decreased progress of 3 to [StateProbe match=0 next=4 sentCommit=3 matchCommit=3] > 2 handling Ready Ready MustSync=false: Messages: @@ -145,7 +149,7 @@ stabilize 2 3 > 2 receiving messages 3->2 MsgAppResp Term:2 Log:0/5 Commit:4 > 2 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:2 Vote:2 Commit:5 Lead:2 LeadEpoch:1 CommittedEntries: 2/5 EntryNormal "" @@ -154,7 +158,7 @@ stabilize 2 3 > 3 receiving messages 2->3 MsgApp Term:2 Log:2/5 Commit:5 > 3 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:2 Vote:2 Commit:5 Lead:2 LeadEpoch:1 CommittedEntries: 2/5 EntryNormal "" diff --git a/pkg/raft/testdata/checkquorum.txt b/pkg/raft/testdata/checkquorum.txt index 79b1be030d6d..57a5c14841c7 100644 --- a/pkg/raft/testdata/checkquorum.txt +++ b/pkg/raft/testdata/checkquorum.txt @@ -20,6 +20,10 @@ stabilize ---- ok +bump-epoch 1 +---- +ok + log-level debug ---- ok @@ -37,7 +41,7 @@ stabilize > 2 handling Ready Ready MustSync=true: State:StateCandidate - HardState Term:2 Vote:2 Commit:11 Lead:0 LeadEpoch:1 + HardState Term:2 Vote:2 Commit:11 Lead:0 LeadEpoch:0 Messages: 2->1 MsgVote Term:2 Log:1/11 2->3 MsgVote Term:2 Log:1/11 @@ -45,10 +49,10 @@ stabilize INFO 2 has received 1 MsgVoteResp votes and 0 vote rejections > 1 receiving messages 2->1 MsgVote Term:2 Log:1/11 - INFO 1 [logterm: 1, index: 11, vote: 1] 
ignored MsgVote from 2 [logterm: 1, index: 11] at term 1: lease is not expired (remaining ticks: 3) + INFO 1 [logterm: 1, index: 11, vote: 1] ignored MsgVote from 2 [logterm: 1, index: 11] at term 1: recently received communication from leader (remaining ticks: 3) > 3 receiving messages 2->3 MsgVote Term:2 Log:1/11 - INFO 3 [logterm: 1, index: 11, vote: 1] ignored MsgVote from 2 [logterm: 1, index: 11] at term 1: lease is not expired (remaining ticks: 3) + INFO 3 [logterm: 1, index: 11, vote: 1] ignored MsgVote from 2 [logterm: 1, index: 11] at term 1: recently received communication from leader (remaining ticks: 3) # Tick the leader without processing any messages from followers. We have to # tick 2 election timeouts, since the followers were active in the current @@ -70,28 +74,28 @@ stabilize Ready MustSync=false: State:StateFollower Messages: - 1->2 MsgHeartbeat Term:1 Log:0/0 Commit:11 - 1->3 MsgHeartbeat Term:1 Log:0/0 Commit:11 - 1->2 MsgHeartbeat Term:1 Log:0/0 Commit:11 - 1->3 MsgHeartbeat Term:1 Log:0/0 Commit:11 - 1->2 MsgHeartbeat Term:1 Log:0/0 Commit:11 - 1->3 MsgHeartbeat Term:1 Log:0/0 Commit:11 - 1->2 MsgHeartbeat Term:1 Log:0/0 Commit:11 - 1->3 MsgHeartbeat Term:1 Log:0/0 Commit:11 - 1->2 MsgHeartbeat Term:1 Log:0/0 Commit:11 - 1->3 MsgHeartbeat Term:1 Log:0/0 Commit:11 + 1->2 MsgHeartbeat Term:1 Log:0/0 + 1->3 MsgHeartbeat Term:1 Log:0/0 + 1->2 MsgHeartbeat Term:1 Log:0/0 + 1->3 MsgHeartbeat Term:1 Log:0/0 + 1->2 MsgHeartbeat Term:1 Log:0/0 + 1->3 MsgHeartbeat Term:1 Log:0/0 + 1->2 MsgHeartbeat Term:1 Log:0/0 + 1->3 MsgHeartbeat Term:1 Log:0/0 + 1->2 MsgHeartbeat Term:1 Log:0/0 + 1->3 MsgHeartbeat Term:1 Log:0/0 > 2 receiving messages - 1->2 MsgHeartbeat Term:1 Log:0/0 Commit:11 - 1->2 MsgHeartbeat Term:1 Log:0/0 Commit:11 - 1->2 MsgHeartbeat Term:1 Log:0/0 Commit:11 - 1->2 MsgHeartbeat Term:1 Log:0/0 Commit:11 - 1->2 MsgHeartbeat Term:1 Log:0/0 Commit:11 + 1->2 MsgHeartbeat Term:1 Log:0/0 + 1->2 MsgHeartbeat Term:1 Log:0/0 + 1->2 MsgHeartbeat Term:1 Log:0/0 + 1->2 MsgHeartbeat Term:1 Log:0/0 + 1->2 MsgHeartbeat Term:1 Log:0/0 > 3 receiving messages - 1->3 MsgHeartbeat Term:1 Log:0/0 Commit:11 - 1->3 MsgHeartbeat Term:1 Log:0/0 Commit:11 - 1->3 MsgHeartbeat Term:1 Log:0/0 Commit:11 - 1->3 MsgHeartbeat Term:1 Log:0/0 Commit:11 - 1->3 MsgHeartbeat Term:1 Log:0/0 Commit:11 + 1->3 MsgHeartbeat Term:1 Log:0/0 + 1->3 MsgHeartbeat Term:1 Log:0/0 + 1->3 MsgHeartbeat Term:1 Log:0/0 + 1->3 MsgHeartbeat Term:1 Log:0/0 + 1->3 MsgHeartbeat Term:1 Log:0/0 > 2 handling Ready Ready MustSync=false: Messages: @@ -128,7 +132,7 @@ stabilize INFO 1 [term: 2] ignored a MsgHeartbeatResp message with lower term from 3 [term: 1] > 1 handling Ready Ready MustSync=true: - HardState Term:2 Commit:11 Lead:0 LeadEpoch:1 + HardState Term:2 Commit:11 Lead:0 LeadEpoch:0 # Other nodes can now successfully campaign. Note that we haven't ticked 3, so # it won't grant votes. 
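[Editorial aside: the reworded rejection above — "lease is not expired" becoming "recently received communication from leader" — appears to be a rewording of the same check-quorum guard, since both variants report the same remaining ticks. A rough sketch of the bookkeeping those log lines imply; ignoreVote is an invented helper, not the package's code.]

// ignoreVote reports whether a voter should ignore MsgVote under
// check-quorum: contact with a leader within the last election timeout
// protects that leader, and the remaining tick count is what the log prints.
func ignoreVote(electionTimeout, ticksSinceLeaderContact int) (bool, int) {
	remaining := electionTimeout - ticksSinceLeaderContact
	return remaining > 0, remaining
}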
@@ -142,7 +146,7 @@ INFO 2 [logterm: 1, index: 11] sent MsgVote request to 3 at term 3 process-ready 2 ---- Ready MustSync=true: -HardState Term:3 Vote:2 Commit:11 Lead:0 LeadEpoch:1 +HardState Term:3 Vote:2 Commit:11 Lead:0 LeadEpoch:0 Messages: 2->1 MsgVote Term:3 Log:1/11 2->3 MsgVote Term:3 Log:1/11 @@ -159,13 +163,13 @@ INFO 1 [logterm: 1, index: 11, vote: 0] cast MsgVote for 2 [logterm: 1, index: 1 deliver-msgs 3 ---- 2->3 MsgVote Term:3 Log:1/11 -INFO 3 [logterm: 1, index: 11, vote: 1] ignored MsgVote from 2 [logterm: 1, index: 11] at term 1: lease is not expired (remaining ticks: 3) +INFO 3 [logterm: 1, index: 11, vote: 1] ignored MsgVote from 2 [logterm: 1, index: 11] at term 1: recently received communication from leader (remaining ticks: 3) stabilize ---- > 1 handling Ready Ready MustSync=true: - HardState Term:3 Vote:2 Commit:11 Lead:0 LeadEpoch:1 + HardState Term:3 Vote:2 Commit:11 Lead:0 LeadEpoch:0 Messages: 1->2 MsgVoteResp Term:3 Log:0/0 > 2 receiving messages @@ -214,7 +218,7 @@ stabilize 3->2 MsgFortifyLeaderResp Term:3 Log:0/0 LeadEpoch:1 3->2 MsgAppResp Term:3 Log:0/12 Commit:11 > 2 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:3 Vote:2 Commit:12 Lead:2 LeadEpoch:1 CommittedEntries: 3/12 EntryNormal "" @@ -226,14 +230,14 @@ stabilize > 3 receiving messages 2->3 MsgApp Term:3 Log:3/12 Commit:12 > 1 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:3 Vote:2 Commit:12 Lead:2 LeadEpoch:1 CommittedEntries: 3/12 EntryNormal "" Messages: 1->2 MsgAppResp Term:3 Log:0/12 Commit:12 > 3 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:3 Commit:12 Lead:2 LeadEpoch:1 CommittedEntries: 3/12 EntryNormal "" diff --git a/pkg/raft/testdata/confchange_disable_validation.txt b/pkg/raft/testdata/confchange_disable_validation.txt index 98ef9892a61b..666829a87407 100644 --- a/pkg/raft/testdata/confchange_disable_validation.txt +++ b/pkg/raft/testdata/confchange_disable_validation.txt @@ -41,7 +41,7 @@ stabilize 1 1/4 EntryNormal "foo" 1/5 EntryConfChangeV2 l2 l3 > 1 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:1 Vote:1 Commit:5 Lead:1 LeadEpoch:1 CommittedEntries: 1/4 EntryNormal "foo" diff --git a/pkg/raft/testdata/confchange_drop_if_unapplied.txt b/pkg/raft/testdata/confchange_drop_if_unapplied.txt index 10fb2395d900..1cbf38642115 100644 --- a/pkg/raft/testdata/confchange_drop_if_unapplied.txt +++ b/pkg/raft/testdata/confchange_drop_if_unapplied.txt @@ -50,7 +50,7 @@ stabilize 1 1/4 EntryConfChangeV2 l2 l3 INFO 1 switched to configuration voters=(1)&&(1) learners=(2 3) > 1 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:1 Vote:1 Commit:5 Lead:1 LeadEpoch:1 CommittedEntries: 1/5 EntryNormal "" diff --git a/pkg/raft/testdata/confchange_v1_add_single.txt b/pkg/raft/testdata/confchange_v1_add_single.txt index 13772610f4fb..e8cb0c1b1d9b 100644 --- a/pkg/raft/testdata/confchange_v1_add_single.txt +++ b/pkg/raft/testdata/confchange_v1_add_single.txt @@ -47,7 +47,7 @@ stabilize 1/3 EntryNormal "" 1/4 EntryConfChange v2 > 1 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:1 Vote:1 Commit:4 Lead:1 LeadEpoch:1 CommittedEntries: 1/3 EntryNormal "" @@ -70,9 +70,9 @@ stabilize > 1 receiving messages 2->1 MsgAppResp Term:1 Log:0/3 Rejected (Hint: 0) DEBUG 1 received MsgAppResp(rejected, hint: (index 0, term 0)) from 2 for index 3 - DEBUG 1 decreased progress of 2 to [StateProbe match=0 next=1] - DEBUG 1 [firstindex: 3, commit: 4] 
sent snapshot[index: 4, term: 1] to 2 [StateProbe match=0 next=1] - DEBUG 1 paused sending replication messages to 2 [StateSnapshot match=0 next=5 paused pendingSnap=4] + DEBUG 1 decreased progress of 2 to [StateProbe match=0 next=1 sentCommit=0 matchCommit=0] + DEBUG 1 [firstindex: 3, commit: 4] sent snapshot[index: 4, term: 1] to 2 [StateProbe match=0 next=1 sentCommit=0 matchCommit=0] + DEBUG 1 paused sending replication messages to 2 [StateSnapshot match=0 next=5 sentCommit=4 matchCommit=0 paused pendingSnap=4] > 1 handling Ready Ready MustSync=false: Messages: @@ -86,11 +86,11 @@ stabilize INFO 2 [commit: 4, lastindex: 4, lastterm: 1] restored snapshot [index: 4, term: 1] INFO 2 [commit: 4] restored snapshot [index: 4, term: 1] > 2 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:1 Commit:4 Lead:1 LeadEpoch:0 Snapshot Index:4 Term:1 ConfState:Voters:[1 2] VotersOutgoing:[] Learners:[] LearnersNext:[] AutoLeave:false Messages: 2->1 MsgAppResp Term:1 Log:0/4 Commit:4 > 1 receiving messages 2->1 MsgAppResp Term:1 Log:0/4 Commit:4 - DEBUG 1 recovered from needing snapshot, resumed sending replication messages to 2 [StateSnapshot match=4 next=5 paused pendingSnap=4] + DEBUG 1 recovered from needing snapshot, resumed sending replication messages to 2 [StateSnapshot match=4 next=5 sentCommit=4 matchCommit=4 paused pendingSnap=4] diff --git a/pkg/raft/testdata/confchange_v1_remove_leader.txt b/pkg/raft/testdata/confchange_v1_remove_leader.txt index d8d7e5fd4bdd..8adde53b23de 100644 --- a/pkg/raft/testdata/confchange_v1_remove_leader.txt +++ b/pkg/raft/testdata/confchange_v1_remove_leader.txt @@ -95,7 +95,7 @@ stabilize 1 2->1 MsgAppResp Term:1 Log:0/4 Commit:3 2->1 MsgAppResp Term:1 Log:0/5 Commit:3 > 1 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:1 Vote:1 Commit:5 Lead:1 LeadEpoch:1 CommittedEntries: 1/4 EntryConfChange r1 @@ -175,7 +175,7 @@ stabilize 3->1 MsgAppResp Term:1 Log:0/6 Commit:4 3->1 MsgAppResp Term:1 Log:0/6 Commit:5 > 1 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:1 Vote:1 Commit:6 Lead:1 LeadEpoch:1 CommittedEntries: 1/6 EntryNormal "bar" @@ -187,14 +187,14 @@ stabilize > 3 receiving messages 1->3 MsgApp Term:1 Log:1/6 Commit:6 > 2 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:1 Vote:1 Commit:6 Lead:1 LeadEpoch:1 CommittedEntries: 1/6 EntryNormal "bar" Messages: 2->1 MsgAppResp Term:1 Log:0/6 Commit:6 > 3 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:1 Vote:1 Commit:6 Lead:1 LeadEpoch:1 CommittedEntries: 1/6 EntryNormal "bar" @@ -223,12 +223,12 @@ stabilize > 1 handling Ready Ready MustSync=false: Messages: - 1->2 MsgHeartbeat Term:1 Log:0/0 Commit:6 - 1->3 MsgHeartbeat Term:1 Log:0/0 Commit:6 + 1->2 MsgHeartbeat Term:1 Log:0/0 + 1->3 MsgHeartbeat Term:1 Log:0/0 > 2 receiving messages - 1->2 MsgHeartbeat Term:1 Log:0/0 Commit:6 + 1->2 MsgHeartbeat Term:1 Log:0/0 > 3 receiving messages - 1->3 MsgHeartbeat Term:1 Log:0/0 Commit:6 + 1->3 MsgHeartbeat Term:1 Log:0/0 > 2 handling Ready Ready MustSync=false: Messages: diff --git a/pkg/raft/testdata/confchange_v1_remove_leader_stepdown.txt b/pkg/raft/testdata/confchange_v1_remove_leader_stepdown.txt index 2970cb521484..8ae3419a5e38 100644 --- a/pkg/raft/testdata/confchange_v1_remove_leader_stepdown.txt +++ b/pkg/raft/testdata/confchange_v1_remove_leader_stepdown.txt @@ -94,7 +94,7 @@ stabilize 1 2->1 MsgAppResp Term:1 Log:0/4 Commit:3 2->1 MsgAppResp Term:1 Log:0/5 Commit:3 > 1 
handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:1 Vote:1 Commit:5 Lead:1 LeadEpoch:1 CommittedEntries: 1/4 EntryConfChange r1 diff --git a/pkg/raft/testdata/confchange_v2_add_double_auto.txt b/pkg/raft/testdata/confchange_v2_add_double_auto.txt index 2f90578c55da..d71e3304a67b 100644 --- a/pkg/raft/testdata/confchange_v2_add_double_auto.txt +++ b/pkg/raft/testdata/confchange_v2_add_double_auto.txt @@ -54,7 +54,7 @@ Entries: # it has to since we're carrying out two additions at once. process-ready 1 ---- -Ready MustSync=false: +Ready MustSync=true: HardState Term:1 Vote:1 Commit:4 Lead:1 LeadEpoch:1 CommittedEntries: 1/3 EntryNormal "" @@ -93,9 +93,9 @@ stabilize 1 2 > 1 receiving messages 2->1 MsgAppResp Term:1 Log:0/3 Rejected (Hint: 0) DEBUG 1 received MsgAppResp(rejected, hint: (index 0, term 0)) from 2 for index 3 - DEBUG 1 decreased progress of 2 to [StateProbe match=0 next=1] - DEBUG 1 [firstindex: 3, commit: 4] sent snapshot[index: 4, term: 1] to 2 [StateProbe match=0 next=1] - DEBUG 1 paused sending replication messages to 2 [StateSnapshot match=0 next=5 paused pendingSnap=4] + DEBUG 1 decreased progress of 2 to [StateProbe match=0 next=1 sentCommit=0 matchCommit=0] + DEBUG 1 [firstindex: 3, commit: 4] sent snapshot[index: 4, term: 1] to 2 [StateProbe match=0 next=1 sentCommit=0 matchCommit=0] + DEBUG 1 paused sending replication messages to 2 [StateSnapshot match=0 next=5 sentCommit=4 matchCommit=0 paused pendingSnap=4] > 1 handling Ready Ready MustSync=false: Messages: @@ -109,14 +109,14 @@ stabilize 1 2 INFO 2 [commit: 4, lastindex: 4, lastterm: 1] restored snapshot [index: 4, term: 1] INFO 2 [commit: 4] restored snapshot [index: 4, term: 1] > 2 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:1 Commit:4 Lead:1 LeadEpoch:0 Snapshot Index:4 Term:1 ConfState:Voters:[1 2 3] VotersOutgoing:[1] Learners:[] LearnersNext:[] AutoLeave:true Messages: 2->1 MsgAppResp Term:1 Log:0/4 Commit:4 > 1 receiving messages 2->1 MsgAppResp Term:1 Log:0/4 Commit:4 - DEBUG 1 recovered from needing snapshot, resumed sending replication messages to 2 [StateSnapshot match=4 next=5 paused pendingSnap=4] + DEBUG 1 recovered from needing snapshot, resumed sending replication messages to 2 [StateSnapshot match=4 next=5 sentCommit=4 matchCommit=4 paused pendingSnap=4] > 1 handling Ready Ready MustSync=false: Messages: @@ -132,7 +132,7 @@ stabilize 1 2 > 1 receiving messages 2->1 MsgAppResp Term:1 Log:0/5 Commit:4 > 1 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:1 Vote:1 Commit:5 Lead:1 LeadEpoch:1 CommittedEntries: 1/5 EntryConfChangeV2 @@ -142,7 +142,7 @@ stabilize 1 2 > 2 receiving messages 1->2 MsgApp Term:1 Log:1/5 Commit:5 > 2 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:1 Commit:5 Lead:1 LeadEpoch:0 CommittedEntries: 1/5 EntryConfChangeV2 @@ -168,9 +168,9 @@ stabilize 1 3 > 1 receiving messages 3->1 MsgAppResp Term:1 Log:0/3 Rejected (Hint: 0) DEBUG 1 received MsgAppResp(rejected, hint: (index 0, term 0)) from 3 for index 3 - DEBUG 1 decreased progress of 3 to [StateProbe match=0 next=1] - DEBUG 1 [firstindex: 3, commit: 5] sent snapshot[index: 5, term: 1] to 3 [StateProbe match=0 next=1] - DEBUG 1 paused sending replication messages to 3 [StateSnapshot match=0 next=6 paused pendingSnap=5] + DEBUG 1 decreased progress of 3 to [StateProbe match=0 next=1 sentCommit=0 matchCommit=0] + DEBUG 1 [firstindex: 3, commit: 5] sent snapshot[index: 5, term: 1] to 3 [StateProbe match=0 next=1 
sentCommit=0 matchCommit=0] + DEBUG 1 paused sending replication messages to 3 [StateSnapshot match=0 next=6 sentCommit=5 matchCommit=0 paused pendingSnap=5] > 1 handling Ready Ready MustSync=false: Messages: @@ -184,14 +184,14 @@ stabilize 1 3 INFO 3 [commit: 5, lastindex: 5, lastterm: 1] restored snapshot [index: 5, term: 1] INFO 3 [commit: 5] restored snapshot [index: 5, term: 1] > 3 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:1 Commit:5 Lead:1 LeadEpoch:0 Snapshot Index:5 Term:1 ConfState:Voters:[1 2 3] VotersOutgoing:[] Learners:[] LearnersNext:[] AutoLeave:false Messages: 3->1 MsgAppResp Term:1 Log:0/5 Commit:5 > 1 receiving messages 3->1 MsgAppResp Term:1 Log:0/5 Commit:5 - DEBUG 1 recovered from needing snapshot, resumed sending replication messages to 3 [StateSnapshot match=5 next=6 paused pendingSnap=5] + DEBUG 1 recovered from needing snapshot, resumed sending replication messages to 3 [StateSnapshot match=5 next=6 sentCommit=5 matchCommit=5 paused pendingSnap=5] # Nothing else happens. stabilize @@ -266,7 +266,7 @@ stabilize 1 2->1 MsgAppResp Term:1 Log:0/6 Commit:5 3->1 MsgAppResp Term:1 Log:0/6 Commit:5 > 1 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:1 Vote:1 Commit:6 Lead:1 LeadEpoch:1 CommittedEntries: 1/6 EntryConfChangeV2 r2 r3 @@ -342,7 +342,7 @@ stabilize 3->1 MsgAppResp Term:1 Log:0/8 Commit:6 3->1 MsgAppResp Term:1 Log:0/9 Commit:6 > 1 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:1 Vote:1 Commit:9 Lead:1 LeadEpoch:1 CommittedEntries: 1/7 EntryNormal "foo" @@ -365,7 +365,7 @@ stabilize 1->3 MsgApp Term:1 Log:1/9 Commit:8 1->3 MsgApp Term:1 Log:1/9 Commit:9 > 2 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:1 Commit:9 Lead:1 LeadEpoch:0 CommittedEntries: 1/7 EntryNormal "foo" @@ -377,7 +377,7 @@ stabilize 2->1 MsgAppResp Term:1 Log:0/9 Commit:9 INFO 2 switched to configuration voters=(1) > 3 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:1 Commit:9 Lead:1 LeadEpoch:0 CommittedEntries: 1/7 EntryNormal "foo" diff --git a/pkg/raft/testdata/confchange_v2_add_double_implicit.txt b/pkg/raft/testdata/confchange_v2_add_double_implicit.txt index 90b6adddd033..78b0ff346cdb 100644 --- a/pkg/raft/testdata/confchange_v2_add_double_implicit.txt +++ b/pkg/raft/testdata/confchange_v2_add_double_implicit.txt @@ -50,7 +50,7 @@ stabilize 1 2 1/3 EntryNormal "" 1/4 EntryConfChangeV2 v2 > 1 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:1 Vote:1 Commit:4 Lead:1 LeadEpoch:1 CommittedEntries: 1/3 EntryNormal "" @@ -76,9 +76,9 @@ stabilize 1 2 > 1 receiving messages 2->1 MsgAppResp Term:1 Log:0/3 Rejected (Hint: 0) DEBUG 1 received MsgAppResp(rejected, hint: (index 0, term 0)) from 2 for index 3 - DEBUG 1 decreased progress of 2 to [StateProbe match=0 next=1] - DEBUG 1 [firstindex: 3, commit: 4] sent snapshot[index: 4, term: 1] to 2 [StateProbe match=0 next=1] - DEBUG 1 paused sending replication messages to 2 [StateSnapshot match=0 next=5 paused pendingSnap=4] + DEBUG 1 decreased progress of 2 to [StateProbe match=0 next=1 sentCommit=0 matchCommit=0] + DEBUG 1 [firstindex: 3, commit: 4] sent snapshot[index: 4, term: 1] to 2 [StateProbe match=0 next=1 sentCommit=0 matchCommit=0] + DEBUG 1 paused sending replication messages to 2 [StateSnapshot match=0 next=5 sentCommit=4 matchCommit=0 paused pendingSnap=4] > 1 handling Ready Ready MustSync=false: Messages: @@ -92,14 +92,14 @@ stabilize 1 2 INFO 2 [commit: 4, 
lastindex: 4, lastterm: 1] restored snapshot [index: 4, term: 1] INFO 2 [commit: 4] restored snapshot [index: 4, term: 1] > 2 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:1 Commit:4 Lead:1 LeadEpoch:0 Snapshot Index:4 Term:1 ConfState:Voters:[1 2] VotersOutgoing:[1] Learners:[] LearnersNext:[] AutoLeave:true Messages: 2->1 MsgAppResp Term:1 Log:0/4 Commit:4 > 1 receiving messages 2->1 MsgAppResp Term:1 Log:0/4 Commit:4 - DEBUG 1 recovered from needing snapshot, resumed sending replication messages to 2 [StateSnapshot match=4 next=5 paused pendingSnap=4] + DEBUG 1 recovered from needing snapshot, resumed sending replication messages to 2 [StateSnapshot match=4 next=5 sentCommit=4 matchCommit=4 paused pendingSnap=4] > 1 handling Ready Ready MustSync=false: Messages: @@ -115,7 +115,7 @@ stabilize 1 2 > 1 receiving messages 2->1 MsgAppResp Term:1 Log:0/5 Commit:4 > 1 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:1 Vote:1 Commit:5 Lead:1 LeadEpoch:1 CommittedEntries: 1/5 EntryConfChangeV2 @@ -125,7 +125,7 @@ stabilize 1 2 > 2 receiving messages 1->2 MsgApp Term:1 Log:1/5 Commit:5 > 2 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:1 Commit:5 Lead:1 LeadEpoch:0 CommittedEntries: 1/5 EntryConfChangeV2 diff --git a/pkg/raft/testdata/confchange_v2_add_single_auto.txt b/pkg/raft/testdata/confchange_v2_add_single_auto.txt index b5c1318b003f..1d4466694e20 100644 --- a/pkg/raft/testdata/confchange_v2_add_single_auto.txt +++ b/pkg/raft/testdata/confchange_v2_add_single_auto.txt @@ -48,7 +48,7 @@ stabilize 1/3 EntryNormal "" 1/4 EntryConfChangeV2 v2 > 1 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:1 Vote:1 Commit:4 Lead:1 LeadEpoch:1 CommittedEntries: 1/3 EntryNormal "" @@ -71,9 +71,9 @@ stabilize > 1 receiving messages 2->1 MsgAppResp Term:1 Log:0/3 Rejected (Hint: 0) DEBUG 1 received MsgAppResp(rejected, hint: (index 0, term 0)) from 2 for index 3 - DEBUG 1 decreased progress of 2 to [StateProbe match=0 next=1] - DEBUG 1 [firstindex: 3, commit: 4] sent snapshot[index: 4, term: 1] to 2 [StateProbe match=0 next=1] - DEBUG 1 paused sending replication messages to 2 [StateSnapshot match=0 next=5 paused pendingSnap=4] + DEBUG 1 decreased progress of 2 to [StateProbe match=0 next=1 sentCommit=0 matchCommit=0] + DEBUG 1 [firstindex: 3, commit: 4] sent snapshot[index: 4, term: 1] to 2 [StateProbe match=0 next=1 sentCommit=0 matchCommit=0] + DEBUG 1 paused sending replication messages to 2 [StateSnapshot match=0 next=5 sentCommit=4 matchCommit=0 paused pendingSnap=4] > 1 handling Ready Ready MustSync=false: Messages: @@ -87,11 +87,11 @@ stabilize INFO 2 [commit: 4, lastindex: 4, lastterm: 1] restored snapshot [index: 4, term: 1] INFO 2 [commit: 4] restored snapshot [index: 4, term: 1] > 2 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:1 Commit:4 Lead:1 LeadEpoch:0 Snapshot Index:4 Term:1 ConfState:Voters:[1 2] VotersOutgoing:[] Learners:[] LearnersNext:[] AutoLeave:false Messages: 2->1 MsgAppResp Term:1 Log:0/4 Commit:4 > 1 receiving messages 2->1 MsgAppResp Term:1 Log:0/4 Commit:4 - DEBUG 1 recovered from needing snapshot, resumed sending replication messages to 2 [StateSnapshot match=4 next=5 paused pendingSnap=4] + DEBUG 1 recovered from needing snapshot, resumed sending replication messages to 2 [StateSnapshot match=4 next=5 sentCommit=4 matchCommit=4 paused pendingSnap=4] diff --git a/pkg/raft/testdata/confchange_v2_add_single_explicit.txt 
b/pkg/raft/testdata/confchange_v2_add_single_explicit.txt index 18ca57817703..bcf2649eae32 100644 --- a/pkg/raft/testdata/confchange_v2_add_single_explicit.txt +++ b/pkg/raft/testdata/confchange_v2_add_single_explicit.txt @@ -48,7 +48,7 @@ stabilize 1 2 1/3 EntryNormal "" 1/4 EntryConfChangeV2 v2 > 1 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:1 Vote:1 Commit:4 Lead:1 LeadEpoch:1 CommittedEntries: 1/3 EntryNormal "" @@ -71,9 +71,9 @@ stabilize 1 2 > 1 receiving messages 2->1 MsgAppResp Term:1 Log:0/3 Rejected (Hint: 0) DEBUG 1 received MsgAppResp(rejected, hint: (index 0, term 0)) from 2 for index 3 - DEBUG 1 decreased progress of 2 to [StateProbe match=0 next=1] - DEBUG 1 [firstindex: 3, commit: 4] sent snapshot[index: 4, term: 1] to 2 [StateProbe match=0 next=1] - DEBUG 1 paused sending replication messages to 2 [StateSnapshot match=0 next=5 paused pendingSnap=4] + DEBUG 1 decreased progress of 2 to [StateProbe match=0 next=1 sentCommit=0 matchCommit=0] + DEBUG 1 [firstindex: 3, commit: 4] sent snapshot[index: 4, term: 1] to 2 [StateProbe match=0 next=1 sentCommit=0 matchCommit=0] + DEBUG 1 paused sending replication messages to 2 [StateSnapshot match=0 next=5 sentCommit=4 matchCommit=0 paused pendingSnap=4] > 1 handling Ready Ready MustSync=false: Messages: @@ -87,14 +87,14 @@ stabilize 1 2 INFO 2 [commit: 4, lastindex: 4, lastterm: 1] restored snapshot [index: 4, term: 1] INFO 2 [commit: 4] restored snapshot [index: 4, term: 1] > 2 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:1 Commit:4 Lead:1 LeadEpoch:0 Snapshot Index:4 Term:1 ConfState:Voters:[1 2] VotersOutgoing:[1] Learners:[] LearnersNext:[] AutoLeave:false Messages: 2->1 MsgAppResp Term:1 Log:0/4 Commit:4 > 1 receiving messages 2->1 MsgAppResp Term:1 Log:0/4 Commit:4 - DEBUG 1 recovered from needing snapshot, resumed sending replication messages to 2 [StateSnapshot match=4 next=5 paused pendingSnap=4] + DEBUG 1 recovered from needing snapshot, resumed sending replication messages to 2 [StateSnapshot match=4 next=5 sentCommit=4 matchCommit=4 paused pendingSnap=4] # Check that we're not allowed to change membership again while in the joint state. 
# This leads to an empty entry being proposed instead (index 5 in the stabilize block @@ -135,7 +135,7 @@ stabilize 2->1 MsgAppResp Term:1 Log:0/5 Commit:4 2->1 MsgAppResp Term:1 Log:0/6 Commit:4 > 1 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:1 Vote:1 Commit:6 Lead:1 LeadEpoch:1 CommittedEntries: 1/5 EntryNormal "" @@ -148,7 +148,7 @@ stabilize 1->2 MsgApp Term:1 Log:1/6 Commit:5 1->2 MsgApp Term:1 Log:1/6 Commit:6 > 2 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:1 Commit:6 Lead:1 LeadEpoch:0 CommittedEntries: 1/5 EntryNormal "" @@ -186,7 +186,7 @@ stabilize > 1 receiving messages 2->1 MsgAppResp Term:1 Log:0/7 Commit:6 > 1 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:1 Vote:1 Commit:7 Lead:1 LeadEpoch:1 CommittedEntries: 1/7 EntryNormal "" @@ -195,7 +195,7 @@ stabilize > 2 receiving messages 1->2 MsgApp Term:1 Log:1/7 Commit:7 > 2 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:1 Commit:7 Lead:1 LeadEpoch:0 CommittedEntries: 1/7 EntryNormal "" diff --git a/pkg/raft/testdata/confchange_v2_replace_leader.txt b/pkg/raft/testdata/confchange_v2_replace_leader.txt index 986eba05b801..01d23179f29a 100644 --- a/pkg/raft/testdata/confchange_v2_replace_leader.txt +++ b/pkg/raft/testdata/confchange_v2_replace_leader.txt @@ -76,7 +76,7 @@ stabilize 2->1 MsgAppResp Term:1 Log:0/4 Commit:3 3->1 MsgAppResp Term:1 Log:0/4 Commit:3 > 1 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:1 Vote:1 Commit:4 Lead:1 LeadEpoch:1 CommittedEntries: 1/4 EntryConfChangeV2 r1 v4 @@ -93,7 +93,7 @@ stabilize Messages: 1->4 MsgApp Term:1 Log:1/3 Commit:4 Entries:[1/4 EntryConfChangeV2 r1 v4] > 2 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:1 Vote:1 Commit:4 Lead:1 LeadEpoch:1 CommittedEntries: 1/4 EntryConfChangeV2 r1 v4 @@ -101,7 +101,7 @@ stabilize 2->1 MsgAppResp Term:1 Log:0/4 Commit:4 INFO 2 switched to configuration voters=(2 3 4)&&(1 2 3) > 3 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:1 Vote:1 Commit:4 Lead:1 LeadEpoch:1 CommittedEntries: 1/4 EntryConfChangeV2 r1 v4 @@ -135,7 +135,7 @@ stabilize INFO 4 [commit: 4, lastindex: 4, lastterm: 1] restored snapshot [index: 4, term: 1] INFO 4 [commit: 4] restored snapshot [index: 4, term: 1] > 4 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:1 Commit:4 Lead:1 LeadEpoch:0 Snapshot Index:4 Term:1 ConfState:Voters:[2 3 4] VotersOutgoing:[1 2 3] Learners:[] LearnersNext:[] AutoLeave:false Messages: @@ -149,11 +149,13 @@ transfer-leadership from=1 to=4 ---- INFO 1 [term 1] starts to transfer leadership to 4 INFO 1 sends MsgTimeoutNow to 4 immediately as 4 already has up-to-date log +INFO 1 became follower at term 1 -# Leadership transfer wasn't processed yet. +# Leadership transfer was initiated by the outgoing leader, but not yet +# processed by the transfer target. 
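The output above encodes a behavioral change worth spelling out: with leader fortification, the outgoing leader steps down to follower the moment it sends MsgTimeoutNow, rather than remaining leader until the transfer target campaigns. Below is a minimal Go sketch of that transition; the types and the transferLeadership method are invented for illustration and are not the raft package's actual API.

package main

import "fmt"

type StateType int

const (
	StateFollower StateType = iota
	StateLeader
)

type peer struct {
	id    uint64
	state StateType
	term  uint64
	lead  uint64 // 0 means no known leader
}

// transferLeadership models the updated behavior: the moment the outgoing
// leader hands off leadership it demotes itself, which is why the raft-state
// above shows 1 as StateFollower with Lead:0 before 4 has even campaigned.
func (p *peer) transferLeadership() {
	p.state = StateFollower
	p.lead = 0 // leadership is in flight; no leader is known yet
}

func main() {
	n1 := &peer{id: 1, state: StateLeader, term: 1, lead: 1}
	n1.transferLeadership()
	fmt.Printf("1: follower=%v Term:%d Lead:%d\n", n1.state == StateFollower, n1.term, n1.lead)
}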
raft-state ---- -1: StateLeader (Voter) Term:1 Lead:1 +1: StateFollower (Voter) Term:1 Lead:0 2: StateFollower (Voter) Term:1 Lead:1 3: StateFollower (Voter) Term:1 Lead:1 4: StateFollower (Voter) Term:1 Lead:1 @@ -163,6 +165,7 @@ stabilize ---- > 1 handling Ready Ready MustSync=false: + State:StateFollower Messages: 1->4 MsgTimeoutNow Term:1 Log:0/0 > 4 receiving messages @@ -200,18 +203,17 @@ stabilize INFO 3 [logterm: 1, index: 4, vote: 0] cast MsgVote for 4 [logterm: 1, index: 4] at term 2 > 1 handling Ready Ready MustSync=true: - State:StateFollower - HardState Term:2 Vote:4 Commit:4 Lead:0 LeadEpoch:1 + HardState Term:2 Vote:4 Commit:4 Lead:0 LeadEpoch:0 Messages: 1->4 MsgVoteResp Term:2 Log:0/0 > 2 handling Ready Ready MustSync=true: - HardState Term:2 Vote:4 Commit:4 Lead:0 LeadEpoch:1 + HardState Term:2 Vote:4 Commit:4 Lead:0 LeadEpoch:0 Messages: 2->4 MsgVoteResp Term:2 Log:0/0 > 3 handling Ready Ready MustSync=true: - HardState Term:2 Vote:4 Commit:4 Lead:0 LeadEpoch:1 + HardState Term:2 Vote:4 Commit:4 Lead:0 LeadEpoch:0 Messages: 3->4 MsgVoteResp Term:2 Log:0/0 > 4 receiving messages @@ -277,7 +279,7 @@ stabilize 3->4 MsgFortifyLeaderResp Term:2 Log:0/0 LeadEpoch:1 3->4 MsgAppResp Term:2 Log:0/5 Commit:4 > 4 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:2 Vote:4 Commit:5 Lead:4 LeadEpoch:1 CommittedEntries: 2/5 EntryNormal "" @@ -292,21 +294,21 @@ stabilize > 3 receiving messages 4->3 MsgApp Term:2 Log:2/5 Commit:5 > 1 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:2 Vote:4 Commit:5 Lead:4 LeadEpoch:1 CommittedEntries: 2/5 EntryNormal "" Messages: 1->4 MsgAppResp Term:2 Log:0/5 Commit:5 > 2 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:2 Vote:4 Commit:5 Lead:4 LeadEpoch:1 CommittedEntries: 2/5 EntryNormal "" Messages: 2->4 MsgAppResp Term:2 Log:0/5 Commit:5 > 3 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:2 Vote:4 Commit:5 Lead:4 LeadEpoch:1 CommittedEntries: 2/5 EntryNormal "" @@ -370,7 +372,7 @@ stabilize 2->4 MsgAppResp Term:2 Log:0/6 Commit:5 3->4 MsgAppResp Term:2 Log:0/6 Commit:5 > 4 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:2 Vote:4 Commit:6 Lead:4 LeadEpoch:1 CommittedEntries: 2/6 EntryConfChangeV2 @@ -386,7 +388,7 @@ stabilize > 3 receiving messages 4->3 MsgApp Term:2 Log:2/6 Commit:6 > 1 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:2 Vote:4 Commit:6 Lead:4 LeadEpoch:1 CommittedEntries: 2/6 EntryConfChangeV2 @@ -394,7 +396,7 @@ stabilize 1->4 MsgAppResp Term:2 Log:0/6 Commit:6 INFO 1 switched to configuration voters=(2 3 4) > 2 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:2 Vote:4 Commit:6 Lead:4 LeadEpoch:1 CommittedEntries: 2/6 EntryConfChangeV2 @@ -402,7 +404,7 @@ stabilize 2->4 MsgAppResp Term:2 Log:0/6 Commit:6 INFO 2 switched to configuration voters=(2 3 4) > 3 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:2 Vote:4 Commit:6 Lead:4 LeadEpoch:1 CommittedEntries: 2/6 EntryConfChangeV2 diff --git a/pkg/raft/testdata/confchange_v2_replace_leader_stepdown.txt b/pkg/raft/testdata/confchange_v2_replace_leader_stepdown.txt index 2e22f50c9313..b8dae85662d7 100644 --- a/pkg/raft/testdata/confchange_v2_replace_leader_stepdown.txt +++ b/pkg/raft/testdata/confchange_v2_replace_leader_stepdown.txt @@ -111,7 +111,7 @@ stabilize 3->1 MsgAppResp Term:1 Log:0/5 Commit:4 4->1 MsgAppResp Term:1 Log:0/5 Commit:4 > 1 handling Ready - 
Ready MustSync=false: + Ready MustSync=true: HardState Term:1 Vote:1 Commit:5 Lead:1 LeadEpoch:1 CommittedEntries: 1/5 EntryConfChangeV2 @@ -131,7 +131,7 @@ stabilize Ready MustSync=false: State:StateFollower > 2 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:1 Vote:1 Commit:5 Lead:1 LeadEpoch:1 CommittedEntries: 1/5 EntryConfChangeV2 @@ -139,7 +139,7 @@ stabilize 2->1 MsgAppResp Term:1 Log:0/5 Commit:5 INFO 2 switched to configuration voters=(2 3 4) > 3 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:1 Vote:1 Commit:5 Lead:1 LeadEpoch:1 CommittedEntries: 1/5 EntryConfChangeV2 @@ -147,7 +147,7 @@ stabilize 3->1 MsgAppResp Term:1 Log:0/5 Commit:5 INFO 3 switched to configuration voters=(2 3 4) > 4 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:1 Commit:5 Lead:1 LeadEpoch:0 CommittedEntries: 1/5 EntryConfChangeV2 @@ -172,6 +172,16 @@ campaign 1 ---- WARN 1 is unpromotable and can not campaign +# TODO(arul): this is a hack until +# https://github.com/cockroachdb/cockroach/issues/129098 is fixed. +bump-epoch 1 +---- + 1 2 3 4 +1 2 1 1 1 +2 2 1 1 1 +3 2 1 1 1 +4 2 1 1 1 + # Campaign the dedicated voter n2 to become the new leader. campaign 2 ---- diff --git a/pkg/raft/testdata/forget_leader.txt b/pkg/raft/testdata/forget_leader.txt index 6c44015408bc..08971d2827f5 100644 --- a/pkg/raft/testdata/forget_leader.txt +++ b/pkg/raft/testdata/forget_leader.txt @@ -103,24 +103,24 @@ stabilize > 1 handling Ready Ready MustSync=false: Messages: - 1->2 MsgHeartbeat Term:1 Log:0/0 Commit:11 - 1->3 MsgHeartbeat Term:1 Log:0/0 Commit:11 - 1->4 MsgHeartbeat Term:1 Log:0/0 Commit:11 + 1->2 MsgHeartbeat Term:1 Log:0/0 + 1->3 MsgHeartbeat Term:1 Log:0/0 + 1->4 MsgHeartbeat Term:1 Log:0/0 > 2 handling Ready Ready MustSync=true: - HardState Term:1 Vote:1 Commit:11 Lead:0 LeadEpoch:1 + HardState Term:1 Vote:1 Commit:11 Lead:0 LeadEpoch:0 > 4 handling Ready Ready MustSync=true: - HardState Term:1 Commit:11 Lead:0 LeadEpoch:1 + HardState Term:1 Commit:11 Lead:0 LeadEpoch:0 > 2 receiving messages - 1->2 MsgHeartbeat Term:1 Log:0/0 Commit:11 + 1->2 MsgHeartbeat Term:1 Log:0/0 > 3 receiving messages - 1->3 MsgHeartbeat Term:1 Log:0/0 Commit:11 + 1->3 MsgHeartbeat Term:1 Log:0/0 > 4 receiving messages - 1->4 MsgHeartbeat Term:1 Log:0/0 Commit:11 + 1->4 MsgHeartbeat Term:1 Log:0/0 > 2 handling Ready Ready MustSync=true: - HardState Term:1 Vote:1 Commit:11 Lead:1 LeadEpoch:1 + HardState Term:1 Vote:1 Commit:11 Lead:1 LeadEpoch:0 Messages: 2->1 MsgHeartbeatResp Term:1 Log:0/0 > 3 handling Ready @@ -129,7 +129,7 @@ stabilize 3->1 MsgHeartbeatResp Term:1 Log:0/0 > 4 handling Ready Ready MustSync=true: - HardState Term:1 Commit:11 Lead:1 LeadEpoch:1 + HardState Term:1 Commit:11 Lead:1 LeadEpoch:0 Messages: 4->1 MsgHeartbeatResp Term:1 Log:0/0 > 1 receiving messages @@ -144,6 +144,14 @@ raft-state 3: StateFollower (Voter) Term:1 Lead:1 4: StateFollower (Non-Voter) Term:1 Lead:1 +withdraw-support 3 1 +---- + 1 2 3 4 +1 1 1 1 1 +2 x 1 1 1 +3 x 1 1 1 +4 x 1 1 1 + # ForgetLeader is a noop on candidates. campaign 3 ---- @@ -184,29 +192,58 @@ raft-state # ForgetLeader shouldn't affect the election timeout: if a follower # forgets the leader 1 tick before the election timeout fires, it # will still campaign on the next tick. -set-randomized-election-timeout 2 timeout=3 +# +# NB: We also must withdraw support in store liveness, because otherwise every +# tick will reset the election timer. 
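The NB comment above is the crux of several changes in this file: while a follower still holds store liveness support for a fortified leader, every tick resets its election timer, so the randomized election timeout can never fire. A minimal Go sketch of that tick behavior follows, under hypothetical field names; the real package's bookkeeping differs.

package main

import "fmt"

type follower struct {
	electionElapsed   int
	randomizedTimeout int
	supportsLeader    bool // store liveness support for the fortified leader
}

// tick returns true once the follower may campaign. While support for the
// fortified leader is held, the election timer is pinned at zero, which is
// the "every tick will reset the election timer" behavior noted above.
func (f *follower) tick() bool {
	if f.supportsLeader {
		f.electionElapsed = 0
		return false
	}
	f.electionElapsed++
	return f.electionElapsed >= f.randomizedTimeout
}

func main() {
	f := &follower{randomizedTimeout: 5, supportsLeader: true}
	for i := 0; i < 10; i++ {
		f.tick() // never fires while support is held
	}
	f.supportsLeader = false // the withdraw-support step
	fired := false
	for i := 0; i < 5 && !fired; i++ {
		fired = f.tick()
	}
	fmt.Println("campaigned after withdrawing support:", fired)
}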
+set-randomized-election-timeout 2 timeout=5 ---- ok +withdraw-support 2 3 +---- + 1 2 3 4 +1 1 1 1 1 +2 x 1 x 1 +3 x 1 1 1 +4 x 1 1 1 + tick-heartbeat 2 ---- -ok +DEBUG 2 setting election elapsed to start from 3 ticks after store liveness support expired tick-heartbeat 2 ---- ok -withdraw-support 2 3 +forget-leader 2 +---- +INFO 2 forgetting leader 3 at term 2 + +# Withdraw support for 3 from all peers so that they vote for 2 when 2 calls an +# election. +withdraw-support 1 3 ---- 1 2 3 4 -1 1 1 1 1 +1 1 1 x 1 2 x 1 x 1 -3 1 1 1 1 +3 x 1 1 1 4 x 1 1 1 -forget-leader 2 +withdraw-support 3 3 ---- -INFO 2 forgetting leader 3 at term 2 + 1 2 3 4 +1 1 1 x 1 +2 x 1 x 1 +3 x 1 x 1 +4 x 1 1 1 + +withdraw-support 4 3 +---- + 1 2 3 4 +1 1 1 x 1 +2 x 1 x 1 +3 x 1 x 1 +4 x 1 x 1 tick-heartbeat 2 ---- diff --git a/pkg/raft/testdata/forget_leader_prevote_checkquorum.txt b/pkg/raft/testdata/forget_leader_prevote_checkquorum.txt index 41ab3482de33..e3f73f374a12 100644 --- a/pkg/raft/testdata/forget_leader_prevote_checkquorum.txt +++ b/pkg/raft/testdata/forget_leader_prevote_checkquorum.txt @@ -23,6 +23,13 @@ log-level debug ---- ok +withdraw-support 3 1 +---- + 1 2 3 +1 1 1 1 +2 1 1 1 +3 x 1 1 + # If 3 attempts to campaign, 2 rejects it because it has a leader. campaign 3 ---- @@ -36,7 +43,7 @@ stabilize 3 > 3 handling Ready Ready MustSync=true: State:StatePreCandidate - HardState Term:1 Vote:1 Commit:11 Lead:0 LeadEpoch:1 + HardState Term:1 Vote:1 Commit:11 Lead:0 LeadEpoch:0 Messages: 3->1 MsgPreVote Term:2 Log:1/11 3->2 MsgPreVote Term:2 Log:1/11 @@ -46,9 +53,9 @@ stabilize 3 deliver-msgs 1 2 ---- 3->1 MsgPreVote Term:2 Log:1/11 -INFO 1 [logterm: 1, index: 11, vote: 1] ignored MsgPreVote from 3 [logterm: 1, index: 11] at term 1: lease is not expired (remaining ticks: 3) +INFO 1 [logterm: 1, index: 11, vote: 1] ignored MsgPreVote from 3 [logterm: 1, index: 11] at term 1: recently received communication from leader (remaining ticks: 3) and supporting fortified leader 1 at epoch 1 3->2 MsgPreVote Term:2 Log:1/11 -INFO 2 [logterm: 1, index: 11, vote: 1] ignored MsgPreVote from 3 [logterm: 1, index: 11] at term 1: lease is not expired (remaining ticks: 3) +INFO 2 [logterm: 1, index: 11, vote: 1] ignored MsgPreVote from 3 [logterm: 1, index: 11] at term 1: recently received communication from leader (remaining ticks: 3) and supporting fortified leader 1 at epoch 1 # Make 1 assert leadership over 3 again. tick-heartbeat 1 @@ -60,12 +67,12 @@ stabilize > 1 handling Ready Ready MustSync=false: Messages: - 1->2 MsgHeartbeat Term:1 Log:0/0 Commit:11 - 1->3 MsgHeartbeat Term:1 Log:0/0 Commit:11 + 1->2 MsgHeartbeat Term:1 Log:0/0 + 1->3 MsgHeartbeat Term:1 Log:0/0 > 2 receiving messages - 1->2 MsgHeartbeat Term:1 Log:0/0 Commit:11 + 1->2 MsgHeartbeat Term:1 Log:0/0 > 3 receiving messages - 1->3 MsgHeartbeat Term:1 Log:0/0 Commit:11 + 1->3 MsgHeartbeat Term:1 Log:0/0 INFO 3 became follower at term 1 > 2 handling Ready Ready MustSync=false: @@ -74,7 +81,7 @@ stabilize > 3 handling Ready Ready MustSync=true: State:StateFollower - HardState Term:1 Vote:1 Commit:11 Lead:1 LeadEpoch:1 + HardState Term:1 Vote:1 Commit:11 Lead:1 LeadEpoch:0 Messages: 3->1 MsgHeartbeatResp Term:1 Log:0/0 > 1 receiving messages @@ -98,7 +105,7 @@ withdraw-support 2 1 1 2 3 1 1 1 1 2 x 1 1 -3 1 1 1 +3 x 1 1 # If 2 forgets the leader, then 3 can obtain prevotes and hold an election # despite 2 having heard from the leader recently. 
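The updated rejection messages in this file combine two independent guards: the check-quorum lease ("recently received communication from leader (remaining ticks: N)") and leader fortification ("supporting fortified leader L at epoch E"). A rough Go sketch of assembling such a combined ignore reason, with invented names:

package main

import "fmt"

type voter struct {
	remainingLeaseTicks int    // >0 means the leader was heard from recently
	fortifiedLeader     uint64 // non-zero while store liveness support is held
	leadEpoch           uint64
}

// ignoreVoteReason mirrors the shape of the log lines above; an empty string
// means the vote (or pre-vote) may be considered.
func (v *voter) ignoreVoteReason() string {
	switch {
	case v.remainingLeaseTicks > 0 && v.fortifiedLeader != 0:
		return fmt.Sprintf("recently received communication from leader (remaining ticks: %d) and supporting fortified leader %d at epoch %d", v.remainingLeaseTicks, v.fortifiedLeader, v.leadEpoch)
	case v.remainingLeaseTicks > 0:
		return fmt.Sprintf("recently received communication from leader (remaining ticks: %d)", v.remainingLeaseTicks)
	case v.fortifiedLeader != 0:
		return fmt.Sprintf("supporting fortified leader %d at epoch %d", v.fortifiedLeader, v.leadEpoch)
	}
	return ""
}

func main() {
	v := voter{remainingLeaseTicks: 3, fortifiedLeader: 1, leadEpoch: 1}
	fmt.Println("ignored MsgPreVote:", v.ignoreVoteReason())
}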
@@ -124,7 +131,7 @@ stabilize 3 > 3 handling Ready Ready MustSync=true: State:StatePreCandidate - HardState Term:1 Vote:1 Commit:11 Lead:0 LeadEpoch:1 + HardState Term:1 Vote:1 Commit:11 Lead:0 LeadEpoch:0 Messages: 3->1 MsgPreVote Term:2 Log:1/11 3->2 MsgPreVote Term:2 Log:1/11 @@ -135,7 +142,7 @@ stabilize 2 ---- > 2 handling Ready Ready MustSync=true: - HardState Term:1 Vote:1 Commit:11 Lead:0 LeadEpoch:1 + HardState Term:1 Vote:1 Commit:11 Lead:0 LeadEpoch:0 > 2 receiving messages 3->2 MsgPreVote Term:2 Log:1/11 INFO 2 [logterm: 1, index: 11, vote: 1] cast MsgPreVote for 3 [logterm: 1, index: 11] at term 1 @@ -156,7 +163,7 @@ stabilize 3 > 3 handling Ready Ready MustSync=true: State:StateCandidate - HardState Term:2 Vote:3 Commit:11 Lead:0 LeadEpoch:1 + HardState Term:2 Vote:3 Commit:11 Lead:0 LeadEpoch:0 Messages: 3->1 MsgVote Term:2 Log:1/11 3->2 MsgVote Term:2 Log:1/11 @@ -205,7 +212,7 @@ withdraw-support 2 3 1 2 3 1 1 1 1 2 x 1 x -3 1 1 1 +3 x 1 1 forget-leader 2 ---- @@ -217,6 +224,13 @@ raft-log 1 1/11 EntryNormal "" 2/12 EntryNormal "" +withdraw-support 1 3 +---- + 1 2 3 +1 1 1 x +2 x 1 x +3 x 1 1 + campaign 1 ---- INFO 1 is starting a new election at term 2 @@ -228,7 +242,7 @@ process-ready 1 ---- Ready MustSync=true: State:StatePreCandidate -HardState Term:2 Commit:12 Lead:0 LeadEpoch:1 +HardState Term:2 Commit:12 Lead:0 LeadEpoch:0 Messages: 1->2 MsgPreVote Term:3 Log:2/12 1->3 MsgPreVote Term:3 Log:2/12 @@ -239,7 +253,7 @@ stabilize 2 ---- > 2 handling Ready Ready MustSync=true: - HardState Term:2 Vote:3 Commit:12 Lead:0 LeadEpoch:1 + HardState Term:2 Vote:3 Commit:12 Lead:0 LeadEpoch:0 > 2 receiving messages 1->2 MsgPreVote Term:3 Log:2/12 INFO 2 [logterm: 2, index: 13, vote: 3] rejected MsgPreVote from 1 [logterm: 2, index: 12] at term 2 diff --git a/pkg/raft/testdata/fortification_basic.txt b/pkg/raft/testdata/fortification_basic.txt index c3b7ea6e044e..2d1f19467824 100644 --- a/pkg/raft/testdata/fortification_basic.txt +++ b/pkg/raft/testdata/fortification_basic.txt @@ -129,7 +129,7 @@ stabilize 3->1 MsgFortifyLeaderResp Term:1 Log:0/0 Rejected (Hint: 0) 3->1 MsgAppResp Term:1 Log:0/3 Commit:2 > 1 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:1 Vote:1 Commit:3 Lead:1 LeadEpoch:3 CommittedEntries: 1/3 EntryNormal "" @@ -141,14 +141,14 @@ stabilize > 3 receiving messages 1->3 MsgApp Term:1 Log:1/3 Commit:3 > 2 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:1 Vote:1 Commit:3 Lead:1 LeadEpoch:2 CommittedEntries: 1/3 EntryNormal "" Messages: 2->1 MsgAppResp Term:1 Log:0/3 Commit:3 > 3 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:1 Vote:1 Commit:3 Lead:1 LeadEpoch:0 CommittedEntries: 1/3 EntryNormal "" diff --git a/pkg/raft/testdata/fortification_followers_dont_call_election.txt b/pkg/raft/testdata/fortification_followers_dont_call_election.txt new file mode 100644 index 000000000000..e135af91c85b --- /dev/null +++ b/pkg/raft/testdata/fortification_followers_dont_call_election.txt @@ -0,0 +1,200 @@ +# Test to ensure that a follower will not call an election if it's still +# supporting a fortified leader. 
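The first interesting step in this new test is that "campaign 2" is answered with "DEBUG 2 ignoring MsgHup due to leader fortification". A minimal Go sketch of that guard, under invented names rather than the package's real MsgHup path:

package main

import "fmt"

type node struct {
	id             uint64
	supportingLead bool // store liveness support for a fortified leader
}

// hup stands in for handling MsgHup: a campaign request is dropped while the
// node still supports a fortified leader.
func (n *node) hup() {
	if n.supportingLead {
		fmt.Printf("DEBUG %d ignoring MsgHup due to leader fortification\n", n.id)
		return
	}
	fmt.Printf("INFO %d is starting a new election\n", n.id)
}

func main() {
	n2 := &node{id: 2, supportingLead: true}
	n2.hup() // dropped, as in the test below
	n2.supportingLead = false
	n2.hup() // proceeds once support is withdrawn
}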
+ +log-level debug +---- +ok + +add-nodes 3 voters=(1,2,3) index=10 +---- +INFO 1 switched to configuration voters=(1 2 3) +INFO 1 became follower at term 0 +INFO newRaft 1 [peers: [1,2,3], term: 0, commit: 10, applied: 10, lastindex: 10, lastterm: 1] +INFO 2 switched to configuration voters=(1 2 3) +INFO 2 became follower at term 0 +INFO newRaft 2 [peers: [1,2,3], term: 0, commit: 10, applied: 10, lastindex: 10, lastterm: 1] +INFO 3 switched to configuration voters=(1 2 3) +INFO 3 became follower at term 0 +INFO newRaft 3 [peers: [1,2,3], term: 0, commit: 10, applied: 10, lastindex: 10, lastterm: 1] + +campaign 1 +---- +INFO 1 is starting a new election at term 0 +INFO 1 became candidate at term 1 +INFO 1 [logterm: 1, index: 10] sent MsgVote request to 2 at term 1 +INFO 1 [logterm: 1, index: 10] sent MsgVote request to 3 at term 1 + +stabilize +---- +> 1 handling Ready + Ready MustSync=true: + State:StateCandidate + HardState Term:1 Vote:1 Commit:10 Lead:0 LeadEpoch:0 + Messages: + 1->2 MsgVote Term:1 Log:1/10 + 1->3 MsgVote Term:1 Log:1/10 + INFO 1 received MsgVoteResp from 1 at term 1 + INFO 1 has received 1 MsgVoteResp votes and 0 vote rejections +> 2 receiving messages + 1->2 MsgVote Term:1 Log:1/10 + INFO 2 [term: 0] received a MsgVote message with higher term from 1 [term: 1] + INFO 2 became follower at term 1 + INFO 2 [logterm: 1, index: 10, vote: 0] cast MsgVote for 1 [logterm: 1, index: 10] at term 1 +> 3 receiving messages + 1->3 MsgVote Term:1 Log:1/10 + INFO 3 [term: 0] received a MsgVote message with higher term from 1 [term: 1] + INFO 3 became follower at term 1 + INFO 3 [logterm: 1, index: 10, vote: 0] cast MsgVote for 1 [logterm: 1, index: 10] at term 1 +> 2 handling Ready + Ready MustSync=true: + HardState Term:1 Vote:1 Commit:10 Lead:0 LeadEpoch:0 + Messages: + 2->1 MsgVoteResp Term:1 Log:0/0 +> 3 handling Ready + Ready MustSync=true: + HardState Term:1 Vote:1 Commit:10 Lead:0 LeadEpoch:0 + Messages: + 3->1 MsgVoteResp Term:1 Log:0/0 +> 1 receiving messages + 2->1 MsgVoteResp Term:1 Log:0/0 + INFO 1 received MsgVoteResp from 2 at term 1 + INFO 1 has received 2 MsgVoteResp votes and 0 vote rejections + INFO 1 became leader at term 1 + 3->1 MsgVoteResp Term:1 Log:0/0 +> 1 handling Ready + Ready MustSync=true: + State:StateLeader + HardState Term:1 Vote:1 Commit:10 Lead:1 LeadEpoch:1 + Entries: + 1/11 EntryNormal "" + Messages: + 1->2 MsgFortifyLeader Term:1 Log:0/0 + 1->3 MsgFortifyLeader Term:1 Log:0/0 + 1->2 MsgApp Term:1 Log:1/10 Commit:10 Entries:[1/11 EntryNormal ""] + 1->3 MsgApp Term:1 Log:1/10 Commit:10 Entries:[1/11 EntryNormal ""] +> 2 receiving messages + 1->2 MsgFortifyLeader Term:1 Log:0/0 + 1->2 MsgApp Term:1 Log:1/10 Commit:10 Entries:[1/11 EntryNormal ""] +> 3 receiving messages + 1->3 MsgFortifyLeader Term:1 Log:0/0 + 1->3 MsgApp Term:1 Log:1/10 Commit:10 Entries:[1/11 EntryNormal ""] +> 2 handling Ready + Ready MustSync=true: + HardState Term:1 Vote:1 Commit:10 Lead:1 LeadEpoch:1 + Entries: + 1/11 EntryNormal "" + Messages: + 2->1 MsgFortifyLeaderResp Term:1 Log:0/0 LeadEpoch:1 + 2->1 MsgAppResp Term:1 Log:0/11 Commit:10 +> 3 handling Ready + Ready MustSync=true: + HardState Term:1 Vote:1 Commit:10 Lead:1 LeadEpoch:1 + Entries: + 1/11 EntryNormal "" + Messages: + 3->1 MsgFortifyLeaderResp Term:1 Log:0/0 LeadEpoch:1 + 3->1 MsgAppResp Term:1 Log:0/11 Commit:10 +> 1 receiving messages + 2->1 MsgFortifyLeaderResp Term:1 Log:0/0 LeadEpoch:1 + 2->1 MsgAppResp Term:1 Log:0/11 Commit:10 + 3->1 MsgFortifyLeaderResp Term:1 Log:0/0 LeadEpoch:1 + 3->1 MsgAppResp 
Term:1 Log:0/11 Commit:10 +> 1 handling Ready + Ready MustSync=true: + HardState Term:1 Vote:1 Commit:11 Lead:1 LeadEpoch:1 + CommittedEntries: + 1/11 EntryNormal "" + Messages: + 1->2 MsgApp Term:1 Log:1/11 Commit:11 + 1->3 MsgApp Term:1 Log:1/11 Commit:11 +> 2 receiving messages + 1->2 MsgApp Term:1 Log:1/11 Commit:11 +> 3 receiving messages + 1->3 MsgApp Term:1 Log:1/11 Commit:11 +> 2 handling Ready + Ready MustSync=true: + HardState Term:1 Vote:1 Commit:11 Lead:1 LeadEpoch:1 + CommittedEntries: + 1/11 EntryNormal "" + Messages: + 2->1 MsgAppResp Term:1 Log:0/11 Commit:11 +> 3 handling Ready + Ready MustSync=true: + HardState Term:1 Vote:1 Commit:11 Lead:1 LeadEpoch:1 + CommittedEntries: + 1/11 EntryNormal "" + Messages: + 3->1 MsgAppResp Term:1 Log:0/11 Commit:11 +> 1 receiving messages + 2->1 MsgAppResp Term:1 Log:0/11 Commit:11 + 3->1 MsgAppResp Term:1 Log:0/11 Commit:11 + +store-liveness +---- + 1 2 3 +1 1 1 1 +2 1 1 1 +3 1 1 1 + +set-randomized-election-timeout 2 timeout=3 +---- +ok + +# Campaigning will fail when there is an active leader. +campaign 2 +---- +DEBUG 2 ignoring MsgHup due to leader fortification + +tick-election 2 +---- +ok + +# Withdraw support from 2 for 1 and tick 2 once. This should trigger an election +# (without having to wait out an entire randomized election timeout) because +# we're smart in recognizing when store liveness support expires. 2 should +# now be able to call an election right away. +withdraw-support 2 1 +---- + 1 2 3 +1 1 1 1 +2 x 1 1 +3 1 1 1 + +tick-heartbeat 2 +---- +DEBUG 2 setting election elapsed to start from 3 ticks after store liveness support expired +INFO 2 is starting a new election at term 1 +INFO 2 became candidate at term 2 +INFO 2 [logterm: 1, index: 11] sent MsgVote request to 1 at term 2 +INFO 2 [logterm: 1, index: 11] sent MsgVote request to 3 at term 2 + +# Set the randomized timeout for 3 to a value higher than election-timeout. This +# way, tick-election is guaranteed to not call an election which would reset +# the electionTimer (which would in turn prevent 3 from granting 2 its vote). + +# Set the randomized timeout for 3 to 4, which is 1 tick more than the +# election-tick. We then withdraw store liveness support from 3 for 1; 3 should +# then wait for 1 tick before campaigning. This then shows that we're preserving +# randomness that's baked into raft elections without waiting out an entire +# election timeout. +set-randomized-election-timeout 3 timeout=4 +---- +ok + +withdraw-support 3 1 +---- + 1 2 3 +1 1 1 1 +2 x 1 1 +3 x 1 1 + +tick-heartbeat 3 +---- +DEBUG 3 setting election elapsed to start from 3 ticks after store liveness support expired + +tick-heartbeat 3 +---- +INFO 3 is starting a new election at term 1 +INFO 3 became candidate at term 2 +INFO 3 [logterm: 1, index: 11] sent MsgVote request to 1 at term 2 +INFO 3 [logterm: 1, index: 11] sent MsgVote request to 2 at term 2 diff --git a/pkg/raft/testdata/fortification_followers_dont_call_election_prevote.txt b/pkg/raft/testdata/fortification_followers_dont_call_election_prevote.txt new file mode 100644 index 000000000000..a6c2c9356c49 --- /dev/null +++ b/pkg/raft/testdata/fortification_followers_dont_call_election_prevote.txt @@ -0,0 +1,230 @@ +# Test to ensure that a follower will not call an election if it's still +# supporting a fortified leader.
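This variant exercises the same scenario with prevote=true, so every election first passes through a pre-vote phase. As the stabilize output below shows, a pre-candidate solicits MsgPreVote at term+1 without bumping its own term, and the pre-vote Ready carries no HardState (MustSync=false), so a failed pre-vote disturbs nothing durable. A simplified Go sketch of that two-phase tally, with invented types:

package main

import "fmt"

type preCandidate struct {
	term  uint64
	peers int
}

// requestPreVotes tallies hypothetical pre-vote grants at term+1. Nothing is
// persisted: losing a pre-vote leaves the node's term untouched, which is why
// the pre-vote Ready below is MustSync=false.
func (p *preCandidate) requestPreVotes(grants int) bool {
	votes := 1 + grants // self-vote plus grants from peers
	return votes > p.peers/2
}

func main() {
	p := &preCandidate{term: 0, peers: 3}
	if p.requestPreVotes(1) { // a quorum of pre-votes (self plus one peer)
		p.term++ // only now become a real candidate, at term 1
		fmt.Println("became candidate at term", p.term)
	}
}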
+ +log-level debug +---- +ok + +add-nodes 3 voters=(1,2,3) index=10 prevote=true +---- +INFO 1 switched to configuration voters=(1 2 3) +INFO 1 became follower at term 0 +INFO newRaft 1 [peers: [1,2,3], term: 0, commit: 10, applied: 10, lastindex: 10, lastterm: 1] +INFO 2 switched to configuration voters=(1 2 3) +INFO 2 became follower at term 0 +INFO newRaft 2 [peers: [1,2,3], term: 0, commit: 10, applied: 10, lastindex: 10, lastterm: 1] +INFO 3 switched to configuration voters=(1 2 3) +INFO 3 became follower at term 0 +INFO newRaft 3 [peers: [1,2,3], term: 0, commit: 10, applied: 10, lastindex: 10, lastterm: 1] + +campaign 1 +---- +INFO 1 is starting a new election at term 0 +INFO 1 became pre-candidate at term 0 +INFO 1 [logterm: 1, index: 10] sent MsgPreVote request to 2 at term 0 +INFO 1 [logterm: 1, index: 10] sent MsgPreVote request to 3 at term 0 + +stabilize +---- +> 1 handling Ready + Ready MustSync=false: + State:StatePreCandidate + Messages: + 1->2 MsgPreVote Term:1 Log:1/10 + 1->3 MsgPreVote Term:1 Log:1/10 + INFO 1 received MsgPreVoteResp from 1 at term 0 + INFO 1 has received 1 MsgPreVoteResp votes and 0 vote rejections +> 2 receiving messages + 1->2 MsgPreVote Term:1 Log:1/10 + INFO 2 [logterm: 1, index: 10, vote: 0] cast MsgPreVote for 1 [logterm: 1, index: 10] at term 0 +> 3 receiving messages + 1->3 MsgPreVote Term:1 Log:1/10 + INFO 3 [logterm: 1, index: 10, vote: 0] cast MsgPreVote for 1 [logterm: 1, index: 10] at term 0 +> 2 handling Ready + Ready MustSync=false: + Messages: + 2->1 MsgPreVoteResp Term:1 Log:0/0 +> 3 handling Ready + Ready MustSync=false: + Messages: + 3->1 MsgPreVoteResp Term:1 Log:0/0 +> 1 receiving messages + 2->1 MsgPreVoteResp Term:1 Log:0/0 + INFO 1 received MsgPreVoteResp from 2 at term 0 + INFO 1 has received 2 MsgPreVoteResp votes and 0 vote rejections + INFO 1 became candidate at term 1 + INFO 1 [logterm: 1, index: 10] sent MsgVote request to 2 at term 1 + INFO 1 [logterm: 1, index: 10] sent MsgVote request to 3 at term 1 + 3->1 MsgPreVoteResp Term:1 Log:0/0 +> 1 handling Ready + Ready MustSync=true: + State:StateCandidate + HardState Term:1 Vote:1 Commit:10 Lead:0 LeadEpoch:0 + Messages: + 1->2 MsgVote Term:1 Log:1/10 + 1->3 MsgVote Term:1 Log:1/10 + INFO 1 received MsgVoteResp from 1 at term 1 + INFO 1 has received 1 MsgVoteResp votes and 0 vote rejections +> 2 receiving messages + 1->2 MsgVote Term:1 Log:1/10 + INFO 2 [term: 0] received a MsgVote message with higher term from 1 [term: 1] + INFO 2 became follower at term 1 + INFO 2 [logterm: 1, index: 10, vote: 0] cast MsgVote for 1 [logterm: 1, index: 10] at term 1 +> 3 receiving messages + 1->3 MsgVote Term:1 Log:1/10 + INFO 3 [term: 0] received a MsgVote message with higher term from 1 [term: 1] + INFO 3 became follower at term 1 + INFO 3 [logterm: 1, index: 10, vote: 0] cast MsgVote for 1 [logterm: 1, index: 10] at term 1 +> 2 handling Ready + Ready MustSync=true: + HardState Term:1 Vote:1 Commit:10 Lead:0 LeadEpoch:0 + Messages: + 2->1 MsgVoteResp Term:1 Log:0/0 +> 3 handling Ready + Ready MustSync=true: + HardState Term:1 Vote:1 Commit:10 Lead:0 LeadEpoch:0 + Messages: + 3->1 MsgVoteResp Term:1 Log:0/0 +> 1 receiving messages + 2->1 MsgVoteResp Term:1 Log:0/0 + INFO 1 received MsgVoteResp from 2 at term 1 + INFO 1 has received 2 MsgVoteResp votes and 0 vote rejections + INFO 1 became leader at term 1 + 3->1 MsgVoteResp Term:1 Log:0/0 +> 1 handling Ready + Ready MustSync=true: + State:StateLeader + HardState Term:1 Vote:1 Commit:10 Lead:1 LeadEpoch:1 + Entries: + 1/11 EntryNormal "" + 
Messages: + 1->2 MsgFortifyLeader Term:1 Log:0/0 + 1->3 MsgFortifyLeader Term:1 Log:0/0 + 1->2 MsgApp Term:1 Log:1/10 Commit:10 Entries:[1/11 EntryNormal ""] + 1->3 MsgApp Term:1 Log:1/10 Commit:10 Entries:[1/11 EntryNormal ""] +> 2 receiving messages + 1->2 MsgFortifyLeader Term:1 Log:0/0 + 1->2 MsgApp Term:1 Log:1/10 Commit:10 Entries:[1/11 EntryNormal ""] +> 3 receiving messages + 1->3 MsgFortifyLeader Term:1 Log:0/0 + 1->3 MsgApp Term:1 Log:1/10 Commit:10 Entries:[1/11 EntryNormal ""] +> 2 handling Ready + Ready MustSync=true: + HardState Term:1 Vote:1 Commit:10 Lead:1 LeadEpoch:1 + Entries: + 1/11 EntryNormal "" + Messages: + 2->1 MsgFortifyLeaderResp Term:1 Log:0/0 LeadEpoch:1 + 2->1 MsgAppResp Term:1 Log:0/11 Commit:10 +> 3 handling Ready + Ready MustSync=true: + HardState Term:1 Vote:1 Commit:10 Lead:1 LeadEpoch:1 + Entries: + 1/11 EntryNormal "" + Messages: + 3->1 MsgFortifyLeaderResp Term:1 Log:0/0 LeadEpoch:1 + 3->1 MsgAppResp Term:1 Log:0/11 Commit:10 +> 1 receiving messages + 2->1 MsgFortifyLeaderResp Term:1 Log:0/0 LeadEpoch:1 + 2->1 MsgAppResp Term:1 Log:0/11 Commit:10 + 3->1 MsgFortifyLeaderResp Term:1 Log:0/0 LeadEpoch:1 + 3->1 MsgAppResp Term:1 Log:0/11 Commit:10 +> 1 handling Ready + Ready MustSync=true: + HardState Term:1 Vote:1 Commit:11 Lead:1 LeadEpoch:1 + CommittedEntries: + 1/11 EntryNormal "" + Messages: + 1->2 MsgApp Term:1 Log:1/11 Commit:11 + 1->3 MsgApp Term:1 Log:1/11 Commit:11 +> 2 receiving messages + 1->2 MsgApp Term:1 Log:1/11 Commit:11 +> 3 receiving messages + 1->3 MsgApp Term:1 Log:1/11 Commit:11 +> 2 handling Ready + Ready MustSync=true: + HardState Term:1 Vote:1 Commit:11 Lead:1 LeadEpoch:1 + CommittedEntries: + 1/11 EntryNormal "" + Messages: + 2->1 MsgAppResp Term:1 Log:0/11 Commit:11 +> 3 handling Ready + Ready MustSync=true: + HardState Term:1 Vote:1 Commit:11 Lead:1 LeadEpoch:1 + CommittedEntries: + 1/11 EntryNormal "" + Messages: + 3->1 MsgAppResp Term:1 Log:0/11 Commit:11 +> 1 receiving messages + 2->1 MsgAppResp Term:1 Log:0/11 Commit:11 + 3->1 MsgAppResp Term:1 Log:0/11 Commit:11 + +store-liveness +---- + 1 2 3 +1 1 1 1 +2 1 1 1 +3 1 1 1 + +set-randomized-election-timeout 2 timeout=3 +---- +ok + +# Campaigning will fail when there is an active leader. +campaign 2 +---- +DEBUG 2 ignoring MsgHup due to leader fortification + +tick-election 2 +---- +ok + +# Withdraw support from 2 for 1 and tick 2 once. This should trigger an election +# (without having to wait out an entire randomized election timeout) because +# we're smart in recognizing when store liveness support expires. 2 should +# now be able to call an election right away. +withdraw-support 2 1 +---- + 1 2 3 +1 1 1 1 +2 x 1 1 +3 1 1 1 + +tick-heartbeat 2 +---- +DEBUG 2 setting election elapsed to start from 3 ticks after store liveness support expired +INFO 2 is starting a new election at term 1 +INFO 2 became pre-candidate at term 1 +INFO 2 [logterm: 1, index: 11] sent MsgPreVote request to 1 at term 1 +INFO 2 [logterm: 1, index: 11] sent MsgPreVote request to 3 at term 1 + +# Set the randomized timeout for 3 to a value higher than election-timeout. This +# way, tick-election is guaranteed to not call an election which would reset +# the electionTimer (which would in turn prevent 3 from granting 2 its vote). + +# Set the randomized timeout for 3 to 4, which is 1 tick more than the +# election-tick. We then withdraw store liveness support from 3 for 1; 3 should +# then wait for 1 tick before campaigning.
This then shows that we're preserving +# randomness that's baked into raft elections without waiting out an entire +# election timeout. +set-randomized-election-timeout 3 timeout=4 +---- +ok + +withdraw-support 3 1 +---- + 1 2 3 +1 1 1 1 +2 x 1 1 +3 x 1 1 + +tick-heartbeat 3 +---- +DEBUG 3 setting election elapsed to start from 3 ticks after store liveness support expired + +tick-heartbeat 3 +---- +INFO 3 is starting a new election at term 1 +INFO 3 became pre-candidate at term 1 +INFO 3 [logterm: 1, index: 11] sent MsgPreVote request to 1 at term 1 +INFO 3 [logterm: 1, index: 11] sent MsgPreVote request to 2 at term 1 diff --git a/pkg/raft/testdata/fortification_followers_dont_prevote.txt b/pkg/raft/testdata/fortification_followers_dont_prevote.txt new file mode 100644 index 000000000000..0a1d1d56104d --- /dev/null +++ b/pkg/raft/testdata/fortification_followers_dont_prevote.txt @@ -0,0 +1,505 @@ +# Test to ensure that a follower will not vote for another peer +# if they're supporting a fortified leader. Pre-vote is turned on. + +log-level debug +---- +ok + +add-nodes 3 voters=(1,2,3) index=10 prevote=true +---- +INFO 1 switched to configuration voters=(1 2 3) +INFO 1 became follower at term 0 +INFO newRaft 1 [peers: [1,2,3], term: 0, commit: 10, applied: 10, lastindex: 10, lastterm: 1] +INFO 2 switched to configuration voters=(1 2 3) +INFO 2 became follower at term 0 +INFO newRaft 2 [peers: [1,2,3], term: 0, commit: 10, applied: 10, lastindex: 10, lastterm: 1] +INFO 3 switched to configuration voters=(1 2 3) +INFO 3 became follower at term 0 +INFO newRaft 3 [peers: [1,2,3], term: 0, commit: 10, applied: 10, lastindex: 10, lastterm: 1] + +campaign 1 +---- +INFO 1 is starting a new election at term 0 +INFO 1 became pre-candidate at term 0 +INFO 1 [logterm: 1, index: 10] sent MsgPreVote request to 2 at term 0 +INFO 1 [logterm: 1, index: 10] sent MsgPreVote request to 3 at term 0 + +stabilize +---- +> 1 handling Ready + Ready MustSync=false: + State:StatePreCandidate + Messages: + 1->2 MsgPreVote Term:1 Log:1/10 + 1->3 MsgPreVote Term:1 Log:1/10 + INFO 1 received MsgPreVoteResp from 1 at term 0 + INFO 1 has received 1 MsgPreVoteResp votes and 0 vote rejections +> 2 receiving messages + 1->2 MsgPreVote Term:1 Log:1/10 + INFO 2 [logterm: 1, index: 10, vote: 0] cast MsgPreVote for 1 [logterm: 1, index: 10] at term 0 +> 3 receiving messages + 1->3 MsgPreVote Term:1 Log:1/10 + INFO 3 [logterm: 1, index: 10, vote: 0] cast MsgPreVote for 1 [logterm: 1, index: 10] at term 0 +> 2 handling Ready + Ready MustSync=false: + Messages: + 2->1 MsgPreVoteResp Term:1 Log:0/0 +> 3 handling Ready + Ready MustSync=false: + Messages: + 3->1 MsgPreVoteResp Term:1 Log:0/0 +> 1 receiving messages + 2->1 MsgPreVoteResp Term:1 Log:0/0 + INFO 1 received MsgPreVoteResp from 2 at term 0 + INFO 1 has received 2 MsgPreVoteResp votes and 0 vote rejections + INFO 1 became candidate at term 1 + INFO 1 [logterm: 1, index: 10] sent MsgVote request to 2 at term 1 + INFO 1 [logterm: 1, index: 10] sent MsgVote request to 3 at term 1 + 3->1 MsgPreVoteResp Term:1 Log:0/0 +> 1 handling Ready + Ready MustSync=true: + State:StateCandidate + HardState Term:1 Vote:1 Commit:10 Lead:0 LeadEpoch:0 + Messages: + 1->2 MsgVote Term:1 Log:1/10 + 1->3 MsgVote Term:1 Log:1/10 + INFO 1 received MsgVoteResp from 1 at term 1 + INFO 1 has received 1 MsgVoteResp votes and 0 vote rejections +> 2 receiving messages + 1->2 MsgVote Term:1 Log:1/10 + INFO 2 [term: 0] received a MsgVote message with higher term from 1 [term: 1] + INFO 2 became follower at 
term 1 + INFO 2 [logterm: 1, index: 10, vote: 0] cast MsgVote for 1 [logterm: 1, index: 10] at term 1 +> 3 receiving messages + 1->3 MsgVote Term:1 Log:1/10 + INFO 3 [term: 0] received a MsgVote message with higher term from 1 [term: 1] + INFO 3 became follower at term 1 + INFO 3 [logterm: 1, index: 10, vote: 0] cast MsgVote for 1 [logterm: 1, index: 10] at term 1 +> 2 handling Ready + Ready MustSync=true: + HardState Term:1 Vote:1 Commit:10 Lead:0 LeadEpoch:0 + Messages: + 2->1 MsgVoteResp Term:1 Log:0/0 +> 3 handling Ready + Ready MustSync=true: + HardState Term:1 Vote:1 Commit:10 Lead:0 LeadEpoch:0 + Messages: + 3->1 MsgVoteResp Term:1 Log:0/0 +> 1 receiving messages + 2->1 MsgVoteResp Term:1 Log:0/0 + INFO 1 received MsgVoteResp from 2 at term 1 + INFO 1 has received 2 MsgVoteResp votes and 0 vote rejections + INFO 1 became leader at term 1 + 3->1 MsgVoteResp Term:1 Log:0/0 +> 1 handling Ready + Ready MustSync=true: + State:StateLeader + HardState Term:1 Vote:1 Commit:10 Lead:1 LeadEpoch:1 + Entries: + 1/11 EntryNormal "" + Messages: + 1->2 MsgFortifyLeader Term:1 Log:0/0 + 1->3 MsgFortifyLeader Term:1 Log:0/0 + 1->2 MsgApp Term:1 Log:1/10 Commit:10 Entries:[1/11 EntryNormal ""] + 1->3 MsgApp Term:1 Log:1/10 Commit:10 Entries:[1/11 EntryNormal ""] +> 2 receiving messages + 1->2 MsgFortifyLeader Term:1 Log:0/0 + 1->2 MsgApp Term:1 Log:1/10 Commit:10 Entries:[1/11 EntryNormal ""] +> 3 receiving messages + 1->3 MsgFortifyLeader Term:1 Log:0/0 + 1->3 MsgApp Term:1 Log:1/10 Commit:10 Entries:[1/11 EntryNormal ""] +> 2 handling Ready + Ready MustSync=true: + HardState Term:1 Vote:1 Commit:10 Lead:1 LeadEpoch:1 + Entries: + 1/11 EntryNormal "" + Messages: + 2->1 MsgFortifyLeaderResp Term:1 Log:0/0 LeadEpoch:1 + 2->1 MsgAppResp Term:1 Log:0/11 Commit:10 +> 3 handling Ready + Ready MustSync=true: + HardState Term:1 Vote:1 Commit:10 Lead:1 LeadEpoch:1 + Entries: + 1/11 EntryNormal "" + Messages: + 3->1 MsgFortifyLeaderResp Term:1 Log:0/0 LeadEpoch:1 + 3->1 MsgAppResp Term:1 Log:0/11 Commit:10 +> 1 receiving messages + 2->1 MsgFortifyLeaderResp Term:1 Log:0/0 LeadEpoch:1 + 2->1 MsgAppResp Term:1 Log:0/11 Commit:10 + 3->1 MsgFortifyLeaderResp Term:1 Log:0/0 LeadEpoch:1 + 3->1 MsgAppResp Term:1 Log:0/11 Commit:10 +> 1 handling Ready + Ready MustSync=true: + HardState Term:1 Vote:1 Commit:11 Lead:1 LeadEpoch:1 + CommittedEntries: + 1/11 EntryNormal "" + Messages: + 1->2 MsgApp Term:1 Log:1/11 Commit:11 + 1->3 MsgApp Term:1 Log:1/11 Commit:11 +> 2 receiving messages + 1->2 MsgApp Term:1 Log:1/11 Commit:11 +> 3 receiving messages + 1->3 MsgApp Term:1 Log:1/11 Commit:11 +> 2 handling Ready + Ready MustSync=true: + HardState Term:1 Vote:1 Commit:11 Lead:1 LeadEpoch:1 + CommittedEntries: + 1/11 EntryNormal "" + Messages: + 2->1 MsgAppResp Term:1 Log:0/11 Commit:11 +> 3 handling Ready + Ready MustSync=true: + HardState Term:1 Vote:1 Commit:11 Lead:1 LeadEpoch:1 + CommittedEntries: + 1/11 EntryNormal "" + Messages: + 3->1 MsgAppResp Term:1 Log:0/11 Commit:11 +> 1 receiving messages + 2->1 MsgAppResp Term:1 Log:0/11 Commit:11 + 3->1 MsgAppResp Term:1 Log:0/11 Commit:11 + +withdraw-support 2 1 +---- + 1 2 3 +1 1 1 1 +2 x 1 1 +3 1 1 1 + +# Ensure neither 1 or 3 vote for 2 as both of them are still +# supporting the fortified leader (1). 
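The steps that follow demonstrate the quorum argument directly: 2 cannot win while only 2 itself has withdrawn support, and it becomes electable once 3 withdraws as well, because peers that still support the fortified leader ignore MsgPreVote and MsgVote. A small Go sketch of that electability check, under assumed names:

package main

import "fmt"

// electable reports whether candidate can gather a quorum, given which peers
// still support the old fortified leader (supporters ignore vote requests).
func electable(supportsOldLeader map[uint64]bool, candidate uint64, n int) bool {
	grants := 1 // the candidate votes for itself
	for id, supports := range supportsOldLeader {
		if id != candidate && !supports {
			grants++
		}
	}
	return grants > n/2
}

func main() {
	// After "withdraw-support 2 1": only 2 has withdrawn; 1 and 3 still support 1.
	support := map[uint64]bool{1: true, 2: false, 3: true}
	fmt.Println("2 electable:", electable(support, 2, 3)) // false
	support[3] = false // after "withdraw-support 3 1"
	fmt.Println("2 electable:", electable(support, 2, 3)) // true
}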
+campaign 2 +---- +INFO 2 is starting a new election at term 1 +INFO 2 became pre-candidate at term 1 +INFO 2 [logterm: 1, index: 11] sent MsgPreVote request to 1 at term 1 +INFO 2 [logterm: 1, index: 11] sent MsgPreVote request to 3 at term 1 + +stabilize +---- +> 2 handling Ready + Ready MustSync=true: + State:StatePreCandidate + HardState Term:1 Vote:1 Commit:11 Lead:0 LeadEpoch:0 + Messages: + 2->1 MsgPreVote Term:2 Log:1/11 + 2->3 MsgPreVote Term:2 Log:1/11 + INFO 2 received MsgPreVoteResp from 2 at term 1 + INFO 2 has received 1 MsgPreVoteResp votes and 0 vote rejections +> 1 receiving messages + 2->1 MsgPreVote Term:2 Log:1/11 + INFO 1 [logterm: 1, index: 11, vote: 1] ignored MsgPreVote from 2 [logterm: 1, index: 11] at term 1: supporting fortified leader 1 at epoch 1 +> 3 receiving messages + 2->3 MsgPreVote Term:2 Log:1/11 + INFO 3 [logterm: 1, index: 11, vote: 1] ignored MsgPreVote from 2 [logterm: 1, index: 11] at term 1: supporting fortified leader 1 at epoch 1 + +raft-state +---- +1: StateLeader (Voter) Term:1 Lead:1 +2: StatePreCandidate (Voter) Term:1 Lead:0 +3: StateFollower (Voter) Term:1 Lead:1 + +# However, once a quorum withdraws support for the fortified leader, 2 can then +# be elected. +withdraw-support 3 1 +---- + 1 2 3 +1 1 1 1 +2 x 1 1 +3 x 1 1 + +campaign 2 +---- +INFO 2 is starting a new election at term 1 +INFO 2 became pre-candidate at term 1 +INFO 2 [logterm: 1, index: 11] sent MsgPreVote request to 1 at term 1 +INFO 2 [logterm: 1, index: 11] sent MsgPreVote request to 3 at term 1 + +stabilize +---- +> 2 handling Ready + Ready MustSync=false: + Messages: + 2->1 MsgPreVote Term:2 Log:1/11 + 2->3 MsgPreVote Term:2 Log:1/11 + INFO 2 received MsgPreVoteResp from 2 at term 1 + INFO 2 has received 1 MsgPreVoteResp votes and 0 vote rejections +> 1 receiving messages + 2->1 MsgPreVote Term:2 Log:1/11 + INFO 1 [logterm: 1, index: 11, vote: 1] ignored MsgPreVote from 2 [logterm: 1, index: 11] at term 1: supporting fortified leader 1 at epoch 1 +> 3 receiving messages + 2->3 MsgPreVote Term:2 Log:1/11 + INFO 3 [logterm: 1, index: 11, vote: 1] cast MsgPreVote for 2 [logterm: 1, index: 11] at term 1 +> 3 handling Ready + Ready MustSync=true: + HardState Term:1 Vote:1 Commit:11 Lead:1 LeadEpoch:0 + Messages: + 3->2 MsgPreVoteResp Term:2 Log:0/0 +> 2 receiving messages + 3->2 MsgPreVoteResp Term:2 Log:0/0 + INFO 2 received MsgPreVoteResp from 3 at term 1 + INFO 2 has received 2 MsgPreVoteResp votes and 0 vote rejections + INFO 2 became candidate at term 2 + INFO 2 [logterm: 1, index: 11] sent MsgVote request to 1 at term 2 + INFO 2 [logterm: 1, index: 11] sent MsgVote request to 3 at term 2 +> 2 handling Ready + Ready MustSync=true: + State:StateCandidate + HardState Term:2 Vote:2 Commit:11 Lead:0 LeadEpoch:0 + Messages: + 2->1 MsgVote Term:2 Log:1/11 + 2->3 MsgVote Term:2 Log:1/11 + INFO 2 received MsgVoteResp from 2 at term 2 + INFO 2 has received 1 MsgVoteResp votes and 0 vote rejections +> 1 receiving messages + 2->1 MsgVote Term:2 Log:1/11 + INFO 1 [logterm: 1, index: 11, vote: 1] ignored MsgVote from 2 [logterm: 1, index: 11] at term 1: supporting fortified leader 1 at epoch 1 +> 3 receiving messages + 2->3 MsgVote Term:2 Log:1/11 + INFO 3 [term: 1] received a MsgVote message with higher term from 2 [term: 2] + INFO 3 became follower at term 2 + INFO 3 [logterm: 1, index: 11, vote: 0] cast MsgVote for 2 [logterm: 1, index: 11] at term 2 +> 3 handling Ready + Ready MustSync=true: + HardState Term:2 Vote:2 Commit:11 Lead:0 LeadEpoch:0 + Messages: + 3->2 MsgVoteResp 
Term:2 Log:0/0 +> 2 receiving messages + 3->2 MsgVoteResp Term:2 Log:0/0 + INFO 2 received MsgVoteResp from 3 at term 2 + INFO 2 has received 2 MsgVoteResp votes and 0 vote rejections + INFO 2 became leader at term 2 +> 2 handling Ready + Ready MustSync=true: + State:StateLeader + HardState Term:2 Vote:2 Commit:11 Lead:2 LeadEpoch:1 + Entries: + 2/12 EntryNormal "" + Messages: + 2->1 MsgFortifyLeader Term:2 Log:0/0 + 2->3 MsgFortifyLeader Term:2 Log:0/0 + 2->1 MsgApp Term:2 Log:1/11 Commit:11 Entries:[2/12 EntryNormal ""] + 2->3 MsgApp Term:2 Log:1/11 Commit:11 Entries:[2/12 EntryNormal ""] +> 1 receiving messages + 2->1 MsgFortifyLeader Term:2 Log:0/0 + INFO 1 [term: 1] received a MsgFortifyLeader message with higher term from 2 [term: 2] + INFO 1 became follower at term 2 + 2->1 MsgApp Term:2 Log:1/11 Commit:11 Entries:[2/12 EntryNormal ""] +> 3 receiving messages + 2->3 MsgFortifyLeader Term:2 Log:0/0 + 2->3 MsgApp Term:2 Log:1/11 Commit:11 Entries:[2/12 EntryNormal ""] +> 1 handling Ready + Ready MustSync=true: + State:StateFollower + HardState Term:2 Commit:11 Lead:2 LeadEpoch:1 + Entries: + 2/12 EntryNormal "" + Messages: + 1->2 MsgFortifyLeaderResp Term:2 Log:0/0 LeadEpoch:1 + 1->2 MsgAppResp Term:2 Log:0/12 Commit:11 +> 3 handling Ready + Ready MustSync=true: + HardState Term:2 Vote:2 Commit:11 Lead:2 LeadEpoch:1 + Entries: + 2/12 EntryNormal "" + Messages: + 3->2 MsgFortifyLeaderResp Term:2 Log:0/0 LeadEpoch:1 + 3->2 MsgAppResp Term:2 Log:0/12 Commit:11 +> 2 receiving messages + 1->2 MsgFortifyLeaderResp Term:2 Log:0/0 LeadEpoch:1 + 1->2 MsgAppResp Term:2 Log:0/12 Commit:11 + 3->2 MsgFortifyLeaderResp Term:2 Log:0/0 LeadEpoch:1 + 3->2 MsgAppResp Term:2 Log:0/12 Commit:11 +> 2 handling Ready + Ready MustSync=true: + HardState Term:2 Vote:2 Commit:12 Lead:2 LeadEpoch:1 + CommittedEntries: + 2/12 EntryNormal "" + Messages: + 2->1 MsgApp Term:2 Log:2/12 Commit:12 + 2->3 MsgApp Term:2 Log:2/12 Commit:12 +> 1 receiving messages + 2->1 MsgApp Term:2 Log:2/12 Commit:12 +> 3 receiving messages + 2->3 MsgApp Term:2 Log:2/12 Commit:12 +> 1 handling Ready + Ready MustSync=true: + HardState Term:2 Commit:12 Lead:2 LeadEpoch:1 + CommittedEntries: + 2/12 EntryNormal "" + Messages: + 1->2 MsgAppResp Term:2 Log:0/12 Commit:12 +> 3 handling Ready + Ready MustSync=true: + HardState Term:2 Vote:2 Commit:12 Lead:2 LeadEpoch:1 + CommittedEntries: + 2/12 EntryNormal "" + Messages: + 3->2 MsgAppResp Term:2 Log:0/12 Commit:12 +> 2 receiving messages + 1->2 MsgAppResp Term:2 Log:0/12 Commit:12 + 3->2 MsgAppResp Term:2 Log:0/12 Commit:12 + +raft-state +---- +1: StateFollower (Voter) Term:2 Lead:2 +2: StateLeader (Voter) Term:2 Lead:2 +3: StateFollower (Voter) Term:2 Lead:2 + +# Lastly, ensure that the leader is able to successfully campaign at a higher +# term. We'll need to step down to set this up properly, as otherwise attempts +# to campaign will no-op. 
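A minimal Go sketch, with hypothetical methods, of why the explicit step-down is needed here: an established leader treats a campaign request as a no-op, so the test demotes 2 first and only then campaigns it to a higher term.

package main

import "fmt"

type raftNode struct {
	id       uint64
	term     uint64
	isLeader bool
}

func (r *raftNode) campaign() {
	if r.isLeader {
		return // an established leader's MsgHup is a no-op
	}
	fmt.Printf("INFO %d is starting a new election at term %d\n", r.id, r.term)
	r.term++ // becomes a (pre-)candidate and eventually bumps the term
}

func (r *raftNode) stepDown() {
	r.isLeader = false
	fmt.Printf("INFO %d became follower at term %d\n", r.id, r.term)
}

func main() {
	n2 := &raftNode{id: 2, term: 2, isLeader: true}
	n2.campaign() // silently dropped
	n2.stepDown() // the explicit step-down above
	n2.campaign() // now proceeds, ending at term 3
}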
+step-down 2 +---- +INFO 2 became follower at term 2 + +campaign 2 +---- +INFO 2 is starting a new election at term 2 +INFO 2 became pre-candidate at term 2 +INFO 2 [logterm: 2, index: 12] sent MsgPreVote request to 1 at term 2 +INFO 2 [logterm: 2, index: 12] sent MsgPreVote request to 3 at term 2 + +stabilize +---- +> 2 handling Ready + Ready MustSync=true: + State:StatePreCandidate + HardState Term:2 Vote:2 Commit:12 Lead:0 LeadEpoch:0 + Messages: + 2->1 MsgPreVote Term:3 Log:2/12 + 2->3 MsgPreVote Term:3 Log:2/12 + INFO 2 received MsgPreVoteResp from 2 at term 2 + INFO 2 has received 1 MsgPreVoteResp votes and 0 vote rejections +> 1 receiving messages + 2->1 MsgPreVote Term:3 Log:2/12 + INFO 1 [logterm: 2, index: 12, vote: 0] cast MsgPreVote for 2 [logterm: 2, index: 12] at term 2 +> 3 receiving messages + 2->3 MsgPreVote Term:3 Log:2/12 + INFO 3 [logterm: 2, index: 12, vote: 2] cast MsgPreVote for 2 [logterm: 2, index: 12] at term 2 +> 1 handling Ready + Ready MustSync=true: + HardState Term:2 Commit:12 Lead:2 LeadEpoch:0 + Messages: + 1->2 MsgPreVoteResp Term:3 Log:0/0 +> 3 handling Ready + Ready MustSync=true: + HardState Term:2 Vote:2 Commit:12 Lead:2 LeadEpoch:0 + Messages: + 3->2 MsgPreVoteResp Term:3 Log:0/0 +> 2 receiving messages + 1->2 MsgPreVoteResp Term:3 Log:0/0 + INFO 2 received MsgPreVoteResp from 1 at term 2 + INFO 2 has received 2 MsgPreVoteResp votes and 0 vote rejections + INFO 2 became candidate at term 3 + INFO 2 [logterm: 2, index: 12] sent MsgVote request to 1 at term 3 + INFO 2 [logterm: 2, index: 12] sent MsgVote request to 3 at term 3 + 3->2 MsgPreVoteResp Term:3 Log:0/0 +> 2 handling Ready + Ready MustSync=true: + State:StateCandidate + HardState Term:3 Vote:2 Commit:12 Lead:0 LeadEpoch:0 + Messages: + 2->1 MsgVote Term:3 Log:2/12 + 2->3 MsgVote Term:3 Log:2/12 + INFO 2 received MsgVoteResp from 2 at term 3 + INFO 2 has received 1 MsgVoteResp votes and 0 vote rejections +> 1 receiving messages + 2->1 MsgVote Term:3 Log:2/12 + INFO 1 [term: 2] received a MsgVote message with higher term from 2 [term: 3] + INFO 1 became follower at term 3 + INFO 1 [logterm: 2, index: 12, vote: 0] cast MsgVote for 2 [logterm: 2, index: 12] at term 3 +> 3 receiving messages + 2->3 MsgVote Term:3 Log:2/12 + INFO 3 [term: 2] received a MsgVote message with higher term from 2 [term: 3] + INFO 3 became follower at term 3 + INFO 3 [logterm: 2, index: 12, vote: 0] cast MsgVote for 2 [logterm: 2, index: 12] at term 3 +> 1 handling Ready + Ready MustSync=true: + HardState Term:3 Vote:2 Commit:12 Lead:0 LeadEpoch:0 + Messages: + 1->2 MsgVoteResp Term:3 Log:0/0 +> 3 handling Ready + Ready MustSync=true: + HardState Term:3 Vote:2 Commit:12 Lead:0 LeadEpoch:0 + Messages: + 3->2 MsgVoteResp Term:3 Log:0/0 +> 2 receiving messages + 1->2 MsgVoteResp Term:3 Log:0/0 + INFO 2 received MsgVoteResp from 1 at term 3 + INFO 2 has received 2 MsgVoteResp votes and 0 vote rejections + INFO 2 became leader at term 3 + 3->2 MsgVoteResp Term:3 Log:0/0 +> 2 handling Ready + Ready MustSync=true: + State:StateLeader + HardState Term:3 Vote:2 Commit:12 Lead:2 LeadEpoch:1 + Entries: + 3/13 EntryNormal "" + Messages: + 2->1 MsgFortifyLeader Term:3 Log:0/0 + 2->3 MsgFortifyLeader Term:3 Log:0/0 + 2->1 MsgApp Term:3 Log:2/12 Commit:12 Entries:[3/13 EntryNormal ""] + 2->3 MsgApp Term:3 Log:2/12 Commit:12 Entries:[3/13 EntryNormal ""] +> 1 receiving messages + 2->1 MsgFortifyLeader Term:3 Log:0/0 + 2->1 MsgApp Term:3 Log:2/12 Commit:12 Entries:[3/13 EntryNormal ""] +> 3 receiving messages + 2->3 MsgFortifyLeader 
Term:3 Log:0/0 + 2->3 MsgApp Term:3 Log:2/12 Commit:12 Entries:[3/13 EntryNormal ""] +> 1 handling Ready + Ready MustSync=true: + HardState Term:3 Vote:2 Commit:12 Lead:2 LeadEpoch:1 + Entries: + 3/13 EntryNormal "" + Messages: + 1->2 MsgFortifyLeaderResp Term:3 Log:0/0 LeadEpoch:1 + 1->2 MsgAppResp Term:3 Log:0/13 Commit:12 +> 3 handling Ready + Ready MustSync=true: + HardState Term:3 Vote:2 Commit:12 Lead:2 LeadEpoch:1 + Entries: + 3/13 EntryNormal "" + Messages: + 3->2 MsgFortifyLeaderResp Term:3 Log:0/0 LeadEpoch:1 + 3->2 MsgAppResp Term:3 Log:0/13 Commit:12 +> 2 receiving messages + 1->2 MsgFortifyLeaderResp Term:3 Log:0/0 LeadEpoch:1 + 1->2 MsgAppResp Term:3 Log:0/13 Commit:12 + 3->2 MsgFortifyLeaderResp Term:3 Log:0/0 LeadEpoch:1 + 3->2 MsgAppResp Term:3 Log:0/13 Commit:12 +> 2 handling Ready + Ready MustSync=true: + HardState Term:3 Vote:2 Commit:13 Lead:2 LeadEpoch:1 + CommittedEntries: + 3/13 EntryNormal "" + Messages: + 2->1 MsgApp Term:3 Log:3/13 Commit:13 + 2->3 MsgApp Term:3 Log:3/13 Commit:13 +> 1 receiving messages + 2->1 MsgApp Term:3 Log:3/13 Commit:13 +> 3 receiving messages + 2->3 MsgApp Term:3 Log:3/13 Commit:13 +> 1 handling Ready + Ready MustSync=true: + HardState Term:3 Vote:2 Commit:13 Lead:2 LeadEpoch:1 + CommittedEntries: + 3/13 EntryNormal "" + Messages: + 1->2 MsgAppResp Term:3 Log:0/13 Commit:13 +> 3 handling Ready + Ready MustSync=true: + HardState Term:3 Vote:2 Commit:13 Lead:2 LeadEpoch:1 + CommittedEntries: + 3/13 EntryNormal "" + Messages: + 3->2 MsgAppResp Term:3 Log:0/13 Commit:13 +> 2 receiving messages + 1->2 MsgAppResp Term:3 Log:0/13 Commit:13 + 3->2 MsgAppResp Term:3 Log:0/13 Commit:13 + +raft-state +---- +1: StateFollower (Voter) Term:3 Lead:2 +2: StateLeader (Voter) Term:3 Lead:2 +3: StateFollower (Voter) Term:3 Lead:2 diff --git a/pkg/raft/testdata/fortification_followers_dont_vote.txt b/pkg/raft/testdata/fortification_followers_dont_vote.txt new file mode 100644 index 000000000000..468b7ef8a99a --- /dev/null +++ b/pkg/raft/testdata/fortification_followers_dont_vote.txt @@ -0,0 +1,416 @@ +# Test to ensure that a follower will not vote for another peer +# if they're supporting a fortified leader. 
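The new test file above opens with the core invariant of leader fortification: a follower that is still supporting a fortified leader ignores MsgVote outright, before any log comparison happens. Below is a minimal Go sketch of that guard, with illustrative names (follower, grantVote) rather than the actual pkg/raft API:

package main

import "fmt"

// follower is a toy model of the state a raft follower consults when it
// receives a MsgVote. leadEpoch is non-zero while the follower supports a
// fortified leader, mirroring the LeadEpoch field in the HardStates above.
type follower struct {
	term         uint64
	leadEpoch    uint64
	lastLogTerm  uint64
	lastLogIndex uint64
}

// grantVote sketches the rule this test exercises: fortification support is
// checked first, so even a candidate with an up-to-date log is ignored.
func grantVote(f follower, candTerm, candLogTerm, candLogIndex uint64) bool {
	if f.leadEpoch != 0 {
		return false // "ignored MsgVote ... supporting fortified leader"
	}
	logOK := candLogTerm > f.lastLogTerm ||
		(candLogTerm == f.lastLogTerm && candLogIndex >= f.lastLogIndex)
	return logOK && candTerm > f.term
}

func main() {
	f := follower{term: 1, leadEpoch: 1, lastLogTerm: 1, lastLogIndex: 11}
	fmt.Println(grantVote(f, 2, 1, 11)) // false: still fortified
	f.leadEpoch = 0                     // support withdrawn
	fmt.Println(grantVote(f, 2, 1, 11)) // true
}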
+ +log-level debug +---- +ok + +add-nodes 3 voters=(1,2,3) index=10 +---- +INFO 1 switched to configuration voters=(1 2 3) +INFO 1 became follower at term 0 +INFO newRaft 1 [peers: [1,2,3], term: 0, commit: 10, applied: 10, lastindex: 10, lastterm: 1] +INFO 2 switched to configuration voters=(1 2 3) +INFO 2 became follower at term 0 +INFO newRaft 2 [peers: [1,2,3], term: 0, commit: 10, applied: 10, lastindex: 10, lastterm: 1] +INFO 3 switched to configuration voters=(1 2 3) +INFO 3 became follower at term 0 +INFO newRaft 3 [peers: [1,2,3], term: 0, commit: 10, applied: 10, lastindex: 10, lastterm: 1] + +campaign 1 +---- +INFO 1 is starting a new election at term 0 +INFO 1 became candidate at term 1 +INFO 1 [logterm: 1, index: 10] sent MsgVote request to 2 at term 1 +INFO 1 [logterm: 1, index: 10] sent MsgVote request to 3 at term 1 + +stabilize +---- +> 1 handling Ready + Ready MustSync=true: + State:StateCandidate + HardState Term:1 Vote:1 Commit:10 Lead:0 LeadEpoch:0 + Messages: + 1->2 MsgVote Term:1 Log:1/10 + 1->3 MsgVote Term:1 Log:1/10 + INFO 1 received MsgVoteResp from 1 at term 1 + INFO 1 has received 1 MsgVoteResp votes and 0 vote rejections +> 2 receiving messages + 1->2 MsgVote Term:1 Log:1/10 + INFO 2 [term: 0] received a MsgVote message with higher term from 1 [term: 1] + INFO 2 became follower at term 1 + INFO 2 [logterm: 1, index: 10, vote: 0] cast MsgVote for 1 [logterm: 1, index: 10] at term 1 +> 3 receiving messages + 1->3 MsgVote Term:1 Log:1/10 + INFO 3 [term: 0] received a MsgVote message with higher term from 1 [term: 1] + INFO 3 became follower at term 1 + INFO 3 [logterm: 1, index: 10, vote: 0] cast MsgVote for 1 [logterm: 1, index: 10] at term 1 +> 2 handling Ready + Ready MustSync=true: + HardState Term:1 Vote:1 Commit:10 Lead:0 LeadEpoch:0 + Messages: + 2->1 MsgVoteResp Term:1 Log:0/0 +> 3 handling Ready + Ready MustSync=true: + HardState Term:1 Vote:1 Commit:10 Lead:0 LeadEpoch:0 + Messages: + 3->1 MsgVoteResp Term:1 Log:0/0 +> 1 receiving messages + 2->1 MsgVoteResp Term:1 Log:0/0 + INFO 1 received MsgVoteResp from 2 at term 1 + INFO 1 has received 2 MsgVoteResp votes and 0 vote rejections + INFO 1 became leader at term 1 + 3->1 MsgVoteResp Term:1 Log:0/0 +> 1 handling Ready + Ready MustSync=true: + State:StateLeader + HardState Term:1 Vote:1 Commit:10 Lead:1 LeadEpoch:1 + Entries: + 1/11 EntryNormal "" + Messages: + 1->2 MsgFortifyLeader Term:1 Log:0/0 + 1->3 MsgFortifyLeader Term:1 Log:0/0 + 1->2 MsgApp Term:1 Log:1/10 Commit:10 Entries:[1/11 EntryNormal ""] + 1->3 MsgApp Term:1 Log:1/10 Commit:10 Entries:[1/11 EntryNormal ""] +> 2 receiving messages + 1->2 MsgFortifyLeader Term:1 Log:0/0 + 1->2 MsgApp Term:1 Log:1/10 Commit:10 Entries:[1/11 EntryNormal ""] +> 3 receiving messages + 1->3 MsgFortifyLeader Term:1 Log:0/0 + 1->3 MsgApp Term:1 Log:1/10 Commit:10 Entries:[1/11 EntryNormal ""] +> 2 handling Ready + Ready MustSync=true: + HardState Term:1 Vote:1 Commit:10 Lead:1 LeadEpoch:1 + Entries: + 1/11 EntryNormal "" + Messages: + 2->1 MsgFortifyLeaderResp Term:1 Log:0/0 LeadEpoch:1 + 2->1 MsgAppResp Term:1 Log:0/11 Commit:10 +> 3 handling Ready + Ready MustSync=true: + HardState Term:1 Vote:1 Commit:10 Lead:1 LeadEpoch:1 + Entries: + 1/11 EntryNormal "" + Messages: + 3->1 MsgFortifyLeaderResp Term:1 Log:0/0 LeadEpoch:1 + 3->1 MsgAppResp Term:1 Log:0/11 Commit:10 +> 1 receiving messages + 2->1 MsgFortifyLeaderResp Term:1 Log:0/0 LeadEpoch:1 + 2->1 MsgAppResp Term:1 Log:0/11 Commit:10 + 3->1 MsgFortifyLeaderResp Term:1 Log:0/0 LeadEpoch:1 + 3->1 MsgAppResp 
Term:1 Log:0/11 Commit:10 +> 1 handling Ready + Ready MustSync=true: + HardState Term:1 Vote:1 Commit:11 Lead:1 LeadEpoch:1 + CommittedEntries: + 1/11 EntryNormal "" + Messages: + 1->2 MsgApp Term:1 Log:1/11 Commit:11 + 1->3 MsgApp Term:1 Log:1/11 Commit:11 +> 2 receiving messages + 1->2 MsgApp Term:1 Log:1/11 Commit:11 +> 3 receiving messages + 1->3 MsgApp Term:1 Log:1/11 Commit:11 +> 2 handling Ready + Ready MustSync=true: + HardState Term:1 Vote:1 Commit:11 Lead:1 LeadEpoch:1 + CommittedEntries: + 1/11 EntryNormal "" + Messages: + 2->1 MsgAppResp Term:1 Log:0/11 Commit:11 +> 3 handling Ready + Ready MustSync=true: + HardState Term:1 Vote:1 Commit:11 Lead:1 LeadEpoch:1 + CommittedEntries: + 1/11 EntryNormal "" + Messages: + 3->1 MsgAppResp Term:1 Log:0/11 Commit:11 +> 1 receiving messages + 2->1 MsgAppResp Term:1 Log:0/11 Commit:11 + 3->1 MsgAppResp Term:1 Log:0/11 Commit:11 + +withdraw-support 2 1 +---- + 1 2 3 +1 1 1 1 +2 x 1 1 +3 1 1 1 + +# Ensure that neither 1 nor 3 votes for 2, as both of them are still +# supporting the fortified leader (1). +campaign 2 +---- +INFO 2 is starting a new election at term 1 +INFO 2 became candidate at term 2 +INFO 2 [logterm: 1, index: 11] sent MsgVote request to 1 at term 2 +INFO 2 [logterm: 1, index: 11] sent MsgVote request to 3 at term 2 + +stabilize +---- +> 2 handling Ready + Ready MustSync=true: + State:StateCandidate + HardState Term:2 Vote:2 Commit:11 Lead:0 LeadEpoch:0 + Messages: + 2->1 MsgVote Term:2 Log:1/11 + 2->3 MsgVote Term:2 Log:1/11 + INFO 2 received MsgVoteResp from 2 at term 2 + INFO 2 has received 1 MsgVoteResp votes and 0 vote rejections +> 1 receiving messages + 2->1 MsgVote Term:2 Log:1/11 + INFO 1 [logterm: 1, index: 11, vote: 1] ignored MsgVote from 2 [logterm: 1, index: 11] at term 1: supporting fortified leader 1 at epoch 1 +> 3 receiving messages + 2->3 MsgVote Term:2 Log:1/11 + INFO 3 [logterm: 1, index: 11, vote: 1] ignored MsgVote from 2 [logterm: 1, index: 11] at term 1: supporting fortified leader 1 at epoch 1 + +raft-state +---- +1: StateLeader (Voter) Term:1 Lead:1 +2: StateCandidate (Voter) Term:2 Lead:0 +3: StateFollower (Voter) Term:1 Lead:1 + +# However, once a quorum withdraws support for the fortified leader, 2 can then +# be elected.
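The support matrices printed by withdraw-support make the quorum arithmetic explicit: since supporters of a fortified leader ignore MsgVote, 2 can only be elected once a majority of the three voters has withdrawn support for 1. A toy Go check of that property (canBeElected is a made-up helper, not pkg/raft code):

package main

import "fmt"

// canBeElected checks the property the test demonstrates: a candidate can
// only win once a quorum of voters no longer supports the fortified leader,
// because supporters ignore MsgVote outright.
func canBeElected(candidate int, supportsLeader map[int]bool) bool {
	// The candidate votes for itself; every non-supporting peer may grant a vote.
	votes := 0
	for id, supports := range supportsLeader {
		if id == candidate || !supports {
			votes++
		}
	}
	return votes > len(supportsLeader)/2 // strict majority
}

func main() {
	// After "withdraw-support 2 1": only 2 has withdrawn support for leader 1.
	fmt.Println(canBeElected(2, map[int]bool{1: true, 2: false, 3: true})) // false
	// After "withdraw-support 3 1": a quorum (2 and 3) has withdrawn support.
	fmt.Println(canBeElected(2, map[int]bool{1: true, 2: false, 3: false})) // true
}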
+withdraw-support 3 1 +---- + 1 2 3 +1 1 1 1 +2 x 1 1 +3 x 1 1 + +campaign 2 +---- +INFO 2 is starting a new election at term 2 +INFO 2 became candidate at term 3 +INFO 2 [logterm: 1, index: 11] sent MsgVote request to 1 at term 3 +INFO 2 [logterm: 1, index: 11] sent MsgVote request to 3 at term 3 + +stabilize +---- +> 2 handling Ready + Ready MustSync=true: + HardState Term:3 Vote:2 Commit:11 Lead:0 LeadEpoch:0 + Messages: + 2->1 MsgVote Term:3 Log:1/11 + 2->3 MsgVote Term:3 Log:1/11 + INFO 2 received MsgVoteResp from 2 at term 3 + INFO 2 has received 1 MsgVoteResp votes and 0 vote rejections +> 1 receiving messages + 2->1 MsgVote Term:3 Log:1/11 + INFO 1 [logterm: 1, index: 11, vote: 1] ignored MsgVote from 2 [logterm: 1, index: 11] at term 1: supporting fortified leader 1 at epoch 1 +> 3 receiving messages + 2->3 MsgVote Term:3 Log:1/11 + INFO 3 [term: 1] received a MsgVote message with higher term from 2 [term: 3] + INFO 3 became follower at term 3 + INFO 3 [logterm: 1, index: 11, vote: 0] cast MsgVote for 2 [logterm: 1, index: 11] at term 3 +> 3 handling Ready + Ready MustSync=true: + HardState Term:3 Vote:2 Commit:11 Lead:0 LeadEpoch:0 + Messages: + 3->2 MsgVoteResp Term:3 Log:0/0 +> 2 receiving messages + 3->2 MsgVoteResp Term:3 Log:0/0 + INFO 2 received MsgVoteResp from 3 at term 3 + INFO 2 has received 2 MsgVoteResp votes and 0 vote rejections + INFO 2 became leader at term 3 +> 2 handling Ready + Ready MustSync=true: + State:StateLeader + HardState Term:3 Vote:2 Commit:11 Lead:2 LeadEpoch:1 + Entries: + 3/12 EntryNormal "" + Messages: + 2->1 MsgFortifyLeader Term:3 Log:0/0 + 2->3 MsgFortifyLeader Term:3 Log:0/0 + 2->1 MsgApp Term:3 Log:1/11 Commit:11 Entries:[3/12 EntryNormal ""] + 2->3 MsgApp Term:3 Log:1/11 Commit:11 Entries:[3/12 EntryNormal ""] +> 1 receiving messages + 2->1 MsgFortifyLeader Term:3 Log:0/0 + INFO 1 [term: 1] received a MsgFortifyLeader message with higher term from 2 [term: 3] + INFO 1 became follower at term 3 + 2->1 MsgApp Term:3 Log:1/11 Commit:11 Entries:[3/12 EntryNormal ""] +> 3 receiving messages + 2->3 MsgFortifyLeader Term:3 Log:0/0 + 2->3 MsgApp Term:3 Log:1/11 Commit:11 Entries:[3/12 EntryNormal ""] +> 1 handling Ready + Ready MustSync=true: + State:StateFollower + HardState Term:3 Commit:11 Lead:2 LeadEpoch:1 + Entries: + 3/12 EntryNormal "" + Messages: + 1->2 MsgFortifyLeaderResp Term:3 Log:0/0 LeadEpoch:1 + 1->2 MsgAppResp Term:3 Log:0/12 Commit:11 +> 3 handling Ready + Ready MustSync=true: + HardState Term:3 Vote:2 Commit:11 Lead:2 LeadEpoch:1 + Entries: + 3/12 EntryNormal "" + Messages: + 3->2 MsgFortifyLeaderResp Term:3 Log:0/0 LeadEpoch:1 + 3->2 MsgAppResp Term:3 Log:0/12 Commit:11 +> 2 receiving messages + 1->2 MsgFortifyLeaderResp Term:3 Log:0/0 LeadEpoch:1 + 1->2 MsgAppResp Term:3 Log:0/12 Commit:11 + 3->2 MsgFortifyLeaderResp Term:3 Log:0/0 LeadEpoch:1 + 3->2 MsgAppResp Term:3 Log:0/12 Commit:11 +> 2 handling Ready + Ready MustSync=true: + HardState Term:3 Vote:2 Commit:12 Lead:2 LeadEpoch:1 + CommittedEntries: + 3/12 EntryNormal "" + Messages: + 2->1 MsgApp Term:3 Log:3/12 Commit:12 + 2->3 MsgApp Term:3 Log:3/12 Commit:12 +> 1 receiving messages + 2->1 MsgApp Term:3 Log:3/12 Commit:12 +> 3 receiving messages + 2->3 MsgApp Term:3 Log:3/12 Commit:12 +> 1 handling Ready + Ready MustSync=true: + HardState Term:3 Commit:12 Lead:2 LeadEpoch:1 + CommittedEntries: + 3/12 EntryNormal "" + Messages: + 1->2 MsgAppResp Term:3 Log:0/12 Commit:12 +> 3 handling Ready + Ready MustSync=true: + HardState Term:3 Vote:2 Commit:12 Lead:2 LeadEpoch:1 + 
CommittedEntries: + 3/12 EntryNormal "" + Messages: + 3->2 MsgAppResp Term:3 Log:0/12 Commit:12 +> 2 receiving messages + 1->2 MsgAppResp Term:3 Log:0/12 Commit:12 + 3->2 MsgAppResp Term:3 Log:0/12 Commit:12 + +raft-state +---- +1: StateFollower (Voter) Term:3 Lead:2 +2: StateLeader (Voter) Term:3 Lead:2 +3: StateFollower (Voter) Term:3 Lead:2 + +# Lastly, ensure that the leader is able to successfully campaign at a higher +# term. We'll need to step down to set this up properly, as otherwise attempts +# to campaign will no-op. +step-down 2 +---- +INFO 2 became follower at term 3 + +campaign 2 +---- +INFO 2 is starting a new election at term 3 +INFO 2 became candidate at term 4 +INFO 2 [logterm: 3, index: 12] sent MsgVote request to 1 at term 4 +INFO 2 [logterm: 3, index: 12] sent MsgVote request to 3 at term 4 + +stabilize +---- +> 2 handling Ready + Ready MustSync=true: + State:StateCandidate + HardState Term:4 Vote:2 Commit:12 Lead:0 LeadEpoch:0 + Messages: + 2->1 MsgVote Term:4 Log:3/12 + 2->3 MsgVote Term:4 Log:3/12 + INFO 2 received MsgVoteResp from 2 at term 4 + INFO 2 has received 1 MsgVoteResp votes and 0 vote rejections +> 1 receiving messages + 2->1 MsgVote Term:4 Log:3/12 + INFO 1 [term: 3] received a MsgVote message with higher term from 2 [term: 4] + INFO 1 became follower at term 4 + INFO 1 [logterm: 3, index: 12, vote: 0] cast MsgVote for 2 [logterm: 3, index: 12] at term 4 +> 3 receiving messages + 2->3 MsgVote Term:4 Log:3/12 + INFO 3 [term: 3] received a MsgVote message with higher term from 2 [term: 4] + INFO 3 became follower at term 4 + INFO 3 [logterm: 3, index: 12, vote: 0] cast MsgVote for 2 [logterm: 3, index: 12] at term 4 +> 1 handling Ready + Ready MustSync=true: + HardState Term:4 Vote:2 Commit:12 Lead:0 LeadEpoch:0 + Messages: + 1->2 MsgVoteResp Term:4 Log:0/0 +> 3 handling Ready + Ready MustSync=true: + HardState Term:4 Vote:2 Commit:12 Lead:0 LeadEpoch:0 + Messages: + 3->2 MsgVoteResp Term:4 Log:0/0 +> 2 receiving messages + 1->2 MsgVoteResp Term:4 Log:0/0 + INFO 2 received MsgVoteResp from 1 at term 4 + INFO 2 has received 2 MsgVoteResp votes and 0 vote rejections + INFO 2 became leader at term 4 + 3->2 MsgVoteResp Term:4 Log:0/0 +> 2 handling Ready + Ready MustSync=true: + State:StateLeader + HardState Term:4 Vote:2 Commit:12 Lead:2 LeadEpoch:1 + Entries: + 4/13 EntryNormal "" + Messages: + 2->1 MsgFortifyLeader Term:4 Log:0/0 + 2->3 MsgFortifyLeader Term:4 Log:0/0 + 2->1 MsgApp Term:4 Log:3/12 Commit:12 Entries:[4/13 EntryNormal ""] + 2->3 MsgApp Term:4 Log:3/12 Commit:12 Entries:[4/13 EntryNormal ""] +> 1 receiving messages + 2->1 MsgFortifyLeader Term:4 Log:0/0 + 2->1 MsgApp Term:4 Log:3/12 Commit:12 Entries:[4/13 EntryNormal ""] +> 3 receiving messages + 2->3 MsgFortifyLeader Term:4 Log:0/0 + 2->3 MsgApp Term:4 Log:3/12 Commit:12 Entries:[4/13 EntryNormal ""] +> 1 handling Ready + Ready MustSync=true: + HardState Term:4 Vote:2 Commit:12 Lead:2 LeadEpoch:1 + Entries: + 4/13 EntryNormal "" + Messages: + 1->2 MsgFortifyLeaderResp Term:4 Log:0/0 LeadEpoch:1 + 1->2 MsgAppResp Term:4 Log:0/13 Commit:12 +> 3 handling Ready + Ready MustSync=true: + HardState Term:4 Vote:2 Commit:12 Lead:2 LeadEpoch:1 + Entries: + 4/13 EntryNormal "" + Messages: + 3->2 MsgFortifyLeaderResp Term:4 Log:0/0 LeadEpoch:1 + 3->2 MsgAppResp Term:4 Log:0/13 Commit:12 +> 2 receiving messages + 1->2 MsgFortifyLeaderResp Term:4 Log:0/0 LeadEpoch:1 + 1->2 MsgAppResp Term:4 Log:0/13 Commit:12 + 3->2 MsgFortifyLeaderResp Term:4 Log:0/0 LeadEpoch:1 + 3->2 MsgAppResp Term:4 Log:0/13 Commit:12 
+> 2 handling Ready + Ready MustSync=true: + HardState Term:4 Vote:2 Commit:13 Lead:2 LeadEpoch:1 + CommittedEntries: + 4/13 EntryNormal "" + Messages: + 2->1 MsgApp Term:4 Log:4/13 Commit:13 + 2->3 MsgApp Term:4 Log:4/13 Commit:13 +> 1 receiving messages + 2->1 MsgApp Term:4 Log:4/13 Commit:13 +> 3 receiving messages + 2->3 MsgApp Term:4 Log:4/13 Commit:13 +> 1 handling Ready + Ready MustSync=true: + HardState Term:4 Vote:2 Commit:13 Lead:2 LeadEpoch:1 + CommittedEntries: + 4/13 EntryNormal "" + Messages: + 1->2 MsgAppResp Term:4 Log:0/13 Commit:13 +> 3 handling Ready + Ready MustSync=true: + HardState Term:4 Vote:2 Commit:13 Lead:2 LeadEpoch:1 + CommittedEntries: + 4/13 EntryNormal "" + Messages: + 3->2 MsgAppResp Term:4 Log:0/13 Commit:13 +> 2 receiving messages + 1->2 MsgAppResp Term:4 Log:0/13 Commit:13 + 3->2 MsgAppResp Term:4 Log:0/13 Commit:13 + +raft-state +---- +1: StateFollower (Voter) Term:4 Lead:2 +2: StateLeader (Voter) Term:4 Lead:2 +3: StateFollower (Voter) Term:4 Lead:2 diff --git a/pkg/raft/testdata/fortification_leader_does_not_support_itself.txt b/pkg/raft/testdata/fortification_leader_does_not_support_itself.txt index 7028cd3a9be4..e7eb4c806425 100644 --- a/pkg/raft/testdata/fortification_leader_does_not_support_itself.txt +++ b/pkg/raft/testdata/fortification_leader_does_not_support_itself.txt @@ -130,7 +130,7 @@ stabilize 3->1 MsgFortifyLeaderResp Term:1 Log:0/0 LeadEpoch:2 3->1 MsgAppResp Term:1 Log:0/3 Commit:2 > 1 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:1 Vote:1 Commit:3 Lead:1 LeadEpoch:0 CommittedEntries: 1/3 EntryNormal "" @@ -142,14 +142,14 @@ stabilize > 3 receiving messages 1->3 MsgApp Term:1 Log:1/3 Commit:3 > 2 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:1 Vote:1 Commit:3 Lead:1 LeadEpoch:3 CommittedEntries: 1/3 EntryNormal "" Messages: 2->1 MsgAppResp Term:1 Log:0/3 Commit:3 > 3 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:1 Vote:1 Commit:3 Lead:1 LeadEpoch:2 CommittedEntries: 1/3 EntryNormal "" diff --git a/pkg/raft/testdata/fortification_support_tracking.txt b/pkg/raft/testdata/fortification_support_tracking.txt new file mode 100644 index 000000000000..e6c30992e891 --- /dev/null +++ b/pkg/raft/testdata/fortification_support_tracking.txt @@ -0,0 +1,281 @@ +# Test to ensure that leaders correctly track fortification support. 
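The print-support-state directive used later in this file dumps, per peer, the epoch at which that peer supports the leader. A rough Go sketch of the bookkeeping behind it, using hypothetical names (supportTracker, record) rather than the pkg/raft types; rejected MsgFortifyLeaderResp messages leave the tracker untouched, which is why only the leader's self-support shows up at first:

package main

import "fmt"

// supportTracker is a toy version of the per-peer fortification bookkeeping
// that print-support-state dumps ("id : epoch").
type supportTracker struct {
	support map[int]uint64 // peer id -> lead epoch at which it supports us
}

// record handles a MsgFortifyLeaderResp: accepted responses register the
// epoch, rejected ones are dropped.
func (t *supportTracker) record(from int, epoch uint64, rejected bool) {
	if rejected {
		return
	}
	t.support[from] = epoch
}

func main() {
	t := supportTracker{support: map[int]uint64{}}
	t.record(1, 1, false) // the leader supports itself at epoch 1
	t.record(2, 0, true)  // 2 rejected fortification (support withdrawn)
	t.record(3, 0, true)
	fmt.Println(t.support) // map[1:1] -- matches "1 : 1" in the test
}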
+ +log-level debug +---- +ok + +add-nodes 3 voters=(1,2,3) index=10 +---- +INFO 1 switched to configuration voters=(1 2 3) +INFO 1 became follower at term 0 +INFO newRaft 1 [peers: [1,2,3], term: 0, commit: 10, applied: 10, lastindex: 10, lastterm: 1] +INFO 2 switched to configuration voters=(1 2 3) +INFO 2 became follower at term 0 +INFO newRaft 2 [peers: [1,2,3], term: 0, commit: 10, applied: 10, lastindex: 10, lastterm: 1] +INFO 3 switched to configuration voters=(1 2 3) +INFO 3 became follower at term 0 +INFO newRaft 3 [peers: [1,2,3], term: 0, commit: 10, applied: 10, lastindex: 10, lastterm: 1] + +withdraw-support 2 1 +---- + 1 2 3 +1 1 1 1 +2 x 1 1 +3 1 1 1 + +withdraw-support 3 1 +---- + 1 2 3 +1 1 1 1 +2 x 1 1 +3 x 1 1 + +campaign 1 +---- +INFO 1 is starting a new election at term 0 +INFO 1 became candidate at term 1 +INFO 1 [logterm: 1, index: 10] sent MsgVote request to 2 at term 1 +INFO 1 [logterm: 1, index: 10] sent MsgVote request to 3 at term 1 + +stabilize +---- +> 1 handling Ready + Ready MustSync=true: + State:StateCandidate + HardState Term:1 Vote:1 Commit:10 Lead:0 LeadEpoch:0 + Messages: + 1->2 MsgVote Term:1 Log:1/10 + 1->3 MsgVote Term:1 Log:1/10 + INFO 1 received MsgVoteResp from 1 at term 1 + INFO 1 has received 1 MsgVoteResp votes and 0 vote rejections +> 2 receiving messages + 1->2 MsgVote Term:1 Log:1/10 + INFO 2 [term: 0] received a MsgVote message with higher term from 1 [term: 1] + INFO 2 became follower at term 1 + INFO 2 [logterm: 1, index: 10, vote: 0] cast MsgVote for 1 [logterm: 1, index: 10] at term 1 +> 3 receiving messages + 1->3 MsgVote Term:1 Log:1/10 + INFO 3 [term: 0] received a MsgVote message with higher term from 1 [term: 1] + INFO 3 became follower at term 1 + INFO 3 [logterm: 1, index: 10, vote: 0] cast MsgVote for 1 [logterm: 1, index: 10] at term 1 +> 2 handling Ready + Ready MustSync=true: + HardState Term:1 Vote:1 Commit:10 Lead:0 LeadEpoch:0 + Messages: + 2->1 MsgVoteResp Term:1 Log:0/0 +> 3 handling Ready + Ready MustSync=true: + HardState Term:1 Vote:1 Commit:10 Lead:0 LeadEpoch:0 + Messages: + 3->1 MsgVoteResp Term:1 Log:0/0 +> 1 receiving messages + 2->1 MsgVoteResp Term:1 Log:0/0 + INFO 1 received MsgVoteResp from 2 at term 1 + INFO 1 has received 2 MsgVoteResp votes and 0 vote rejections + INFO 1 became leader at term 1 + 3->1 MsgVoteResp Term:1 Log:0/0 +> 1 handling Ready + Ready MustSync=true: + State:StateLeader + HardState Term:1 Vote:1 Commit:10 Lead:1 LeadEpoch:1 + Entries: + 1/11 EntryNormal "" + Messages: + 1->2 MsgFortifyLeader Term:1 Log:0/0 + 1->3 MsgFortifyLeader Term:1 Log:0/0 + 1->2 MsgApp Term:1 Log:1/10 Commit:10 Entries:[1/11 EntryNormal ""] + 1->3 MsgApp Term:1 Log:1/10 Commit:10 Entries:[1/11 EntryNormal ""] +> 2 receiving messages + 1->2 MsgFortifyLeader Term:1 Log:0/0 + 1->2 MsgApp Term:1 Log:1/10 Commit:10 Entries:[1/11 EntryNormal ""] +> 3 receiving messages + 1->3 MsgFortifyLeader Term:1 Log:0/0 + 1->3 MsgApp Term:1 Log:1/10 Commit:10 Entries:[1/11 EntryNormal ""] +> 2 handling Ready + Ready MustSync=true: + HardState Term:1 Vote:1 Commit:10 Lead:1 LeadEpoch:0 + Entries: + 1/11 EntryNormal "" + Messages: + 2->1 MsgFortifyLeaderResp Term:1 Log:0/0 Rejected (Hint: 0) + 2->1 MsgAppResp Term:1 Log:0/11 Commit:10 +> 3 handling Ready + Ready MustSync=true: + HardState Term:1 Vote:1 Commit:10 Lead:1 LeadEpoch:0 + Entries: + 1/11 EntryNormal "" + Messages: + 3->1 MsgFortifyLeaderResp Term:1 Log:0/0 Rejected (Hint: 0) + 3->1 MsgAppResp Term:1 Log:0/11 Commit:10 +> 1 receiving messages + 2->1 MsgFortifyLeaderResp 
Term:1 Log:0/0 Rejected (Hint: 0) + 2->1 MsgAppResp Term:1 Log:0/11 Commit:10 + 3->1 MsgFortifyLeaderResp Term:1 Log:0/0 Rejected (Hint: 0) + 3->1 MsgAppResp Term:1 Log:0/11 Commit:10 +> 1 handling Ready + Ready MustSync=true: + HardState Term:1 Vote:1 Commit:11 Lead:1 LeadEpoch:1 + CommittedEntries: + 1/11 EntryNormal "" + Messages: + 1->2 MsgApp Term:1 Log:1/11 Commit:11 + 1->3 MsgApp Term:1 Log:1/11 Commit:11 +> 2 receiving messages + 1->2 MsgApp Term:1 Log:1/11 Commit:11 +> 3 receiving messages + 1->3 MsgApp Term:1 Log:1/11 Commit:11 +> 2 handling Ready + Ready MustSync=true: + HardState Term:1 Vote:1 Commit:11 Lead:1 LeadEpoch:0 + CommittedEntries: + 1/11 EntryNormal "" + Messages: + 2->1 MsgAppResp Term:1 Log:0/11 Commit:11 +> 3 handling Ready + Ready MustSync=true: + HardState Term:1 Vote:1 Commit:11 Lead:1 LeadEpoch:0 + CommittedEntries: + 1/11 EntryNormal "" + Messages: + 3->1 MsgAppResp Term:1 Log:0/11 Commit:11 +> 1 receiving messages + 2->1 MsgAppResp Term:1 Log:0/11 Commit:11 + 3->1 MsgAppResp Term:1 Log:0/11 Commit:11 + +print-support-state 1 +---- +1 : 1 + +bump-epoch 2 +---- + 1 2 3 +1 1 2 1 +2 x 2 1 +3 x 2 1 + +withdraw-support 3 2 +---- + 1 2 3 +1 1 2 1 +2 x 2 1 +3 x x 1 + +grant-support 3 2 +---- + 1 2 3 +1 1 2 1 +2 x 3 1 +3 x 3 1 + +campaign 2 +---- +INFO 2 is starting a new election at term 1 +INFO 2 became candidate at term 2 +INFO 2 [logterm: 1, index: 11] sent MsgVote request to 1 at term 2 +INFO 2 [logterm: 1, index: 11] sent MsgVote request to 3 at term 2 + +stabilize +---- +> 2 handling Ready + Ready MustSync=true: + State:StateCandidate + HardState Term:2 Vote:2 Commit:11 Lead:0 LeadEpoch:0 + Messages: + 2->1 MsgVote Term:2 Log:1/11 + 2->3 MsgVote Term:2 Log:1/11 + INFO 2 received MsgVoteResp from 2 at term 2 + INFO 2 has received 1 MsgVoteResp votes and 0 vote rejections +> 1 receiving messages + 2->1 MsgVote Term:2 Log:1/11 + INFO 1 [logterm: 1, index: 11, vote: 1] ignored MsgVote from 2 [logterm: 1, index: 11] at term 1: supporting fortified leader 1 at epoch 1 +> 3 receiving messages + 2->3 MsgVote Term:2 Log:1/11 + INFO 3 [term: 1] received a MsgVote message with higher term from 2 [term: 2] + INFO 3 became follower at term 2 + INFO 3 [logterm: 1, index: 11, vote: 0] cast MsgVote for 2 [logterm: 1, index: 11] at term 2 +> 3 handling Ready + Ready MustSync=true: + HardState Term:2 Vote:2 Commit:11 Lead:0 LeadEpoch:0 + Messages: + 3->2 MsgVoteResp Term:2 Log:0/0 +> 2 receiving messages + 3->2 MsgVoteResp Term:2 Log:0/0 + INFO 2 received MsgVoteResp from 3 at term 2 + INFO 2 has received 2 MsgVoteResp votes and 0 vote rejections + INFO 2 became leader at term 2 +> 2 handling Ready + Ready MustSync=true: + State:StateLeader + HardState Term:2 Vote:2 Commit:11 Lead:2 LeadEpoch:3 + Entries: + 2/12 EntryNormal "" + Messages: + 2->1 MsgFortifyLeader Term:2 Log:0/0 + 2->3 MsgFortifyLeader Term:2 Log:0/0 + 2->1 MsgApp Term:2 Log:1/11 Commit:11 Entries:[2/12 EntryNormal ""] + 2->3 MsgApp Term:2 Log:1/11 Commit:11 Entries:[2/12 EntryNormal ""] +> 1 receiving messages + 2->1 MsgFortifyLeader Term:2 Log:0/0 + INFO 1 [term: 1] received a MsgFortifyLeader message with higher term from 2 [term: 2] + INFO 1 became follower at term 2 + 2->1 MsgApp Term:2 Log:1/11 Commit:11 Entries:[2/12 EntryNormal ""] +> 3 receiving messages + 2->3 MsgFortifyLeader Term:2 Log:0/0 + 2->3 MsgApp Term:2 Log:1/11 Commit:11 Entries:[2/12 EntryNormal ""] +> 1 handling Ready + Ready MustSync=true: + State:StateFollower + HardState Term:2 Commit:11 Lead:2 LeadEpoch:2 + Entries: + 2/12 EntryNormal "" 
+ Messages: + 1->2 MsgFortifyLeaderResp Term:2 Log:0/0 LeadEpoch:2 + 1->2 MsgAppResp Term:2 Log:0/12 Commit:11 +> 3 handling Ready + Ready MustSync=true: + HardState Term:2 Vote:2 Commit:11 Lead:2 LeadEpoch:3 + Entries: + 2/12 EntryNormal "" + Messages: + 3->2 MsgFortifyLeaderResp Term:2 Log:0/0 LeadEpoch:3 + 3->2 MsgAppResp Term:2 Log:0/12 Commit:11 +> 2 receiving messages + 1->2 MsgFortifyLeaderResp Term:2 Log:0/0 LeadEpoch:2 + 1->2 MsgAppResp Term:2 Log:0/12 Commit:11 + 3->2 MsgFortifyLeaderResp Term:2 Log:0/0 LeadEpoch:3 + 3->2 MsgAppResp Term:2 Log:0/12 Commit:11 +> 2 handling Ready + Ready MustSync=true: + HardState Term:2 Vote:2 Commit:12 Lead:2 LeadEpoch:3 + CommittedEntries: + 2/12 EntryNormal "" + Messages: + 2->1 MsgApp Term:2 Log:2/12 Commit:12 + 2->3 MsgApp Term:2 Log:2/12 Commit:12 +> 1 receiving messages + 2->1 MsgApp Term:2 Log:2/12 Commit:12 +> 3 receiving messages + 2->3 MsgApp Term:2 Log:2/12 Commit:12 +> 1 handling Ready + Ready MustSync=true: + HardState Term:2 Commit:12 Lead:2 LeadEpoch:2 + CommittedEntries: + 2/12 EntryNormal "" + Messages: + 1->2 MsgAppResp Term:2 Log:0/12 Commit:12 +> 3 handling Ready + Ready MustSync=true: + HardState Term:2 Vote:2 Commit:12 Lead:2 LeadEpoch:3 + CommittedEntries: + 2/12 EntryNormal "" + Messages: + 3->2 MsgAppResp Term:2 Log:0/12 Commit:12 +> 2 receiving messages + 1->2 MsgAppResp Term:2 Log:0/12 Commit:12 + 3->2 MsgAppResp Term:2 Log:0/12 Commit:12 + +print-support-state 2 +---- +1 : 2 +2 : 3 +3 : 3 diff --git a/pkg/raft/testdata/heartbeat_resp_recovers_from_probing.txt b/pkg/raft/testdata/heartbeat_resp_recovers_from_probing.txt index bb6eef1e8ed9..2647704bfb0d 100644 --- a/pkg/raft/testdata/heartbeat_resp_recovers_from_probing.txt +++ b/pkg/raft/testdata/heartbeat_resp_recovers_from_probing.txt @@ -28,20 +28,20 @@ ok status 1 ---- -1: StateReplicate match=11 next=12 -2: StateReplicate match=11 next=12 -3: StateReplicate match=11 next=12 +1: StateReplicate match=11 next=12 sentCommit=10 matchCommit=10 +2: StateReplicate match=11 next=12 sentCommit=11 matchCommit=11 +3: StateReplicate match=11 next=12 sentCommit=11 matchCommit=11 # On the first replica, report the second one as not reachable. 
report-unreachable 1 2 ---- -DEBUG 1 failed to send message to 2 because it is unreachable [StateProbe match=11 next=12] +DEBUG 1 failed to send message to 2 because it is unreachable [StateProbe match=11 next=12 sentCommit=11 matchCommit=11] status 1 ---- -1: StateReplicate match=11 next=12 -2: StateProbe match=11 next=12 -3: StateReplicate match=11 next=12 +1: StateReplicate match=11 next=12 sentCommit=10 matchCommit=10 +2: StateProbe match=11 next=12 sentCommit=11 matchCommit=11 +3: StateReplicate match=11 next=12 sentCommit=11 matchCommit=11 tick-heartbeat 1 ---- @@ -53,12 +53,12 @@ stabilize > 1 handling Ready Ready MustSync=false: Messages: - 1->2 MsgHeartbeat Term:1 Log:0/0 Commit:11 - 1->3 MsgHeartbeat Term:1 Log:0/0 Commit:11 + 1->2 MsgHeartbeat Term:1 Log:0/0 + 1->3 MsgHeartbeat Term:1 Log:0/0 > 2 receiving messages - 1->2 MsgHeartbeat Term:1 Log:0/0 Commit:11 + 1->2 MsgHeartbeat Term:1 Log:0/0 > 3 receiving messages - 1->3 MsgHeartbeat Term:1 Log:0/0 Commit:11 + 1->3 MsgHeartbeat Term:1 Log:0/0 > 2 handling Ready Ready MustSync=false: Messages: @@ -85,6 +85,6 @@ stabilize status 1 ---- -1: StateReplicate match=11 next=12 -2: StateReplicate match=11 next=12 -3: StateReplicate match=11 next=12 +1: StateReplicate match=11 next=12 sentCommit=10 matchCommit=10 +2: StateReplicate match=11 next=12 sentCommit=11 matchCommit=11 +3: StateReplicate match=11 next=12 sentCommit=11 matchCommit=11 diff --git a/pkg/raft/testdata/lagging_commit.txt b/pkg/raft/testdata/lagging_commit.txt index c29ffdeea0d4..f7bf4e90cbc0 100644 --- a/pkg/raft/testdata/lagging_commit.txt +++ b/pkg/raft/testdata/lagging_commit.txt @@ -77,7 +77,7 @@ stabilize 1 2 2->1 MsgAppResp Term:1 Log:0/12 Commit:11 2->1 MsgAppResp Term:1 Log:0/13 Commit:11 > 1 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:1 Vote:1 Commit:13 Lead:1 LeadEpoch:1 CommittedEntries: 1/12 EntryNormal "data1" @@ -91,7 +91,7 @@ stabilize 1 2 1->2 MsgApp Term:1 Log:1/13 Commit:12 1->2 MsgApp Term:1 Log:1/13 Commit:13 > 2 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:1 Vote:1 Commit:13 Lead:1 LeadEpoch:1 CommittedEntries: 1/12 EntryNormal "data1" @@ -114,9 +114,9 @@ dropped: 1->3 MsgApp Term:1 Log:1/13 Commit:13 status 1 ---- -1: StateReplicate match=13 next=14 -2: StateReplicate match=13 next=14 -3: StateReplicate match=11 next=14 inflight=2 +1: StateReplicate match=13 next=14 sentCommit=11 matchCommit=11 +2: StateReplicate match=13 next=14 sentCommit=13 matchCommit=13 +3: StateReplicate match=11 next=14 sentCommit=13 matchCommit=11 inflight=2 # The leader still observes that the entries are in-flight to the follower 3, # since it hasn't heard from it. Nothing triggers updating the follower's @@ -133,8 +133,8 @@ process-ready 1 ---- Ready MustSync=false: Messages: -1->2 MsgHeartbeat Term:1 Log:0/0 Commit:13 -1->3 MsgHeartbeat Term:1 Log:0/0 Commit:11 +1->2 MsgHeartbeat Term:1 Log:0/0 +1->3 MsgHeartbeat Term:1 Log:0/0 # Since the heartbeat message does not bump the follower's commit index, it will # take another roundtrip with the leader to update it. 
As such, the total time @@ -149,7 +149,7 @@ Messages: stabilize 1 3 ---- > 3 receiving messages - 1->3 MsgHeartbeat Term:1 Log:0/0 Commit:11 + 1->3 MsgHeartbeat Term:1 Log:0/0 > 3 handling Ready Ready MustSync=false: Messages: @@ -163,7 +163,7 @@ stabilize 1 3 > 3 receiving messages 1->3 MsgApp Term:1 Log:1/13 Commit:13 > 3 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:1 Vote:1 Commit:13 Lead:1 LeadEpoch:1 CommittedEntries: 1/12 EntryNormal "data1" diff --git a/pkg/raft/testdata/msg_app_commit_index.txt b/pkg/raft/testdata/msg_app_commit_index.txt new file mode 100644 index 000000000000..5fc3ba849a15 --- /dev/null +++ b/pkg/raft/testdata/msg_app_commit_index.txt @@ -0,0 +1,202 @@ +# This test demonstrates that the leader tracks the followers' commit index +# and tries to advance it if it's stale. + +# Skip logging the boilerplate. Set up a raft group of 3 nodes, and elect node 1 +# as the leader. Nodes 2 and 3 are the followers. +log-level none +---- +ok + +add-nodes 3 voters=(1, 2, 3) index=10 +---- +ok + +campaign 1 +---- +ok + +stabilize +---- +ok + +# Propose a couple of entries. +propose 1 data1 +---- +ok + +propose 1 data2 +---- +ok + +process-ready 1 +---- +ok + +# The interesting part starts below. +log-level debug +---- +ok + +deliver-msgs 2 3 +---- +1->2 MsgApp Term:1 Log:1/11 Commit:11 Entries:[1/12 EntryNormal "data1"] +1->2 MsgApp Term:1 Log:1/12 Commit:11 Entries:[1/13 EntryNormal "data2"] +1->3 MsgApp Term:1 Log:1/11 Commit:11 Entries:[1/12 EntryNormal "data1"] +1->3 MsgApp Term:1 Log:1/12 Commit:11 Entries:[1/13 EntryNormal "data2"] + +process-ready 3 +---- +Ready MustSync=true: +Entries: +1/12 EntryNormal "data1" +1/13 EntryNormal "data2" +Messages: +3->1 MsgAppResp Term:1 Log:0/12 Commit:11 +3->1 MsgAppResp Term:1 Log:0/13 Commit:11 + +# In the meantime, the entries are committed, and the leader sends the commit +# index to all the followers. +stabilize 1 2 +---- +> 2 handling Ready + Ready MustSync=true: + Entries: + 1/12 EntryNormal "data1" + 1/13 EntryNormal "data2" + Messages: + 2->1 MsgAppResp Term:1 Log:0/12 Commit:11 + 2->1 MsgAppResp Term:1 Log:0/13 Commit:11 +> 1 receiving messages + 3->1 MsgAppResp Term:1 Log:0/12 Commit:11 + 3->1 MsgAppResp Term:1 Log:0/13 Commit:11 + 2->1 MsgAppResp Term:1 Log:0/12 Commit:11 + 2->1 MsgAppResp Term:1 Log:0/13 Commit:11 +> 1 handling Ready + Ready MustSync=true: + HardState Term:1 Vote:1 Commit:13 Lead:1 LeadEpoch:1 + CommittedEntries: + 1/12 EntryNormal "data1" + 1/13 EntryNormal "data2" + Messages: + 1->2 MsgApp Term:1 Log:1/13 Commit:12 + 1->3 MsgApp Term:1 Log:1/13 Commit:12 + 1->2 MsgApp Term:1 Log:1/13 Commit:13 + 1->3 MsgApp Term:1 Log:1/13 Commit:13 +> 2 receiving messages + 1->2 MsgApp Term:1 Log:1/13 Commit:12 + 1->2 MsgApp Term:1 Log:1/13 Commit:13 +> 2 handling Ready + Ready MustSync=true: + HardState Term:1 Vote:1 Commit:13 Lead:1 LeadEpoch:1 + CommittedEntries: + 1/12 EntryNormal "data1" + 1/13 EntryNormal "data2" + Messages: + 2->1 MsgAppResp Term:1 Log:0/13 Commit:12 + 2->1 MsgAppResp Term:1 Log:0/13 Commit:13 +> 1 receiving messages + 2->1 MsgAppResp Term:1 Log:0/13 Commit:12 + 2->1 MsgAppResp Term:1 Log:0/13 Commit:13 + +# The network blip prevents the follower 3 from learning that the previously +# appended entries are now committed. +deliver-msgs drop=(3) +---- +dropped: 1->3 MsgApp Term:1 Log:1/13 Commit:12 +dropped: 1->3 MsgApp Term:1 Log:1/13 Commit:13 + +# The network blip ends here. 
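The status output below surfaces the two per-follower fields this new test is about: sentCommit, the highest commit index the leader has sent, and matchCommit, the highest commit index the follower has acknowledged. A small Go sketch of the resulting heartbeat-time decision, using hypothetical names (progress, needsCommitMsgApp) rather than the real pkg/raft tracker:

package main

import "fmt"

// progress is a toy model of the new per-follower fields shown by "status".
type progress struct {
	sentCommit, matchCommit uint64
}

// needsCommitMsgApp mirrors the behavior the test demonstrates: on a
// heartbeat tick, send an (empty) MsgApp only to followers whose acknowledged
// commit index trails the leader's.
func needsCommitMsgApp(leaderCommit uint64, pr progress) bool {
	return pr.matchCommit < leaderCommit
}

func main() {
	leaderCommit := uint64(13)
	prs := map[int]progress{
		2: {sentCommit: 13, matchCommit: 13},
		3: {sentCommit: 13, matchCommit: 11}, // dropped MsgApps left 3 stale
	}
	for id, pr := range prs {
		fmt.Printf("send MsgApp to %d: %v\n", id, needsCommitMsgApp(leaderCommit, pr))
	}
}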
+ +status 1 +---- +1: StateReplicate match=13 next=14 sentCommit=11 matchCommit=11 +2: StateReplicate match=13 next=14 sentCommit=13 matchCommit=13 +3: StateReplicate match=13 next=14 sentCommit=13 matchCommit=11 + +# Trigger the next heartbeat. +tick-heartbeat 1 +---- +ok + +process-ready 1 +---- +Ready MustSync=false: +Messages: +1->2 MsgHeartbeat Term:1 Log:0/0 +1->3 MsgHeartbeat Term:1 Log:0/0 + +# On the next MsgApp sent to follower 3, the leader will include the latest +# commit index (13). Notice that the leader doesn't send a MsgApp to follower 2 +# because it knows that 2 already has the latest commit index. +stabilize 1 2 3 +---- +> 2 receiving messages + 1->2 MsgHeartbeat Term:1 Log:0/0 +> 3 receiving messages + 1->3 MsgHeartbeat Term:1 Log:0/0 +> 2 handling Ready + Ready MustSync=false: + Messages: + 2->1 MsgHeartbeatResp Term:1 Log:0/0 +> 3 handling Ready + Ready MustSync=false: + Messages: + 3->1 MsgHeartbeatResp Term:1 Log:0/0 +> 1 receiving messages + 2->1 MsgHeartbeatResp Term:1 Log:0/0 + 3->1 MsgHeartbeatResp Term:1 Log:0/0 +> 1 handling Ready + Ready MustSync=false: + Messages: + 1->3 MsgApp Term:1 Log:1/13 Commit:13 +> 3 receiving messages + 1->3 MsgApp Term:1 Log:1/13 Commit:13 +> 3 handling Ready + Ready MustSync=true: + HardState Term:1 Vote:1 Commit:13 Lead:1 LeadEpoch:1 + CommittedEntries: + 1/12 EntryNormal "data1" + 1/13 EntryNormal "data2" + Messages: + 3->1 MsgAppResp Term:1 Log:0/13 Commit:13 +> 1 receiving messages + 3->1 MsgAppResp Term:1 Log:0/13 Commit:13 + +# If the commit index is up-to-date, no MsgApp will be sent. +tick-heartbeat 1 +---- +ok + +stabilize 1 2 3 +---- +> 1 handling Ready + Ready MustSync=false: + Messages: + 1->2 MsgHeartbeat Term:1 Log:0/0 + 1->3 MsgHeartbeat Term:1 Log:0/0 +> 2 receiving messages + 1->2 MsgHeartbeat Term:1 Log:0/0 +> 3 receiving messages + 1->3 MsgHeartbeat Term:1 Log:0/0 +> 2 handling Ready + Ready MustSync=false: + Messages: + 2->1 MsgHeartbeatResp Term:1 Log:0/0 +> 3 handling Ready + Ready MustSync=false: + Messages: + 3->1 MsgHeartbeatResp Term:1 Log:0/0 +> 1 receiving messages + 2->1 MsgHeartbeatResp Term:1 Log:0/0 + 3->1 MsgHeartbeatResp Term:1 Log:0/0 + +# The leader's own sentCommit and matchCommit remain stale even after stabilize. +# The reason is that the leader sends itself a MsgAppResp when it receives a +# MsgProp. However, the leader doesn't send itself a MsgAppResp when it +# broadcasts a MsgApp to the followers. That's fine because the leader doesn't +# use its sentCommit/matchCommit for anything. +status 1 +---- +1: StateReplicate match=13 next=14 sentCommit=11 matchCommit=11 +2: StateReplicate match=13 next=14 sentCommit=13 matchCommit=13 +3: StateReplicate match=13 next=14 sentCommit=13 matchCommit=13 diff --git a/pkg/raft/testdata/msg_app_commit_index_leader_old_version.txt b/pkg/raft/testdata/msg_app_commit_index_leader_old_version.txt new file mode 100644 index 000000000000..53d6de30a943 --- /dev/null +++ b/pkg/raft/testdata/msg_app_commit_index_leader_old_version.txt @@ -0,0 +1,161 @@ +# This test demonstrates the case where the new commit index advancement logic +# via MsgApp landed on some nodes, but not the leader. In this case, +# heartbeats will continue advancing the commit index normally. + +# Skip logging the boilerplate. Set up a raft group of 3 nodes, and elect node 1 +# as the leader. Nodes 2 and 3 are the followers.
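A sketch of the version gate this mixed-version test relies on: an old-version leader still attaches its commit index to MsgHeartbeat, while a new-version leader leaves commit advancement to MsgApp. The string comparison below stands in for CockroachDB's real cluster-version machinery and is purely illustrative:

package main

import "fmt"

// commitInHeartbeat sketches the behavior this test exercises. The version
// handling here is illustrative, not CockroachDB's cluster-version API.
func commitInHeartbeat(leaderVersion string, commit uint64) uint64 {
	if leaderVersion < "24.3" { // lexicographic compare suffices for this toy
		return commit // e.g. "1->3 MsgHeartbeat Term:1 Log:0/0 Commit:13"
	}
	return 0 // new leaders send "MsgHeartbeat Term:1 Log:0/0" with no Commit
}

func main() {
	fmt.Println(commitInHeartbeat("24.2", 13)) // 13: old leader, as below
	fmt.Println(commitInHeartbeat("24.3", 13)) // 0: new leader relies on MsgApp
}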
+ +# TODO(ibrahim): Remove this test on versions >= 25.1, as it would no longer +# be possible to have a leader on an old version that depends on MsgHeartbeat +# for commit index advancement. + +log-level none +---- +ok + +# Add one node that will become the leader and set its CRDB version to be old. +add-nodes 1 voters=(1, 2, 3) index=10 crdb-version=24.2 +---- +ok + +# Add two nodes with the new CRDB version. +add-nodes 2 voters=(1, 2, 3) index=10 crdb-version=24.3 +---- +ok + +campaign 1 +---- +ok + +stabilize +---- +ok + +# Propose a couple of entries. +propose 1 data1 +---- +ok + +propose 1 data2 +---- +ok + +process-ready 1 +---- +ok + +# The interesting part starts below. +log-level debug +---- +ok + +deliver-msgs 2 3 +---- +1->2 MsgApp Term:1 Log:1/11 Commit:11 Entries:[1/12 EntryNormal "data1"] +1->2 MsgApp Term:1 Log:1/12 Commit:11 Entries:[1/13 EntryNormal "data2"] +1->3 MsgApp Term:1 Log:1/11 Commit:11 Entries:[1/12 EntryNormal "data1"] +1->3 MsgApp Term:1 Log:1/12 Commit:11 Entries:[1/13 EntryNormal "data2"] + +process-ready 3 +---- +Ready MustSync=true: +Entries: +1/12 EntryNormal "data1" +1/13 EntryNormal "data2" +Messages: +3->1 MsgAppResp Term:1 Log:0/12 Commit:11 +3->1 MsgAppResp Term:1 Log:0/13 Commit:11 + +# In the meantime, the entries are committed, and the leader sends the commit +# index to all the followers. +stabilize 1 2 +---- +> 2 handling Ready + Ready MustSync=true: + Entries: + 1/12 EntryNormal "data1" + 1/13 EntryNormal "data2" + Messages: + 2->1 MsgAppResp Term:1 Log:0/12 Commit:11 + 2->1 MsgAppResp Term:1 Log:0/13 Commit:11 +> 1 receiving messages + 3->1 MsgAppResp Term:1 Log:0/12 Commit:11 + 3->1 MsgAppResp Term:1 Log:0/13 Commit:11 + 2->1 MsgAppResp Term:1 Log:0/12 Commit:11 + 2->1 MsgAppResp Term:1 Log:0/13 Commit:11 +> 1 handling Ready + Ready MustSync=true: + HardState Term:1 Vote:1 Commit:13 Lead:1 LeadEpoch:1 + CommittedEntries: + 1/12 EntryNormal "data1" + 1/13 EntryNormal "data2" + Messages: + 1->2 MsgApp Term:1 Log:1/13 Commit:12 + 1->3 MsgApp Term:1 Log:1/13 Commit:12 + 1->2 MsgApp Term:1 Log:1/13 Commit:13 + 1->3 MsgApp Term:1 Log:1/13 Commit:13 +> 2 receiving messages + 1->2 MsgApp Term:1 Log:1/13 Commit:12 + 1->2 MsgApp Term:1 Log:1/13 Commit:13 +> 2 handling Ready + Ready MustSync=true: + HardState Term:1 Vote:1 Commit:13 Lead:1 LeadEpoch:1 + CommittedEntries: + 1/12 EntryNormal "data1" + 1/13 EntryNormal "data2" + Messages: + 2->1 MsgAppResp Term:1 Log:0/13 Commit:12 + 2->1 MsgAppResp Term:1 Log:0/13 Commit:13 +> 1 receiving messages + 2->1 MsgAppResp Term:1 Log:0/13 Commit:12 + 2->1 MsgAppResp Term:1 Log:0/13 Commit:13 + +# The network blip prevents the follower 3 from learning that the previously +# appended entries are now committed. +deliver-msgs drop=(3) +---- +dropped: 1->3 MsgApp Term:1 Log:1/13 Commit:12 +dropped: 1->3 MsgApp Term:1 Log:1/13 Commit:13 + +# The matchCommit can be ignored in this test because if the leader isn't +# actually running 24.3, it wouldn't even have the matchCommit field. +status 1 +---- +1: StateReplicate match=13 next=14 sentCommit=11 matchCommit=11 +2: StateReplicate match=13 next=14 sentCommit=13 matchCommit=13 +3: StateReplicate match=13 next=14 sentCommit=13 matchCommit=11 + +# Trigger the next heartbeat. +tick-heartbeat 1 +---- +ok + +process-ready 1 +---- +Ready MustSync=false: +Messages: +1->2 MsgHeartbeat Term:1 Log:0/0 Commit:13 +1->3 MsgHeartbeat Term:1 Log:0/0 Commit:13 + +# No MsgApp will be sent, since the leader already sent the latest commit index.
+stabilize 1 2 3 +---- +> 2 receiving messages + 1->2 MsgHeartbeat Term:1 Log:0/0 Commit:13 +> 3 receiving messages + 1->3 MsgHeartbeat Term:1 Log:0/0 Commit:13 +> 2 handling Ready + Ready MustSync=false: + Messages: + 2->1 MsgHeartbeatResp Term:1 Log:0/0 +> 3 handling Ready + Ready MustSync=true: + HardState Term:1 Vote:1 Commit:13 Lead:1 LeadEpoch:1 + CommittedEntries: + 1/12 EntryNormal "data1" + 1/13 EntryNormal "data2" + Messages: + 3->1 MsgHeartbeatResp Term:1 Log:0/0 +> 1 receiving messages + 2->1 MsgHeartbeatResp Term:1 Log:0/0 + 3->1 MsgHeartbeatResp Term:1 Log:0/0 diff --git a/pkg/raft/testdata/prevote.txt b/pkg/raft/testdata/prevote.txt index fd6e3cfe7451..8673a1332a87 100644 --- a/pkg/raft/testdata/prevote.txt +++ b/pkg/raft/testdata/prevote.txt @@ -56,6 +56,30 @@ raft-log 3 ---- 1/11 EntryNormal "" +withdraw-support 3 1 +---- + 1 2 3 +1 1 1 1 +2 1 1 1 +3 x 1 1 + +# Withdraw support for 1 (the previous leader) before campaigning from both 1 +# and 2; otherwise, 1 and 2 would trivially reject the campaign (because they +# support a fortified leader) without checking 3's log. +withdraw-support 2 1 +---- + 1 2 3 +1 1 1 1 +2 x 1 1 +3 x 1 1 + +withdraw-support 1 1 +---- + 1 2 3 +1 x 1 1 +2 x 1 1 +3 x 1 1 + campaign 3 ---- INFO 3 is starting a new election at term 1 @@ -67,13 +91,14 @@ process-ready 3 ---- Ready MustSync=true: State:StatePreCandidate -HardState Term:1 Vote:1 Commit:11 Lead:0 LeadEpoch:1 +HardState Term:1 Vote:1 Commit:11 Lead:0 LeadEpoch:0 Messages: 3->1 MsgPreVote Term:2 Log:1/11 3->2 MsgPreVote Term:2 Log:1/11 INFO 3 received MsgPreVoteResp from 3 at term 1 INFO 3 has received 1 MsgPreVoteResp votes and 0 vote rejections + deliver-msgs 1 2 ---- 2->1 MsgAppResp Term:1 Log:0/12 Commit:11 @@ -86,8 +111,8 @@ INFO 2 [logterm: 1, index: 12, vote: 1] rejected MsgPreVote from 3 [logterm: 1, stabilize ---- > 1 handling Ready - Ready MustSync=false: - HardState Term:1 Vote:1 Commit:12 Lead:1 LeadEpoch:1 + Ready MustSync=true: + HardState Term:1 Vote:1 Commit:12 Lead:1 LeadEpoch:0 CommittedEntries: 1/12 EntryNormal "prop_1" Messages: @@ -95,7 +120,8 @@ stabilize 1->3 MsgApp Term:1 Log:1/12 Commit:12 1->3 MsgPreVoteResp Term:1 Log:0/0 Rejected (Hint: 0) > 2 handling Ready - Ready MustSync=false: + Ready MustSync=true: + HardState Term:1 Vote:1 Commit:11 Lead:1 LeadEpoch:0 Messages: 2->3 MsgPreVoteResp Term:1 Log:0/0 Rejected (Hint: 0) > 2 receiving messages @@ -107,8 +133,8 @@ stabilize 1->3 MsgPreVoteResp Term:1 Log:0/0 Rejected (Hint: 0) 2->3 MsgPreVoteResp Term:1 Log:0/0 Rejected (Hint: 0) > 2 handling Ready - Ready MustSync=false: - HardState Term:1 Vote:1 Commit:12 Lead:1 LeadEpoch:1 + Ready MustSync=true: + HardState Term:1 Vote:1 Commit:12 Lead:1 LeadEpoch:0 CommittedEntries: 1/12 EntryNormal "prop_1" Messages: @@ -116,7 +142,7 @@ stabilize > 3 handling Ready Ready MustSync=true: State:StateFollower - HardState Term:1 Vote:1 Commit:12 Lead:1 LeadEpoch:1 + HardState Term:1 Vote:1 Commit:12 Lead:1 LeadEpoch:0 Entries: 1/12 EntryNormal "prop_1" CommittedEntries: @@ -129,6 +155,7 @@ stabilize 3->1 MsgAppResp Term:1 Log:0/12 Commit:11 3->1 MsgAppResp Term:1 Log:0/12 Commit:12 + # Let 2 campaign. It should succeed, since it's up-to-date on the log. 
campaign 2 ---- @@ -142,7 +169,7 @@ stabilize > 2 handling Ready Ready MustSync=true: State:StatePreCandidate - HardState Term:1 Vote:1 Commit:12 Lead:0 LeadEpoch:1 + HardState Term:1 Vote:1 Commit:12 Lead:0 LeadEpoch:0 Messages: 2->1 MsgPreVote Term:2 Log:1/12 2->3 MsgPreVote Term:2 Log:1/12 @@ -173,7 +200,7 @@ stabilize > 2 handling Ready Ready MustSync=true: State:StateCandidate - HardState Term:2 Vote:2 Commit:12 Lead:0 LeadEpoch:1 + HardState Term:2 Vote:2 Commit:12 Lead:0 LeadEpoch:0 Messages: 2->1 MsgVote Term:2 Log:1/12 2->3 MsgVote Term:2 Log:1/12 @@ -192,12 +219,12 @@ stabilize > 1 handling Ready Ready MustSync=true: State:StateFollower - HardState Term:2 Vote:2 Commit:12 Lead:0 LeadEpoch:1 + HardState Term:2 Vote:2 Commit:12 Lead:0 LeadEpoch:0 Messages: 1->2 MsgVoteResp Term:2 Log:0/0 > 3 handling Ready Ready MustSync=true: - HardState Term:2 Vote:2 Commit:12 Lead:0 LeadEpoch:1 + HardState Term:2 Vote:2 Commit:12 Lead:0 LeadEpoch:0 Messages: 3->2 MsgVoteResp Term:2 Log:0/0 > 2 receiving messages @@ -245,7 +272,7 @@ stabilize 3->2 MsgFortifyLeaderResp Term:2 Log:0/0 LeadEpoch:1 3->2 MsgAppResp Term:2 Log:0/13 Commit:12 > 2 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:2 Vote:2 Commit:13 Lead:2 LeadEpoch:1 CommittedEntries: 2/13 EntryNormal "" @@ -257,14 +284,14 @@ stabilize > 3 receiving messages 2->3 MsgApp Term:2 Log:2/13 Commit:13 > 1 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:2 Vote:2 Commit:13 Lead:2 LeadEpoch:1 CommittedEntries: 2/13 EntryNormal "" Messages: 1->2 MsgAppResp Term:2 Log:0/13 Commit:13 > 3 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:2 Vote:2 Commit:13 Lead:2 LeadEpoch:1 CommittedEntries: 2/13 EntryNormal "" diff --git a/pkg/raft/testdata/prevote_checkquorum.txt b/pkg/raft/testdata/prevote_checkquorum.txt index 7bd53ca5251b..5813e1526ecf 100644 --- a/pkg/raft/testdata/prevote_checkquorum.txt +++ b/pkg/raft/testdata/prevote_checkquorum.txt @@ -23,6 +23,13 @@ log-level debug ---- ok +withdraw-support 2 1 +---- + 1 2 3 +1 1 1 1 +2 x 1 1 +3 1 1 1 + # 2 should fail to campaign, leaving 1's leadership alone. campaign 2 ---- @@ -36,7 +43,7 @@ stabilize > 2 handling Ready Ready MustSync=true: State:StatePreCandidate - HardState Term:1 Vote:1 Commit:11 Lead:0 LeadEpoch:1 + HardState Term:1 Vote:1 Commit:11 Lead:0 LeadEpoch:0 Messages: 2->1 MsgPreVote Term:2 Log:1/11 2->3 MsgPreVote Term:2 Log:1/11 @@ -44,10 +51,10 @@ stabilize INFO 2 has received 1 MsgPreVoteResp votes and 0 vote rejections > 1 receiving messages 2->1 MsgPreVote Term:2 Log:1/11 - INFO 1 [logterm: 1, index: 11, vote: 1] ignored MsgPreVote from 2 [logterm: 1, index: 11] at term 1: lease is not expired (remaining ticks: 3) + INFO 1 [logterm: 1, index: 11, vote: 1] ignored MsgPreVote from 2 [logterm: 1, index: 11] at term 1: recently received communication from leader (remaining ticks: 3) and supporting fortified leader 1 at epoch 1 > 3 receiving messages 2->3 MsgPreVote Term:2 Log:1/11 - INFO 3 [logterm: 1, index: 11, vote: 1] ignored MsgPreVote from 2 [logterm: 1, index: 11] at term 1: lease is not expired (remaining ticks: 3) + INFO 3 [logterm: 1, index: 11, vote: 1] ignored MsgPreVote from 2 [logterm: 1, index: 11] at term 1: recently received communication from leader (remaining ticks: 3) and supporting fortified leader 1 at epoch 1 # If 2 hasn't heard from the leader in the past election timeout, it should # grant prevotes, allowing 3 to hold an election. 
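The "remaining ticks" in these log lines come from the CheckQuorum lease: a voter only considers (pre-)votes once a full election timeout has elapsed since it last heard from the leader, which is what tick-election fast-forwards. A toy Go model of that gate (field and method names are invented for illustration):

package main

import "fmt"

// voter models just the CheckQuorum lease referenced above: votes are only
// considered once a full election timeout has passed without leader contact.
type voter struct {
	electionTimeout         int // in ticks
	ticksSinceLeaderContact int
}

func (v *voter) tick() { v.ticksSinceLeaderContact++ }

func (v *voter) mayGrantPreVote() bool {
	return v.ticksSinceLeaderContact >= v.electionTimeout
}

func main() {
	v := &voter{electionTimeout: 3}
	// Matches "ignored MsgPreVote ... (remaining ticks: 3)" right after contact.
	fmt.Println(v.mayGrantPreVote(), "remaining ticks:", v.electionTimeout-v.ticksSinceLeaderContact)
	for i := 0; i < v.electionTimeout; i++ { // tick-election advances a full timeout
		v.tick()
	}
	fmt.Println(v.mayGrantPreVote()) // true: prevotes may now be granted
}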
@@ -59,6 +66,13 @@ tick-election 2 ---- ok +withdraw-support 3 1 +---- + 1 2 3 +1 1 1 1 +2 x 1 1 +3 x 1 1 + campaign 3 ---- INFO 3 is starting a new election at term 1 @@ -70,7 +84,7 @@ process-ready 3 ---- Ready MustSync=true: State:StatePreCandidate -HardState Term:1 Vote:1 Commit:11 Lead:0 LeadEpoch:1 +HardState Term:1 Vote:1 Commit:11 Lead:0 LeadEpoch:0 Messages: 3->1 MsgPreVote Term:2 Log:1/11 3->2 MsgPreVote Term:2 Log:1/11 @@ -92,7 +106,7 @@ stabilize ---- > 1 receiving messages 3->1 MsgPreVote Term:2 Log:1/11 - INFO 1 [logterm: 1, index: 11, vote: 1] ignored MsgPreVote from 3 [logterm: 1, index: 11] at term 1: lease is not expired (remaining ticks: 3) + INFO 1 [logterm: 1, index: 11, vote: 1] ignored MsgPreVote from 3 [logterm: 1, index: 11] at term 1: recently received communication from leader (remaining ticks: 3) and supporting fortified leader 1 at epoch 1 > 3 receiving messages 2->3 MsgPreVoteResp Term:2 Log:0/0 INFO 3 received MsgPreVoteResp from 2 at term 1 @@ -103,7 +117,7 @@ stabilize > 3 handling Ready Ready MustSync=true: State:StateCandidate - HardState Term:2 Vote:3 Commit:11 Lead:0 LeadEpoch:1 + HardState Term:2 Vote:3 Commit:11 Lead:0 LeadEpoch:0 Messages: 3->1 MsgVote Term:2 Log:1/11 3->2 MsgVote Term:2 Log:1/11 @@ -111,7 +125,7 @@ stabilize INFO 3 has received 1 MsgVoteResp votes and 0 vote rejections > 1 receiving messages 3->1 MsgVote Term:2 Log:1/11 - INFO 1 [logterm: 1, index: 11, vote: 1] ignored MsgVote from 3 [logterm: 1, index: 11] at term 1: lease is not expired (remaining ticks: 3) + INFO 1 [logterm: 1, index: 11, vote: 1] ignored MsgVote from 3 [logterm: 1, index: 11] at term 1: recently received communication from leader (remaining ticks: 3) and supporting fortified leader 1 at epoch 1 > 2 receiving messages 3->2 MsgVote Term:2 Log:1/11 INFO 2 [term: 1] received a MsgVote message with higher term from 3 [term: 2] @@ -120,7 +134,7 @@ stabilize > 2 handling Ready Ready MustSync=true: State:StateFollower - HardState Term:2 Vote:3 Commit:11 Lead:0 LeadEpoch:1 + HardState Term:2 Vote:3 Commit:11 Lead:0 LeadEpoch:0 Messages: 2->3 MsgVoteResp Term:2 Log:0/0 > 3 receiving messages @@ -170,7 +184,7 @@ stabilize 2->3 MsgFortifyLeaderResp Term:2 Log:0/0 LeadEpoch:1 2->3 MsgAppResp Term:2 Log:0/12 Commit:11 > 3 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:2 Vote:3 Commit:12 Lead:3 LeadEpoch:1 CommittedEntries: 2/12 EntryNormal "" @@ -182,14 +196,14 @@ stabilize > 2 receiving messages 3->2 MsgApp Term:2 Log:2/12 Commit:12 > 1 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:2 Commit:12 Lead:3 LeadEpoch:1 CommittedEntries: 2/12 EntryNormal "" Messages: 1->3 MsgAppResp Term:2 Log:0/12 Commit:12 > 2 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:2 Vote:3 Commit:12 Lead:3 LeadEpoch:1 CommittedEntries: 2/12 EntryNormal "" @@ -199,6 +213,13 @@ stabilize 1->3 MsgAppResp Term:2 Log:0/12 Commit:12 2->3 MsgAppResp Term:2 Log:0/12 Commit:12 +withdraw-support 1 3 +---- + 1 2 3 +1 1 1 x +2 x 1 1 +3 x 1 1 + # Node 3 is now the leader. Even though the leader is active, nodes 1 and 2 can # still win a prevote and election if they both explicitly campaign, since the # PreVote+CheckQuorum recent leader condition only applies to follower voters. 
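The updated INFO lines throughout this file now report both independent reasons for ignoring a (pre-)vote: the CheckQuorum lease and leader fortification. A short Go sketch that reconstructs the message format, with ignoreVoteReason as an invented helper rather than pkg/raft code:

package main

import (
	"fmt"
	"strings"
)

// ignoreVoteReason combines the two independent reasons a voter may have for
// ignoring a (pre-)vote request, joined the way the new log lines are.
func ignoreVoteReason(remainingTicks int, fortifiedLeader, epoch uint64) string {
	var reasons []string
	if remainingTicks > 0 {
		reasons = append(reasons, fmt.Sprintf(
			"recently received communication from leader (remaining ticks: %d)", remainingTicks))
	}
	if fortifiedLeader != 0 {
		reasons = append(reasons, fmt.Sprintf(
			"supporting fortified leader %d at epoch %d", fortifiedLeader, epoch))
	}
	return strings.Join(reasons, " and ")
}

func main() {
	// Reproduces the suffix of the INFO lines above.
	fmt.Println(ignoreVoteReason(3, 1, 1))
}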
@@ -219,7 +240,7 @@ stabilize > 1 handling Ready Ready MustSync=true: State:StatePreCandidate - HardState Term:2 Commit:12 Lead:0 LeadEpoch:1 + HardState Term:2 Commit:12 Lead:0 LeadEpoch:0 Messages: 1->2 MsgPreVote Term:3 Log:2/12 1->3 MsgPreVote Term:3 Log:2/12 @@ -227,10 +248,17 @@ stabilize INFO 1 has received 1 MsgPreVoteResp votes and 0 vote rejections > 2 receiving messages 1->2 MsgPreVote Term:3 Log:2/12 - INFO 2 [logterm: 2, index: 12, vote: 3] ignored MsgPreVote from 1 [logterm: 2, index: 12] at term 2: lease is not expired (remaining ticks: 3) + INFO 2 [logterm: 2, index: 12, vote: 3] ignored MsgPreVote from 1 [logterm: 2, index: 12] at term 2: recently received communication from leader (remaining ticks: 3) and supporting fortified leader 3 at epoch 1 > 3 receiving messages 1->3 MsgPreVote Term:3 Log:2/12 - INFO 3 [logterm: 2, index: 12, vote: 3] ignored MsgPreVote from 1 [logterm: 2, index: 12] at term 2: lease is not expired (remaining ticks: 3) + INFO 3 [logterm: 2, index: 12, vote: 3] ignored MsgPreVote from 1 [logterm: 2, index: 12] at term 2: recently received communication from leader (remaining ticks: 3) and supporting fortified leader 3 at epoch 1 + +withdraw-support 2 3 +---- + 1 2 3 +1 1 1 x +2 x 1 x +3 x 1 1 campaign 2 ---- @@ -244,7 +272,7 @@ stabilize > 2 handling Ready Ready MustSync=true: State:StatePreCandidate - HardState Term:2 Vote:3 Commit:12 Lead:0 LeadEpoch:1 + HardState Term:2 Vote:3 Commit:12 Lead:0 LeadEpoch:0 Messages: 2->1 MsgPreVote Term:3 Log:2/12 2->3 MsgPreVote Term:3 Log:2/12 @@ -255,7 +283,7 @@ stabilize INFO 1 [logterm: 2, index: 12, vote: 0] cast MsgPreVote for 2 [logterm: 2, index: 12] at term 2 > 3 receiving messages 2->3 MsgPreVote Term:3 Log:2/12 - INFO 3 [logterm: 2, index: 12, vote: 3] ignored MsgPreVote from 2 [logterm: 2, index: 12] at term 2: lease is not expired (remaining ticks: 3) + INFO 3 [logterm: 2, index: 12, vote: 3] ignored MsgPreVote from 2 [logterm: 2, index: 12] at term 2: recently received communication from leader (remaining ticks: 3) and supporting fortified leader 3 at epoch 1 > 1 handling Ready Ready MustSync=false: Messages: @@ -270,7 +298,7 @@ stabilize > 2 handling Ready Ready MustSync=true: State:StateCandidate - HardState Term:3 Vote:2 Commit:12 Lead:0 LeadEpoch:1 + HardState Term:3 Vote:2 Commit:12 Lead:0 LeadEpoch:0 Messages: 2->1 MsgVote Term:3 Log:2/12 2->3 MsgVote Term:3 Log:2/12 @@ -283,11 +311,11 @@ stabilize INFO 1 [logterm: 2, index: 12, vote: 0] cast MsgVote for 2 [logterm: 2, index: 12] at term 3 > 3 receiving messages 2->3 MsgVote Term:3 Log:2/12 - INFO 3 [logterm: 2, index: 12, vote: 3] ignored MsgVote from 2 [logterm: 2, index: 12] at term 2: lease is not expired (remaining ticks: 3) + INFO 3 [logterm: 2, index: 12, vote: 3] ignored MsgVote from 2 [logterm: 2, index: 12] at term 2: recently received communication from leader (remaining ticks: 3) and supporting fortified leader 3 at epoch 1 > 1 handling Ready Ready MustSync=true: State:StateFollower - HardState Term:3 Vote:2 Commit:12 Lead:0 LeadEpoch:1 + HardState Term:3 Vote:2 Commit:12 Lead:0 LeadEpoch:0 Messages: 1->2 MsgVoteResp Term:3 Log:0/0 > 2 receiving messages @@ -337,7 +365,7 @@ stabilize 3->2 MsgFortifyLeaderResp Term:3 Log:0/0 LeadEpoch:1 3->2 MsgAppResp Term:3 Log:0/13 Commit:12 > 2 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:3 Vote:2 Commit:13 Lead:2 LeadEpoch:1 CommittedEntries: 3/13 EntryNormal "" @@ -349,14 +377,14 @@ stabilize > 3 receiving messages 2->3 MsgApp Term:3 Log:3/13 Commit:13 > 1 
handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:3 Vote:2 Commit:13 Lead:2 LeadEpoch:1 CommittedEntries: 3/13 EntryNormal "" Messages: 1->2 MsgAppResp Term:3 Log:0/13 Commit:13 > 3 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:3 Commit:13 Lead:2 LeadEpoch:1 CommittedEntries: 3/13 EntryNormal "" diff --git a/pkg/raft/testdata/probe_and_replicate.txt b/pkg/raft/testdata/probe_and_replicate.txt index 2d738fdd47da..fb9346a0c579 100644 --- a/pkg/raft/testdata/probe_and_replicate.txt +++ b/pkg/raft/testdata/probe_and_replicate.txt @@ -58,6 +58,10 @@ stabilize ---- ok +bump-epoch 1 +---- +ok + ## Create term 2 entries. campaign 2 ---- @@ -91,6 +95,10 @@ deliver-msgs drop=(1,2,3,4,5,6,7) ---- ok +bump-epoch 2 +---- +ok + ## Create term 3 entries. campaign 7 ---- @@ -132,6 +140,10 @@ deliver-msgs drop=(1,2,3,4,5,6,7) ---- ok +bump-epoch 7 +---- +ok + ## Create term 4 entries. campaign 6 ---- @@ -165,6 +177,10 @@ deliver-msgs drop=(1,2,3,4,5,6,7) ---- ok +bump-epoch 6 +---- +ok + ## Create term 5 entries. campaign 5 ---- @@ -186,6 +202,10 @@ deliver-msgs drop=(1,2,3,4,5,6,7) ---- ok +bump-epoch 5 +---- +ok + ## Create term 6 entries. campaign 4 ---- @@ -223,6 +243,10 @@ deliver-msgs drop=(1,2,3,4,5,6,7) ---- ok +bump-epoch 4 +---- +ok + ## Create term 7 entries. campaign 5 ---- @@ -351,6 +375,16 @@ raft-log 7 3/20 EntryNormal "prop_3_20" 3/21 EntryNormal "prop_3_21" +bump-epoch 5 +---- + 1 2 3 4 5 6 7 +1 2 2 1 2 3 2 2 +2 2 2 1 2 3 2 2 +3 2 2 1 2 3 2 2 +4 2 2 1 2 3 2 2 +5 2 2 1 2 3 2 2 +6 2 2 1 2 3 2 2 +7 2 2 1 2 3 2 2 # Elect node 1 as leader and stabilize. campaign 1 @@ -370,7 +404,7 @@ stabilize 1 > 1 handling Ready Ready MustSync=true: State:StateCandidate - HardState Term:8 Vote:1 Commit:18 Lead:0 LeadEpoch:1 + HardState Term:8 Vote:1 Commit:18 Lead:0 LeadEpoch:0 Messages: 1->2 MsgVote Term:8 Log:6/20 1->3 MsgVote Term:8 Log:6/20 @@ -415,34 +449,34 @@ stabilize 2 3 4 5 6 7 INFO 7 [logterm: 3, index: 21, vote: 0] cast MsgVote for 1 [logterm: 6, index: 20] at term 8 > 2 handling Ready Ready MustSync=true: - HardState Term:8 Vote:1 Commit:18 Lead:0 LeadEpoch:1 + HardState Term:8 Vote:1 Commit:18 Lead:0 LeadEpoch:0 Messages: 2->1 MsgVoteResp Term:8 Log:0/0 > 3 handling Ready Ready MustSync=true: - HardState Term:8 Vote:1 Commit:14 Lead:0 LeadEpoch:1 + HardState Term:8 Vote:1 Commit:14 Lead:0 LeadEpoch:0 Messages: 3->1 MsgVoteResp Term:8 Log:0/0 > 4 handling Ready Ready MustSync=true: State:StateFollower - HardState Term:8 Commit:18 Lead:0 LeadEpoch:1 + HardState Term:8 Commit:18 Lead:0 LeadEpoch:0 Messages: 4->1 MsgVoteResp Term:8 Log:0/0 Rejected (Hint: 0) > 5 handling Ready Ready MustSync=true: State:StateFollower - HardState Term:8 Commit:18 Lead:0 LeadEpoch:1 + HardState Term:8 Commit:18 Lead:0 LeadEpoch:0 Messages: 5->1 MsgVoteResp Term:8 Log:0/0 Rejected (Hint: 0) > 6 handling Ready Ready MustSync=true: - HardState Term:8 Vote:1 Commit:15 Lead:0 LeadEpoch:1 + HardState Term:8 Vote:1 Commit:15 Lead:0 LeadEpoch:0 Messages: 6->1 MsgVoteResp Term:8 Log:0/0 > 7 handling Ready Ready MustSync=true: - HardState Term:8 Vote:1 Commit:13 Lead:0 LeadEpoch:1 + HardState Term:8 Vote:1 Commit:13 Lead:0 LeadEpoch:0 Messages: 7->1 MsgVoteResp Term:8 Log:0/0 @@ -469,7 +503,7 @@ stabilize 1 > 1 handling Ready Ready MustSync=true: State:StateLeader - HardState Term:8 Vote:1 Commit:18 Lead:1 LeadEpoch:1 + HardState Term:8 Vote:1 Commit:18 Lead:1 LeadEpoch:2 Entries: 8/21 EntryNormal "" Messages: @@ -494,12 +528,12 @@ stabilize 1 2 1->2 MsgApp Term:8 
Log:6/20 Commit:18 Entries:[8/21 EntryNormal ""] > 2 handling Ready Ready MustSync=true: - HardState Term:8 Vote:1 Commit:18 Lead:1 LeadEpoch:1 + HardState Term:8 Vote:1 Commit:18 Lead:1 LeadEpoch:2 Messages: - 2->1 MsgFortifyLeaderResp Term:8 Log:0/0 LeadEpoch:1 + 2->1 MsgFortifyLeaderResp Term:8 Log:0/0 LeadEpoch:2 2->1 MsgAppResp Term:8 Log:6/20 Rejected (Hint: 19) Commit:18 > 1 receiving messages - 2->1 MsgFortifyLeaderResp Term:8 Log:0/0 LeadEpoch:1 + 2->1 MsgFortifyLeaderResp Term:8 Log:0/0 LeadEpoch:2 2->1 MsgAppResp Term:8 Log:6/20 Rejected (Hint: 19) Commit:18 > 1 handling Ready Ready MustSync=false: @@ -530,12 +564,12 @@ stabilize 1 3 1->3 MsgApp Term:8 Log:6/20 Commit:18 Entries:[8/21 EntryNormal ""] > 3 handling Ready Ready MustSync=true: - HardState Term:8 Vote:1 Commit:14 Lead:1 LeadEpoch:1 + HardState Term:8 Vote:1 Commit:14 Lead:1 LeadEpoch:2 Messages: - 3->1 MsgFortifyLeaderResp Term:8 Log:0/0 LeadEpoch:1 + 3->1 MsgFortifyLeaderResp Term:8 Log:0/0 LeadEpoch:2 3->1 MsgAppResp Term:8 Log:4/20 Rejected (Hint: 14) Commit:14 > 1 receiving messages - 3->1 MsgFortifyLeaderResp Term:8 Log:0/0 LeadEpoch:1 + 3->1 MsgFortifyLeaderResp Term:8 Log:0/0 LeadEpoch:2 3->1 MsgAppResp Term:8 Log:4/20 Rejected (Hint: 14) Commit:14 > 1 handling Ready Ready MustSync=false: @@ -561,7 +595,7 @@ stabilize 1 3 ] > 3 handling Ready Ready MustSync=true: - HardState Term:8 Vote:1 Commit:18 Lead:1 LeadEpoch:1 + HardState Term:8 Vote:1 Commit:18 Lead:1 LeadEpoch:2 Entries: 4/15 EntryNormal "prop_4_15" 5/16 EntryNormal "" @@ -589,18 +623,18 @@ stabilize 1 4 INFO replace the unstable entries from index 21 > 4 handling Ready Ready MustSync=true: - HardState Term:8 Commit:18 Lead:1 LeadEpoch:1 + HardState Term:8 Commit:18 Lead:1 LeadEpoch:2 Entries: 8/21 EntryNormal "" Messages: - 4->1 MsgFortifyLeaderResp Term:8 Log:0/0 LeadEpoch:1 + 4->1 MsgFortifyLeaderResp Term:8 Log:0/0 LeadEpoch:2 4->1 MsgAppResp Term:8 Log:0/21 Commit:18 > 1 receiving messages - 4->1 MsgFortifyLeaderResp Term:8 Log:0/0 LeadEpoch:1 + 4->1 MsgFortifyLeaderResp Term:8 Log:0/0 LeadEpoch:2 4->1 MsgAppResp Term:8 Log:0/21 Commit:18 > 1 handling Ready - Ready MustSync=false: - HardState Term:8 Vote:1 Commit:21 Lead:1 LeadEpoch:1 + Ready MustSync=true: + HardState Term:8 Vote:1 Commit:21 Lead:1 LeadEpoch:2 CommittedEntries: 6/19 EntryNormal "prop_6_19" 6/20 EntryNormal "prop_6_20" @@ -612,8 +646,8 @@ stabilize 1 4 > 4 receiving messages 1->4 MsgApp Term:8 Log:8/21 Commit:21 > 4 handling Ready - Ready MustSync=false: - HardState Term:8 Commit:21 Lead:1 LeadEpoch:1 + Ready MustSync=true: + HardState Term:8 Commit:21 Lead:1 LeadEpoch:2 CommittedEntries: 6/19 EntryNormal "prop_6_19" 6/20 EntryNormal "prop_6_20" @@ -630,12 +664,12 @@ stabilize 1 5 1->5 MsgApp Term:8 Log:6/20 Commit:18 Entries:[8/21 EntryNormal ""] > 5 handling Ready Ready MustSync=true: - HardState Term:8 Commit:18 Lead:1 LeadEpoch:1 + HardState Term:8 Commit:18 Lead:1 LeadEpoch:2 Messages: - 5->1 MsgFortifyLeaderResp Term:8 Log:0/0 LeadEpoch:1 + 5->1 MsgFortifyLeaderResp Term:8 Log:0/0 LeadEpoch:2 5->1 MsgAppResp Term:8 Log:6/20 Rejected (Hint: 18) Commit:18 > 1 receiving messages - 5->1 MsgFortifyLeaderResp Term:8 Log:0/0 LeadEpoch:1 + 5->1 MsgFortifyLeaderResp Term:8 Log:0/0 LeadEpoch:2 5->1 MsgAppResp Term:8 Log:6/20 Rejected (Hint: 18) Commit:18 > 1 handling Ready Ready MustSync=false: @@ -655,7 +689,7 @@ stabilize 1 5 INFO replace the unstable entries from index 19 > 5 handling Ready Ready MustSync=true: - HardState Term:8 Commit:21 Lead:1 LeadEpoch:1 + HardState Term:8 
Commit:21 Lead:1 LeadEpoch:2 Entries: 6/19 EntryNormal "prop_6_19" 6/20 EntryNormal "prop_6_20" @@ -676,12 +710,12 @@ stabilize 1 6 1->6 MsgApp Term:8 Log:6/20 Commit:18 Entries:[8/21 EntryNormal ""] > 6 handling Ready Ready MustSync=true: - HardState Term:8 Vote:1 Commit:15 Lead:1 LeadEpoch:1 + HardState Term:8 Vote:1 Commit:15 Lead:1 LeadEpoch:2 Messages: - 6->1 MsgFortifyLeaderResp Term:8 Log:0/0 LeadEpoch:1 + 6->1 MsgFortifyLeaderResp Term:8 Log:0/0 LeadEpoch:2 6->1 MsgAppResp Term:8 Log:4/20 Rejected (Hint: 17) Commit:15 > 1 receiving messages - 6->1 MsgFortifyLeaderResp Term:8 Log:0/0 LeadEpoch:1 + 6->1 MsgFortifyLeaderResp Term:8 Log:0/0 LeadEpoch:2 6->1 MsgAppResp Term:8 Log:4/20 Rejected (Hint: 17) Commit:15 > 1 handling Ready Ready MustSync=false: @@ -707,7 +741,7 @@ stabilize 1 6 INFO replace the unstable entries from index 16 > 6 handling Ready Ready MustSync=true: - HardState Term:8 Vote:1 Commit:21 Lead:1 LeadEpoch:1 + HardState Term:8 Vote:1 Commit:21 Lead:1 LeadEpoch:2 Entries: 5/16 EntryNormal "" 5/17 EntryNormal "prop_5_17" @@ -734,12 +768,12 @@ stabilize 1 7 1->7 MsgApp Term:8 Log:6/20 Commit:18 Entries:[8/21 EntryNormal ""] > 7 handling Ready Ready MustSync=true: - HardState Term:8 Vote:1 Commit:13 Lead:1 LeadEpoch:1 + HardState Term:8 Vote:1 Commit:13 Lead:1 LeadEpoch:2 Messages: - 7->1 MsgFortifyLeaderResp Term:8 Log:0/0 LeadEpoch:1 + 7->1 MsgFortifyLeaderResp Term:8 Log:0/0 LeadEpoch:2 7->1 MsgAppResp Term:8 Log:3/20 Rejected (Hint: 20) Commit:13 > 1 receiving messages - 7->1 MsgFortifyLeaderResp Term:8 Log:0/0 LeadEpoch:1 + 7->1 MsgFortifyLeaderResp Term:8 Log:0/0 LeadEpoch:2 7->1 MsgAppResp Term:8 Log:3/20 Rejected (Hint: 20) Commit:13 > 1 handling Ready Ready MustSync=false: @@ -769,7 +803,7 @@ stabilize 1 7 INFO replace the unstable entries from index 14 > 7 handling Ready Ready MustSync=true: - HardState Term:8 Vote:1 Commit:21 Lead:1 LeadEpoch:1 + HardState Term:8 Vote:1 Commit:21 Lead:1 LeadEpoch:2 Entries: 4/14 EntryNormal "" 4/15 EntryNormal "prop_4_15" diff --git a/pkg/raft/testdata/replicate_pause.txt b/pkg/raft/testdata/replicate_pause.txt index 11cfff7190ab..7ee9a71e24f8 100644 --- a/pkg/raft/testdata/replicate_pause.txt +++ b/pkg/raft/testdata/replicate_pause.txt @@ -46,9 +46,9 @@ ok # Expect that in-flight tracking to nodes 2 and 3 is saturated. status 1 ---- -1: StateReplicate match=14 next=15 -2: StateReplicate match=11 next=15 paused inflight=3[full] -3: StateReplicate match=11 next=15 paused inflight=3[full] +1: StateReplicate match=14 next=15 sentCommit=11 matchCommit=11 +2: StateReplicate match=11 next=15 sentCommit=11 matchCommit=11 paused inflight=3[full] +3: StateReplicate match=11 next=15 sentCommit=11 matchCommit=11 paused inflight=3[full] log-level none ---- @@ -66,9 +66,9 @@ ok # Expect that the entries are committed and stored on nodes 1 and 2. status 1 ---- -1: StateReplicate match=14 next=15 -2: StateReplicate match=14 next=15 -3: StateReplicate match=11 next=15 paused inflight=3[full] +1: StateReplicate match=14 next=15 sentCommit=11 matchCommit=11 +2: StateReplicate match=14 next=15 sentCommit=14 matchCommit=14 +3: StateReplicate match=11 next=15 sentCommit=14 matchCommit=11 paused inflight=3[full] # Drop append messages to node 3. deliver-msgs drop=3 @@ -97,9 +97,9 @@ ok # In-flight tracking to nodes 2 and 3 is saturated, but node 3 is behind. 
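The `paused inflight=3[full]` annotations in these status dumps come from raft's per-follower flow control: the leader caps the number of unacknowledged MsgApps per follower and pauses the stream once the cap is hit. A minimal standalone sketch of that mechanism (illustrative only; the real implementation is `tracker.Inflights`, which additionally enforces a byte budget):

```go
package main

import "fmt"

// inflights is a toy fixed-capacity window of unacknowledged MsgApps.
type inflights struct {
	size    int      // max number of in-flight MsgApps per follower
	indexes []uint64 // last entry index of each in-flight MsgApp
}

func (in *inflights) full() bool { return len(in.indexes) >= in.size }

func (in *inflights) add(index uint64) {
	if in.full() {
		panic("add to full inflights")
	}
	in.indexes = append(in.indexes, index)
}

// freeLE releases all in-flight appends up to and including index, as an
// acknowledging MsgAppResp would.
func (in *inflights) freeLE(index uint64) {
	var kept []uint64
	for _, idx := range in.indexes {
		if idx > index {
			kept = append(kept, idx)
		}
	}
	in.indexes = kept
}

func main() {
	in := &inflights{size: 3}
	for _, idx := range []uint64{12, 13, 14} {
		in.add(idx)
	}
	fmt.Println(in.full()) // true: the leader pauses MsgApps to this follower
	in.freeLE(13)
	fmt.Println(in.full()) // false: two slots freed, the flow resumes
}
```

Acks free slots from the front of the window, which is why node 2 drains below while node 3, whose appends are being dropped, stays `[full]`.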
status 1 ---- -1: StateReplicate match=14 next=18 -2: StateReplicate match=14 next=18 paused inflight=3[full] -3: StateReplicate match=11 next=15 paused inflight=3[full] +1: StateReplicate match=14 next=18 sentCommit=11 matchCommit=11 +2: StateReplicate match=14 next=18 sentCommit=14 matchCommit=14 paused inflight=3[full] +3: StateReplicate match=11 next=15 sentCommit=14 matchCommit=11 paused inflight=3[full] log-level none ---- @@ -117,9 +117,9 @@ ok # Expect that the entries are committed and stored only on nodes 1 and 2. status 1 ---- -1: StateReplicate match=17 next=18 -2: StateReplicate match=17 next=18 -3: StateReplicate match=11 next=15 paused inflight=3[full] +1: StateReplicate match=17 next=18 sentCommit=14 matchCommit=14 +2: StateReplicate match=17 next=18 sentCommit=17 matchCommit=17 +3: StateReplicate match=11 next=15 sentCommit=14 matchCommit=11 paused inflight=3[full] # Make a heartbeat roundtrip. tick-heartbeat 1 @@ -131,15 +131,15 @@ stabilize 1 > 1 handling Ready Ready MustSync=false: Messages: - 1->2 MsgHeartbeat Term:1 Log:0/0 Commit:17 - 1->3 MsgHeartbeat Term:1 Log:0/0 Commit:11 + 1->2 MsgHeartbeat Term:1 Log:0/0 + 1->3 MsgHeartbeat Term:1 Log:0/0 stabilize 2 3 ---- > 2 receiving messages - 1->2 MsgHeartbeat Term:1 Log:0/0 Commit:17 + 1->2 MsgHeartbeat Term:1 Log:0/0 > 3 receiving messages - 1->3 MsgHeartbeat Term:1 Log:0/0 Commit:11 + 1->3 MsgHeartbeat Term:1 Log:0/0 > 2 handling Ready Ready MustSync=false: Messages: @@ -188,6 +188,6 @@ ok # Eventually all nodes catch up on the committed state. status 1 ---- -1: StateReplicate match=17 next=18 -2: StateReplicate match=17 next=18 -3: StateReplicate match=17 next=18 +1: StateReplicate match=17 next=18 sentCommit=14 matchCommit=14 +2: StateReplicate match=17 next=18 sentCommit=17 matchCommit=17 +3: StateReplicate match=17 next=18 sentCommit=17 matchCommit=17 diff --git a/pkg/raft/testdata/single_node.txt b/pkg/raft/testdata/single_node.txt index f819261495b8..428a8c18a52e 100644 --- a/pkg/raft/testdata/single_node.txt +++ b/pkg/raft/testdata/single_node.txt @@ -29,7 +29,7 @@ stabilize Entries: 1/4 EntryNormal "" > 1 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:1 Vote:1 Commit:4 Lead:1 LeadEpoch:1 CommittedEntries: 1/4 EntryNormal "" diff --git a/pkg/raft/testdata/slow_follower_after_compaction.txt b/pkg/raft/testdata/slow_follower_after_compaction.txt index 2ce02adae5c6..83a94fbbb6b0 100644 --- a/pkg/raft/testdata/slow_follower_after_compaction.txt +++ b/pkg/raft/testdata/slow_follower_after_compaction.txt @@ -43,9 +43,9 @@ ok # All nodes up-to-date. status 1 ---- -1: StateReplicate match=14 next=15 -2: StateReplicate match=14 next=15 -3: StateReplicate match=14 next=15 +1: StateReplicate match=14 next=15 sentCommit=11 matchCommit=11 +2: StateReplicate match=14 next=15 sentCommit=14 matchCommit=14 +3: StateReplicate match=14 next=15 sentCommit=14 matchCommit=14 log-level none ---- @@ -79,9 +79,9 @@ ok # Nodes 1 and 2 up-to-date, 3 is behind and MsgApp flow is throttled. status 1 ---- -1: StateReplicate match=18 next=19 -2: StateReplicate match=18 next=19 -3: StateReplicate match=14 next=17 paused inflight=2[full] +1: StateReplicate match=18 next=19 sentCommit=14 matchCommit=14 +2: StateReplicate match=18 next=19 sentCommit=18 matchCommit=18 +3: StateReplicate match=14 next=17 sentCommit=16 matchCommit=14 paused inflight=2[full] # Break the MsgApp flow from the leader to node 3. deliver-msgs drop=3 @@ -118,6 +118,6 @@ ok # All nodes caught up. 
status 1 ---- -1: StateReplicate match=18 next=19 -2: StateReplicate match=18 next=19 -3: StateReplicate match=18 next=19 +1: StateReplicate match=18 next=19 sentCommit=14 matchCommit=14 +2: StateReplicate match=18 next=19 sentCommit=18 matchCommit=18 +3: StateReplicate match=18 next=19 sentCommit=18 matchCommit=18 diff --git a/pkg/raft/testdata/snapshot_succeed_via_app_resp.txt b/pkg/raft/testdata/snapshot_succeed_via_app_resp.txt index 3390ae47a0cd..9c96104e3dc6 100644 --- a/pkg/raft/testdata/snapshot_succeed_via_app_resp.txt +++ b/pkg/raft/testdata/snapshot_succeed_via_app_resp.txt @@ -41,9 +41,9 @@ ok status 1 ---- -1: StateReplicate match=11 next=12 -2: StateReplicate match=11 next=12 -3: StateProbe match=0 next=11 paused inactive +1: StateReplicate match=11 next=12 sentCommit=10 matchCommit=10 +2: StateReplicate match=11 next=12 sentCommit=11 matchCommit=11 +3: StateProbe match=0 next=11 sentCommit=10 matchCommit=0 paused inactive # Add the node that will receive a snapshot (it has no state at all, does not # even have a config). @@ -63,7 +63,7 @@ process-ready 1 ---- Ready MustSync=false: Messages: -1->2 MsgHeartbeat Term:1 Log:0/0 Commit:11 +1->2 MsgHeartbeat Term:1 Log:0/0 1->3 MsgHeartbeat Term:1 Log:0/0 # Iterate until no more work is done by the new peer. It receives the heartbeat @@ -85,8 +85,8 @@ stabilize 1 ---- > 1 receiving messages 3->1 MsgHeartbeatResp Term:1 Log:0/0 - DEBUG 1 [firstindex: 12, commit: 11] sent snapshot[index: 11, term: 1] to 3 [StateProbe match=0 next=11] - DEBUG 1 paused sending replication messages to 3 [StateSnapshot match=0 next=12 paused pendingSnap=11] + DEBUG 1 [firstindex: 12, commit: 11] sent snapshot[index: 11, term: 1] to 3 [StateProbe match=0 next=11 sentCommit=10 matchCommit=0] + DEBUG 1 paused sending replication messages to 3 [StateSnapshot match=0 next=12 sentCommit=11 matchCommit=0 paused pendingSnap=11] > 1 handling Ready Ready MustSync=false: Messages: @@ -95,9 +95,9 @@ stabilize 1 status 1 ---- -1: StateReplicate match=11 next=12 -2: StateReplicate match=11 next=12 -3: StateSnapshot match=0 next=12 paused pendingSnap=11 +1: StateReplicate match=11 next=12 sentCommit=10 matchCommit=10 +2: StateReplicate match=11 next=12 sentCommit=11 matchCommit=11 +3: StateSnapshot match=0 next=12 sentCommit=11 matchCommit=0 paused pendingSnap=11 # Follower applies the snapshot. Note how it reacts with a MsgAppResp upon completion. # The snapshot fully catches the follower up (i.e. 
there are no more log entries it @@ -113,7 +113,7 @@ stabilize 3 INFO 3 [commit: 11, lastindex: 11, lastterm: 1] restored snapshot [index: 11, term: 1] INFO 3 [commit: 11] restored snapshot [index: 11, term: 1] > 3 handling Ready - Ready MustSync=false: + Ready MustSync=true: HardState Term:1 Commit:11 Lead:1 LeadEpoch:0 Snapshot Index:11 Term:1 ConfState:Voters:[1 2 3] VotersOutgoing:[] Learners:[] LearnersNext:[] AutoLeave:false Messages: @@ -125,19 +125,19 @@ stabilize 1 ---- > 1 receiving messages 3->1 MsgAppResp Term:1 Log:0/11 Commit:11 - DEBUG 1 recovered from needing snapshot, resumed sending replication messages to 3 [StateSnapshot match=11 next=12 paused pendingSnap=11] + DEBUG 1 recovered from needing snapshot, resumed sending replication messages to 3 [StateSnapshot match=11 next=12 sentCommit=11 matchCommit=11 paused pendingSnap=11] status 1 ---- -1: StateReplicate match=11 next=12 -2: StateReplicate match=11 next=12 -3: StateReplicate match=11 next=12 +1: StateReplicate match=11 next=12 sentCommit=10 matchCommit=10 +2: StateReplicate match=11 next=12 sentCommit=11 matchCommit=11 +3: StateReplicate match=11 next=12 sentCommit=11 matchCommit=11 # Let things settle. stabilize ---- > 2 receiving messages - 1->2 MsgHeartbeat Term:1 Log:0/0 Commit:11 + 1->2 MsgHeartbeat Term:1 Log:0/0 > 2 handling Ready Ready MustSync=false: Messages: diff --git a/pkg/raft/testdata/snapshot_succeed_via_app_resp_behind.txt b/pkg/raft/testdata/snapshot_succeed_via_app_resp_behind.txt index c683de2c9769..c28ea9e1ffcb 100644 --- a/pkg/raft/testdata/snapshot_succeed_via_app_resp_behind.txt +++ b/pkg/raft/testdata/snapshot_succeed_via_app_resp_behind.txt @@ -48,9 +48,9 @@ raft-state status 1 ---- -1: StateReplicate match=11 next=12 -2: StateReplicate match=11 next=12 -3: StateProbe match=0 next=11 paused inactive +1: StateReplicate match=11 next=12 sentCommit=10 matchCommit=10 +2: StateReplicate match=11 next=12 sentCommit=11 matchCommit=11 +3: StateProbe match=0 next=11 sentCommit=10 matchCommit=0 paused inactive raft-log 3 ---- @@ -83,9 +83,9 @@ ok status 1 ---- -1: StateReplicate match=12 next=13 -2: StateReplicate match=12 next=13 -3: StateProbe match=0 next=11 paused inactive +1: StateReplicate match=12 next=13 sentCommit=11 matchCommit=11 +2: StateReplicate match=12 next=13 sentCommit=12 matchCommit=12 +3: StateProbe match=0 next=11 sentCommit=10 matchCommit=0 paused inactive # 3 now gets the first MsgApp the leader originally sent, trying to append entry # 11 but this is rejected because the follower's log started at index 5. 
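The next hunk shows how the leader digests that rejection. The `Commit:5` now carried on the rejecting MsgAppResp still advances `matchCommit`, while `next` and `sentCommit` regress. Worked through standalone (the arithmetic mirrors `MaybeDecrTo` and the `SentCommit` clamping in the progress.go changes later in this diff; it is not the production code):

```go
package main

import "fmt"

func main() {
	// Leader's view of follower 3 before the rejection, per the status
	// above: StateProbe match=0 next=11 sentCommit=10 matchCommit=0.
	match, next := uint64(0), uint64(11)
	sentCommit, matchCommit := uint64(10), uint64(0)

	// 3->1 MsgAppResp Term:1 Log:1/10 Rejected (Hint: 5) Commit:5
	rejected, hint, commit := uint64(10), uint64(5), uint64(5)

	// The follower's durable commit index advances regardless.
	matchCommit = max(matchCommit, commit) // 5

	// Probe from just past the hint, and clamp sentCommit: a commit index
	// above Next-1 can no longer be assumed delivered.
	next = max(min(rejected, hint+1), match+1) // 6
	sentCommit = min(sentCommit, next-1)       // 5

	fmt.Printf("match=%d next=%d sentCommit=%d matchCommit=%d\n",
		match, next, sentCommit, matchCommit)
	// Matches the DEBUG line in the hunk below:
	// match=0 next=6 sentCommit=5 matchCommit=5
}
```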
@@ -123,9 +123,9 @@ stabilize 1 > 1 receiving messages 3->1 MsgAppResp Term:1 Log:1/10 Rejected (Hint: 5) Commit:5 DEBUG 1 received MsgAppResp(rejected, hint: (index 5, term 1)) from 3 for index 10 - DEBUG 1 decreased progress of 3 to [StateProbe match=0 next=6] - DEBUG 1 [firstindex: 11, commit: 12] sent snapshot[index: 12, term: 1] to 3 [StateProbe match=0 next=6] - DEBUG 1 paused sending replication messages to 3 [StateSnapshot match=0 next=13 paused pendingSnap=12] + DEBUG 1 decreased progress of 3 to [StateProbe match=0 next=6 sentCommit=5 matchCommit=5] + DEBUG 1 [firstindex: 11, commit: 12] sent snapshot[index: 12, term: 1] to 3 [StateProbe match=0 next=6 sentCommit=5 matchCommit=5] + DEBUG 1 paused sending replication messages to 3 [StateSnapshot match=0 next=13 sentCommit=12 matchCommit=5 paused pendingSnap=12] > 1 handling Ready Ready MustSync=false: Messages: @@ -155,7 +155,7 @@ stabilize 1 > 1 receiving messages 3->1 MsgFortifyLeaderResp Term:1 Log:0/0 LeadEpoch:1 3->1 MsgAppResp Term:1 Log:0/11 Commit:11 - DEBUG 1 recovered from needing snapshot, resumed sending replication messages to 3 [StateSnapshot match=11 next=13 paused pendingSnap=12] + DEBUG 1 recovered from needing snapshot, resumed sending replication messages to 3 [StateSnapshot match=11 next=13 sentCommit=12 matchCommit=11 paused pendingSnap=12] > 1 handling Ready Ready MustSync=false: Messages: @@ -165,6 +165,6 @@ stabilize 1 # This is despite its PendingSnapshot having been 12. status 1 ---- -1: StateReplicate match=12 next=13 -2: StateReplicate match=12 next=13 -3: StateReplicate match=11 next=13 inflight=1 +1: StateReplicate match=12 next=13 sentCommit=11 matchCommit=11 +2: StateReplicate match=12 next=13 sentCommit=12 matchCommit=12 +3: StateReplicate match=11 next=13 sentCommit=12 matchCommit=11 inflight=1 diff --git a/pkg/raft/tracker/BUILD.bazel b/pkg/raft/tracker/BUILD.bazel index b82995d048c5..02db000787f7 100644 --- a/pkg/raft/tracker/BUILD.bazel +++ b/pkg/raft/tracker/BUILD.bazel @@ -8,12 +8,15 @@ go_library( "progress.go", "progresstracker.go", "state.go", + "supporttracker.go", ], importpath = "github.com/cockroachdb/cockroach/pkg/raft/tracker", visibility = ["//visibility:public"], deps = [ "//pkg/raft/quorum", "//pkg/raft/raftpb", + "//pkg/raft/raftstoreliveness", + "//pkg/util/hlc", ], ) @@ -22,9 +25,16 @@ go_test( srcs = [ "inflights_test.go", "progress_test.go", + "supporttracker_test.go", ], embed = [":tracker"], deps = [ + "//pkg/raft/quorum", + "//pkg/raft/raftpb", + "//pkg/raft/raftstoreliveness", + "//pkg/util/hlc", + "//pkg/util/leaktest", + "//pkg/util/log", "@com_github_stretchr_testify//assert", "@com_github_stretchr_testify//require", ], diff --git a/pkg/raft/tracker/electiontracker.go b/pkg/raft/tracker/electiontracker.go index f97f823f7ce7..77a28997faf0 100644 --- a/pkg/raft/tracker/electiontracker.go +++ b/pkg/raft/tracker/electiontracker.go @@ -23,7 +23,7 @@ type ElectionTracker struct { votes map[pb.PeerID]bool } -func MakeVoteTracker(config *quorum.Config) ElectionTracker { +func MakeElectionTracker(config *quorum.Config) ElectionTracker { return ElectionTracker{ config: config, votes: map[pb.PeerID]bool{}, diff --git a/pkg/raft/tracker/progress.go b/pkg/raft/tracker/progress.go index bb0c896789ca..07d9ae9a9cfc 100644 --- a/pkg/raft/tracker/progress.go +++ b/pkg/raft/tracker/progress.go @@ -48,13 +48,21 @@ type Progress struct { // In StateSnapshot, Next == PendingSnapshot + 1. Next uint64 - // sentCommit is the highest commit index in flight to the follower. 
+	// SentCommit is the highest commit index in flight to the follower.
 	//
 	// Generally, it is monotonic, but can regress in some cases, e.g. when
 	// converting to `StateProbe` or when receiving a rejection from a follower.
 	//
-	// In StateSnapshot, sentCommit == PendingSnapshot == Next-1.
-	sentCommit uint64
+	// In StateSnapshot, SentCommit == PendingSnapshot == Next-1.
+	SentCommit uint64
+
+	// MatchCommit is the commit index at which the follower is known to match the
+	// leader. It is durable on the follower.
+	// Best-effort invariant: MatchCommit <= SentCommit
+	// It's a best-effort invariant because it doesn't really affect correctness.
+	// The worst case if MatchCommit > SentCommit is that the leader will send
+	// an extra MsgApp to the follower.
+	MatchCommit uint64

 	// State defines how the leader should interact with the follower.
 	//
@@ -147,7 +155,7 @@ func (pr *Progress) BecomeProbe() {
 		pr.ResetState(StateProbe)
 		pr.Next = pr.Match + 1
 	}
-	pr.sentCommit = min(pr.sentCommit, pr.Next-1)
+	pr.SentCommit = min(pr.SentCommit, pr.Next-1)
 }

 // BecomeReplicate transitions into StateReplicate, resetting Next to Match+1.
@@ -162,7 +170,7 @@ func (pr *Progress) BecomeSnapshot(snapshoti uint64) {
 	pr.ResetState(StateSnapshot)
 	pr.PendingSnapshot = snapshoti
 	pr.Next = snapshoti + 1
-	pr.sentCommit = snapshoti
+	pr.SentCommit = snapshoti
 }

 // SentEntries updates the progress on the given number of consecutive entries
@@ -193,12 +201,22 @@ func (pr *Progress) CanBumpCommit(index uint64) bool {
 	// Next-1 in normal operation, or higher in some rare cases. Allow sending a
 	// commit index eagerly only if we haven't already sent one that bumps the
 	// follower's commit all the way to Next-1.
-	return index > pr.sentCommit && pr.sentCommit < pr.Next-1
+	return index > pr.SentCommit && pr.SentCommit < pr.Next-1
+}
+
+// IsFollowerCommitStale returns true if the follower's commit index is less
+// than index.
+// If the follower's commit index+1 is pr.Next, it means that sending a larger
+// commit index won't change anything, therefore we don't send it.
+func (pr *Progress) IsFollowerCommitStale(index uint64) bool {
+	return index > pr.MatchCommit && pr.MatchCommit+1 < pr.Next
 }

-// SentCommit updates the sentCommit.
-func (pr *Progress) SentCommit(commit uint64) {
-	pr.sentCommit = commit
+// MaybeUpdateSentCommit updates the SentCommit if it needs to be updated.
+func (pr *Progress) MaybeUpdateSentCommit(commit uint64) {
+	if commit > pr.SentCommit {
+		pr.SentCommit = commit
+	}
 }

 // MaybeUpdate is called when an MsgAppResp arrives from the follower, with the
@@ -213,6 +231,15 @@ func (pr *Progress) MaybeUpdate(n uint64) bool {
 	return true
 }

+// MaybeUpdateMatchCommit updates the match commit from a follower if it's
+// larger than the previous match commit.
+func (pr *Progress) MaybeUpdateMatchCommit(commit uint64) {
+	if commit > pr.MatchCommit {
+		pr.MatchCommit = commit
+		pr.SentCommit = max(pr.SentCommit, commit) // Best-effort invariant: SentCommit >= MatchCommit
+	}
+}
+
 // MaybeDecrTo adjusts the Progress to the receipt of a MsgApp rejection. The
 // arguments are the index of the append message rejected by the follower, and
 // the hint that we want to decrease to.
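A quick end-to-end illustration of the two update paths just defined — `MaybeUpdate` for the log index and `MaybeUpdateMatchCommit` for the commit index — using the numbers from the snapshot testdata earlier in this diff (standalone sketch; the raft step code that actually invokes these methods is not shown here):

```go
package main

import "fmt"

// progress stands in for tracker.Progress; only the relevant fields.
type progress struct {
	match, next             uint64
	sentCommit, matchCommit uint64
}

// onAppResp folds an accepted MsgAppResp, which now carries the follower's
// durable commit index, into the tracker. Both values only move forward.
func (pr *progress) onAppResp(index, commit uint64) {
	if index > pr.match { // MaybeUpdate
		pr.match = index
		pr.next = max(pr.next, index+1)
	}
	if commit > pr.matchCommit { // MaybeUpdateMatchCommit
		pr.matchCommit = commit
		pr.sentCommit = max(pr.sentCommit, commit) // SentCommit >= MatchCommit
	}
}

func main() {
	// Follower 3 right after applying the snapshot:
	// StateSnapshot match=0 next=12 sentCommit=11 matchCommit=0.
	pr := &progress{match: 0, next: 12, sentCommit: 11, matchCommit: 0}
	// 3->1 MsgAppResp Term:1 Log:0/11 Commit:11
	pr.onAppResp(11, 11)
	fmt.Printf("match=%d next=%d sentCommit=%d matchCommit=%d\n",
		pr.match, pr.next, pr.sentCommit, pr.matchCommit)
	// match=11 next=12 sentCommit=11 matchCommit=11
}
```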
@@ -235,8 +262,8 @@ func (pr *Progress) MaybeDecrTo(rejected, matchHint uint64) bool {
 		//
 		// TODO(tbg): why not use matchHint if it's larger?
 		pr.Next = pr.Match + 1
-		// Regress the sentCommit since it unlikely has been applied.
-		pr.sentCommit = min(pr.sentCommit, pr.Next-1)
+		// Regress the SentCommit since it likely has not been applied.
+		pr.SentCommit = min(pr.SentCommit, pr.Next-1)
 		return true
 	}

@@ -248,8 +275,8 @@ func (pr *Progress) MaybeDecrTo(rejected, matchHint uint64) bool {
 	}

 	pr.Next = max(min(rejected, matchHint+1), pr.Match+1)
-	// Regress the sentCommit since it unlikely has been applied.
-	pr.sentCommit = min(pr.sentCommit, pr.Next-1)
+	// Regress the SentCommit since it likely has not been applied.
+	pr.SentCommit = min(pr.SentCommit, pr.Next-1)
 	pr.MsgAppProbesPaused = false
 	return true
 }
@@ -295,7 +322,11 @@ func (pr *Progress) IsPaused() bool {
 // to guarantee that eventually the flow is either accepted or rejected.
 //
 // In StateSnapshot, we do not send append messages.
-func (pr *Progress) ShouldSendMsgApp(last, commit uint64) bool {
+//
+// If advanceCommit is true, it means that MsgApp owns the responsibility of
+// closing the followers' commit index gap even if some MsgApp messages get
+// dropped. If it's false, the responsibility is on MsgHeartbeat.
+func (pr *Progress) ShouldSendMsgApp(last, commit uint64, advanceCommit bool) bool {
 	switch pr.State {
 	case StateProbe:
 		return !pr.MsgAppProbesPaused
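The hunk below rewires the StateReplicate arm around two commit-related predicates. Restated standalone so the difference is easy to see: `canBumpCommit` gates the eager, repeatable bump, while `isFollowerCommitStale` gates the one-shot, probe-paused bump that only fires when advanceCommit is set. The field values are assumed for illustration:

```go
package main

import "fmt"

// progress stands in for tracker.Progress; values below are assumed.
type progress struct {
	next                    uint64
	sentCommit, matchCommit uint64
	msgAppProbesPaused      bool
}

// canBumpCommit: keep bumping eagerly while the commit index sent so far
// still leaves room below Next-1.
func (pr *progress) canBumpCommit(commit uint64) bool {
	return commit > pr.sentCommit && pr.sentCommit < pr.next-1
}

// isFollowerCommitStale: the follower's durable commit index is behind, and
// bumping it can still make progress below Next-1.
func (pr *progress) isFollowerCommitStale(commit uint64) bool {
	return commit > pr.matchCommit && pr.matchCommit+1 < pr.next
}

func main() {
	pr := &progress{next: 18, sentCommit: 17, matchCommit: 14}
	commit := uint64(17)
	// sentCommit already equals Next-1, so no further eager bumps...
	fmt.Println(pr.canBumpCommit(commit)) // false
	// ...but the follower hasn't durably confirmed it, so with advanceCommit
	// one more (paused-gated) empty MsgApp is still sent.
	fmt.Println(pr.isFollowerCommitStale(commit) && !pr.msgAppProbesPaused) // true
}
```

This split also explains why the heartbeats in the testdata above stopped carrying a Commit field: with advanceCommit, empty MsgApps own commit propagation.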
@@ -320,7 +351,26 @@
 	// - our commit index exceeds the in-flight commit index, and
 	// - sending it can commit at least one of the follower's entries
 	//   (including the ones still in flight to it).
-	return pr.CanBumpCommit(commit)
+	if pr.CanBumpCommit(commit) {
+		return true
+	}
+
+	// Send an empty MsgApp containing the latest commit index if we know that
+	// the follower's commit index is stale and we haven't recently sent a
+	// MsgApp (according to the MsgAppProbesPaused flag).
+
+	// NOTE: This is a different condition than the one above because we only
+	// send this message if pr.MsgAppProbesPaused is false. After this message,
+	// pr.MsgAppProbesPaused will be set to true until we receive a heartbeat
+	// response from the follower. In contrast, the condition above can keep
+	// sending empty MsgApps eagerly until we have sent the latest commit index
+	// to the follower.
+	// TODO(iskettaneh): Remove the dependency on MsgAppProbesPaused to send
+	// MsgApps.
+	if advanceCommit {
+		return pr.IsFollowerCommitStale(commit) && !pr.MsgAppProbesPaused
+	}
+	return false

 	case StateSnapshot:
 		return false
@@ -331,7 +381,8 @@

 func (pr *Progress) String() string {
 	var buf strings.Builder
-	fmt.Fprintf(&buf, "%s match=%d next=%d", pr.State, pr.Match, pr.Next)
+	fmt.Fprintf(&buf, "%s match=%d next=%d sentCommit=%d matchCommit=%d", pr.State, pr.Match,
+		pr.Next, pr.SentCommit, pr.MatchCommit)
 	if pr.IsLearner {
 		fmt.Fprint(&buf, " learner")
 	}
diff --git a/pkg/raft/tracker/progress_test.go b/pkg/raft/tracker/progress_test.go
index 5465af5559d0..a3edbc839c78 100644
--- a/pkg/raft/tracker/progress_test.go
+++ b/pkg/raft/tracker/progress_test.go
@@ -27,8 +27,10 @@ func TestProgressString(t *testing.T) {
 	ins := NewInflights(1, 0)
 	ins.Add(123, 1)
 	pr := &Progress{
-		Match:           1,
-		Next:            2,
+		MatchCommit:     1,
+		SentCommit:      2,
+		Match:           3,
+		Next:            4,
 		State:           StateSnapshot,
 		PendingSnapshot: 123,
 		RecentActive:    false,
@@ -36,7 +38,8 @@ func TestProgressString(t *testing.T) {
 		IsLearner:       true,
 		Inflights:       ins,
 	}
-	const exp = `StateSnapshot match=1 next=2 learner paused pendingSnap=123 inactive inflight=1[full]`
+	const exp = "StateSnapshot match=3 next=4 sentCommit=2 matchCommit=1 learner paused " +
+		"pendingSnap=123 inactive inflight=1[full]"
 	assert.Equal(t, exp, pr.String())
 }
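The new file below introduces the SupportTracker that the fortification messages in the earlier testdata (MsgFortifyLeaderResp carrying a LeadEpoch) feed into. One detail worth previewing: recorded epochs are monotonic, so out-of-order fortify responses cannot regress tracked support. A tiny standalone restatement of that rule (illustrative types, not the pb.PeerID/pb.Epoch used by the real code):

```go
package main

import "fmt"

type peerID int
type epoch int

// recordSupport mirrors SupportTracker.RecordSupport below: taking the max
// guards against reordered MsgFortifyLeaderResp delivery.
func recordSupport(support map[peerID]epoch, id peerID, e epoch) {
	support[id] = max(support[id], e)
}

func main() {
	support := map[peerID]epoch{}
	recordSupport(support, 2, 2)
	recordSupport(support, 2, 1) // stale, reordered response: ignored
	fmt.Println(support[2])      // 2
}
```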
diff --git a/pkg/raft/tracker/supporttracker.go b/pkg/raft/tracker/supporttracker.go
new file mode 100644
index 000000000000..02b231d7aea4
--- /dev/null
+++ b/pkg/raft/tracker/supporttracker.go
@@ -0,0 +1,99 @@
+// Copyright 2024 The Cockroach Authors.
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package tracker
+
+import (
+	"fmt"
+	"slices"
+	"strings"
+
+	"github.com/cockroachdb/cockroach/pkg/raft/quorum"
+	pb "github.com/cockroachdb/cockroach/pkg/raft/raftpb"
+	"github.com/cockroachdb/cockroach/pkg/raft/raftstoreliveness"
+	"github.com/cockroachdb/cockroach/pkg/util/hlc"
+)
+
+// SupportTracker is used to track fortification support from peers. This can
+// then be used to compute when a leader's support expires.
+type SupportTracker struct {
+	config        *quorum.Config
+	storeLiveness raftstoreliveness.StoreLiveness
+
+	// support contains a map of nodes which have supported the leader through
+	// fortification handshakes, and the corresponding Store Liveness epochs that
+	// they have supported the leader in.
+	support map[pb.PeerID]pb.Epoch
+}
+
+// MakeSupportTracker initializes a SupportTracker.
+func MakeSupportTracker(
+	config *quorum.Config, storeLiveness raftstoreliveness.StoreLiveness,
+) SupportTracker {
+	st := SupportTracker{
+		config:        config,
+		storeLiveness: storeLiveness,
+		support:       map[pb.PeerID]pb.Epoch{},
+	}
+	return st
+}
+
+// RecordSupport records that the node with the given id supported this Raft
+// instance at the supplied store liveness epoch.
+func (st *SupportTracker) RecordSupport(id pb.PeerID, epoch pb.Epoch) {
+	// The supported epoch should never regress. Guard against out of order
+	// delivery of fortify responses by using max.
+	st.support[id] = max(st.support[id], epoch)
+}
+
+// Reset clears out any previously tracked support.
+func (st *SupportTracker) Reset() {
+	clear(st.support)
+	// TODO(arul): when we introduce st.LeadSupportUntil we need to make sure it
+	// isn't reset here, because we don't want it to regress when a leader steps
+	// down.
+}
+
+// LeadSupportUntil returns the timestamp until which the leader is guaranteed
+// support, based on the support being tracked for it by its peers.
+func (st *SupportTracker) LeadSupportUntil() hlc.Timestamp {
+	// TODO(arul): avoid this map allocation as we're calling LeadSupportUntil
+	// from hot paths.
+	supportExpMap := make(map[pb.PeerID]hlc.Timestamp)
+	for id, supportEpoch := range st.support {
+		curEpoch, curExp, ok := st.storeLiveness.SupportFrom(id)
+		// NB: We can't assert that supportEpoch <= curEpoch because there may be a
+		// race between a successful MsgFortifyLeaderResp and the store liveness
+		// heartbeat response that lets the leader know the follower's store is
+		// supporting the leader's store at the epoch in the MsgFortifyLeaderResp
+		// message.
+		if ok && curEpoch == supportEpoch {
+			supportExpMap[id] = curExp
+		}
+	}
+	return st.config.Voters.LeadSupportExpiration(supportExpMap)
+}
+
+func (st *SupportTracker) String() string {
+	if len(st.support) == 0 {
+		return "empty"
+	}
+	// Print the map in sorted order as we assert on its output in tests.
+	ids := make([]pb.PeerID, 0, len(st.support))
+	for id := range st.support {
+		ids = append(ids, id)
+	}
+	slices.Sort(ids)
+	var buf strings.Builder
+	for _, id := range ids {
+		fmt.Fprintf(&buf, "%d : %d\n", id, st.support[id])
+	}
+	return buf.String()
+}
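Before the tests, a standalone model of what `LeadSupportUntil` computes. `LeadSupportExpiration` itself lives in the quorum package and is not part of this diff; the model below assumes it returns the highest timestamp through which a majority still supports the leader, and substitutes plain int64s for hlc.Timestamp. The numbers mirror the mock liveness map in the test that follows:

```go
package main

import (
	"fmt"
	"sort"
)

type peerID int

type livenessEntry struct {
	epoch int
	exp   int64 // stand-in for the hlc.Timestamp support expiration
}

func main() {
	// What SupportFrom currently reports for each peer (epoch, expiration).
	liveness := map[peerID]livenessEntry{1: {10, 10}, 2: {20, 15}, 3: {30, 20}}
	// Epochs at which each peer fortified this leader (RecordSupport calls).
	support := map[peerID]int{1: 10, 2: 20, 3: 30}

	// Keep expirations only where the fortified epoch is still the current
	// store liveness epoch; a mismatch in either direction voids support.
	var exps []int64
	for id, supportEpoch := range support {
		if cur, ok := liveness[id]; ok && cur.epoch == supportEpoch {
			exps = append(exps, cur.exp)
		}
	}

	// With 3 voters, a quorum is 2, so the leader is supported until the
	// 2nd-highest expiration: until then, at least 2 peers support it.
	const quorum = 2
	sort.Slice(exps, func(i, j int) bool { return exps[i] > exps[j] })
	if len(exps) < quorum {
		fmt.Println("no quorum support")
		return
	}
	fmt.Println("lead support until:", exps[quorum-1]) // 15
}
```

With all three peers supporting at current epochs this yields 15, the `expTS: ts(15)` case below; dropping peer 2 (epoch mismatch or no record) degrades the answer to 10, matching the `expTS: ts(10)` cases.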
diff --git a/pkg/raft/tracker/supporttracker_test.go b/pkg/raft/tracker/supporttracker_test.go
new file mode 100644
index 000000000000..36f52cea1a33
--- /dev/null
+++ b/pkg/raft/tracker/supporttracker_test.go
@@ -0,0 +1,187 @@
+// Copyright 2024 The Cockroach Authors.
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package tracker
+
+import (
+	"testing"
+
+	"github.com/cockroachdb/cockroach/pkg/raft/quorum"
+	pb "github.com/cockroachdb/cockroach/pkg/raft/raftpb"
+	"github.com/cockroachdb/cockroach/pkg/raft/raftstoreliveness"
+	"github.com/cockroachdb/cockroach/pkg/util/hlc"
+	"github.com/cockroachdb/cockroach/pkg/util/leaktest"
+	"github.com/cockroachdb/cockroach/pkg/util/log"
+	"github.com/stretchr/testify/require"
+)
+
+func TestLeadSupportUntil(t *testing.T) {
+	defer leaktest.AfterTest(t)()
+	defer log.Scope(t).Close(t)
+
+	ts := func(ts int64) hlc.Timestamp {
+		return hlc.Timestamp{
+			WallTime: ts,
+		}
+	}
+
+	mockLiveness3Peers := makeMockStoreLiveness(
+		map[pb.PeerID]mockLivenessEntry{
+			1: makeMockLivenessEntry(10, ts(10)),
+			2: makeMockLivenessEntry(20, ts(15)),
+			3: makeMockLivenessEntry(30, ts(20)),
+		},
+	)
+
+	testCases := []struct {
+		ids           []pb.PeerID
+		storeLiveness raftstoreliveness.StoreLiveness
+		setup         func(tracker *SupportTracker)
+		expTS         hlc.Timestamp
+	}{
+		{
+			ids:           []pb.PeerID{1, 2, 3},
+			storeLiveness: mockLiveness3Peers,
+			setup: func(supportTracker *SupportTracker) {
+				// No support recorded.
+			},
+			expTS: hlc.Timestamp{},
+		},
+		{
+			ids:           []pb.PeerID{1, 2, 3},
+			storeLiveness: mockLiveness3Peers,
+			setup: func(supportTracker *SupportTracker) {
+				supportTracker.RecordSupport(1, 10)
+			},
+			expTS: hlc.Timestamp{},
+		},
+		{
+			ids:           []pb.PeerID{1, 2, 3},
+			storeLiveness: mockLiveness3Peers,
+			setup: func(supportTracker *SupportTracker) {
+				supportTracker.RecordSupport(1, 10)
+				supportTracker.RecordSupport(3, 30)
+			},
+			expTS: ts(10),
+		},
+		{
+			ids:           []pb.PeerID{1, 2, 3},
+			storeLiveness: mockLiveness3Peers,
+			setup: func(supportTracker *SupportTracker) {
+				supportTracker.RecordSupport(1, 10)
+				supportTracker.RecordSupport(3, 30)
+				supportTracker.RecordSupport(2, 20)
+			},
+			expTS: ts(15),
+		},
+		{
+			ids:           []pb.PeerID{1, 2, 3},
+			storeLiveness: mockLiveness3Peers,
+			setup: func(supportTracker *SupportTracker) {
+				// Record support at expired epochs.
+				supportTracker.RecordSupport(1, 9)
+				supportTracker.RecordSupport(3, 29)
+				supportTracker.RecordSupport(2, 19)
+			},
+			expTS: hlc.Timestamp{},
+		},
+		{
+			ids:           []pb.PeerID{1, 2, 3},
+			storeLiveness: mockLiveness3Peers,
+			setup: func(supportTracker *SupportTracker) {
+				// Record support at newer epochs than what are present in
+				// StoreLiveness.
+				//
+				// NB: This is possible if there is a race between store liveness
+				// heartbeat updates and fortification responses.
+				supportTracker.RecordSupport(1, 11)
+				supportTracker.RecordSupport(3, 31)
+				supportTracker.RecordSupport(2, 21)
+			},
+			expTS: hlc.Timestamp{},
+		},
+		{
+			ids:           []pb.PeerID{1, 2, 3},
+			storeLiveness: mockLiveness3Peers,
+			setup: func(supportTracker *SupportTracker) {
+				// One of the epochs being supported is expired.
+				supportTracker.RecordSupport(1, 10)
+				supportTracker.RecordSupport(3, 29) // expired
+				supportTracker.RecordSupport(2, 20)
+			},
+			expTS: ts(10),
+		},
+		{
+			ids:           []pb.PeerID{1, 2, 3},
+			storeLiveness: mockLiveness3Peers,
+			setup: func(supportTracker *SupportTracker) {
+				// Two of the epochs being supported are expired.
+				supportTracker.RecordSupport(1, 10)
+				supportTracker.RecordSupport(3, 29) // expired
+				supportTracker.RecordSupport(2, 19) // expired
+			},
+			expTS: hlc.Timestamp{},
+		},
+	}
+
+	for _, tc := range testCases {
+		cfg := quorum.MakeEmptyConfig()
+		for _, id := range tc.ids {
+			cfg.Voters[0][id] = struct{}{}
+		}
+		supportTracker := MakeSupportTracker(&cfg, tc.storeLiveness)
+
+		tc.setup(&supportTracker)
+		require.Equal(t, tc.expTS, supportTracker.LeadSupportUntil())
+	}
+}
+
+type mockLivenessEntry struct {
+	epoch pb.Epoch
+	ts    hlc.Timestamp
+}
+
+func makeMockLivenessEntry(epoch pb.Epoch, ts hlc.Timestamp) mockLivenessEntry {
+	return mockLivenessEntry{
+		epoch: epoch,
+		ts:    ts,
+	}
+}
+
+type mockStoreLiveness struct {
+	liveness map[pb.PeerID]mockLivenessEntry
+}
+
+func makeMockStoreLiveness(liveness map[pb.PeerID]mockLivenessEntry) mockStoreLiveness {
+	return mockStoreLiveness{
+		liveness: liveness,
+	}
+}
+
+// SupportFor implements the raftstoreliveness.StoreLiveness interface.
+func (mockStoreLiveness) SupportFor(pb.PeerID) (pb.Epoch, bool) {
+	panic("unimplemented")
+}
+
+// SupportFrom implements the raftstoreliveness.StoreLiveness interface.
+func (m mockStoreLiveness) SupportFrom(id pb.PeerID) (pb.Epoch, hlc.Timestamp, bool) {
+	entry := m.liveness[id]
+	return entry.epoch, entry.ts, true
+}
+
+// SupportFromEnabled implements the raftstoreliveness.StoreLiveness interface.
+func (mockStoreLiveness) SupportFromEnabled() bool {
+	return true
+}
+
+// SupportExpired implements the raftstoreliveness.StoreLiveness interface.
+func (mockStoreLiveness) SupportExpired(hlc.Timestamp) bool { + panic("unimplemented") +} diff --git a/pkg/raft/types.go b/pkg/raft/types.go index 9c243c833afa..64dca2e03103 100644 --- a/pkg/raft/types.go +++ b/pkg/raft/types.go @@ -38,21 +38,27 @@ func pbEntryID(entry *pb.Entry) entryID { return entryID{term: entry.Term, index: entry.Index} } -// logMark is a position in a log consistent with the leader at a specific term. +// LogMark is a position in a log consistent with the leader at a specific term. // // This is different from entryID. The entryID ties an entry to the term of the -// leader who proposed it, while the logMark identifies an entry in a particular +// leader who proposed it, while the LogMark identifies an entry in a particular // leader's coordinate system. Different leaders can have different entries at a // particular index. // // Generally, all entries in raft form a tree (branching when a new leader -// starts proposing entries at its term). A logMark identifies a position in a +// starts proposing entries at its term). A LogMark identifies a position in a // particular branch of this tree. -type logMark struct { - // term is the term of the leader whose log is considered. - term uint64 - // index is the position in this leader's log. - index uint64 +type LogMark struct { + // Term is the term of the leader whose log is considered. + Term uint64 + // Index is the position in this leader's log. + Index uint64 +} + +// After returns true if the log mark logically happens after the other mark. +// This represents the order of log writes in raft. +func (l LogMark) After(other LogMark) bool { + return l.Term > other.Term || l.Term == other.Term && l.Index > other.Index } // logSlice describes a correct slice of a raft log. @@ -108,9 +114,9 @@ func (s logSlice) lastEntryID() entryID { return s.prev } -// mark returns the logMark identifying the end of this logSlice. -func (s logSlice) mark() logMark { - return logMark{term: s.term, index: s.lastIndex()} +// mark returns the LogMark identifying the end of this logSlice. +func (s logSlice) mark() LogMark { + return LogMark{Term: s.term, Index: s.lastIndex()} } // termAt returns the term of the entry at the given index. @@ -153,7 +159,7 @@ func (s logSlice) valid() error { // observed this committed state. // // Semantically, from the log perspective, this type is equivalent to a logSlice -// from 0 to lastEntryID(), plus a commit logMark. All leader logs at terms >= +// from 0 to lastEntryID(), plus a commit LogMark. All leader logs at terms >= // snapshot.term contain all entries up to the lastEntryID(). At earlier terms, // logs may or may not be consistent with this snapshot, depending on whether // they contain the lastEntryID(). @@ -186,10 +192,10 @@ func (s snapshot) lastEntryID() entryID { return entryID{term: s.snap.Metadata.Term, index: s.snap.Metadata.Index} } -// mark returns committed logMark of this snapshot, in the coordinate system of +// mark returns committed LogMark of this snapshot, in the coordinate system of // the leader who observes this committed state. -func (s snapshot) mark() logMark { - return logMark{term: s.term, index: s.snap.Metadata.Index} +func (s snapshot) mark() LogMark { + return LogMark{Term: s.term, Index: s.snap.Metadata.Index} } // valid returns nil iff the snapshot is well-formed. 
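Since LogMark and its After method are now exported, a quick standalone check of the ordering they define (term dominates, index breaks ties; local types mirror the definition in types.go above):

```go
package main

import "fmt"

type logMark struct{ term, index uint64 }

// after mirrors LogMark.After above.
func (l logMark) after(o logMark) bool {
	return l.term > o.term || l.term == o.term && l.index > o.index
}

func main() {
	fmt.Println(logMark{2, 5}.after(logMark{1, 9})) // true: newer term wins despite lower index
	fmt.Println(logMark{2, 5}.after(logMark{2, 5})) // false: the ordering is strict
	fmt.Println(logMark{2, 5}.after(logMark{2, 4})) // true: same term, higher index
}
```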
diff --git a/pkg/raft/types_test.go b/pkg/raft/types_test.go
index 68d3ce48296b..2ab269a10906 100644
--- a/pkg/raft/types_test.go
+++ b/pkg/raft/types_test.go
@@ -92,7 +92,7 @@ func TestLogSlice(t *testing.T) {
 			last := s.lastEntryID()
 			require.Equal(t, tt.last, last)
 			require.Equal(t, last.index, s.lastIndex())
-			require.Equal(t, logMark{term: tt.term, index: last.index}, s.mark())
+			require.Equal(t, LogMark{Term: tt.term, Index: last.index}, s.mark())

 			require.Equal(t, tt.prev.term, s.termAt(tt.prev.index))
 			for _, e := range tt.entries {
@@ -164,7 +164,7 @@ func TestSnapshot(t *testing.T) {
 			last := s.lastEntryID()
 			require.Equal(t, tt.last, last)
 			require.Equal(t, last.index, s.lastIndex())
-			require.Equal(t, logMark{term: tt.term, index: last.index}, s.mark())
+			require.Equal(t, LogMark{Term: tt.term, Index: last.index}, s.mark())
 		})
 	}
 }
diff --git a/pkg/raft/util.go b/pkg/raft/util.go
index 342d3545af59..a64e9db8bc8a 100644
--- a/pkg/raft/util.go
+++ b/pkg/raft/util.go
@@ -52,6 +52,14 @@ var isResponseMsg = [...]bool{
 	pb.MsgFortifyLeaderResp: true,
 }

+var isMsgFromLeader = [...]bool{
+	pb.MsgApp:           true,
+	pb.MsgSnap:          true,
+	pb.MsgHeartbeat:     true,
+	pb.MsgFortifyLeader: true,
+	pb.MsgTimeoutNow:    true,
+}
+
 func isMsgInArray(msgt pb.MessageType, arr []bool) bool {
 	i := int(msgt)
 	return i < len(arr) && arr[i]
@@ -65,6 +73,10 @@ func IsResponseMsg(msgt pb.MessageType) bool {
 	return isMsgInArray(msgt, isResponseMsg[:])
 }

+func IsMsgFromLeader(msgt pb.MessageType) bool {
+	return isMsgInArray(msgt, isMsgFromLeader[:])
+}
+
 func IsLocalMsgTarget(id pb.PeerID) bool {
 	return id == LocalAppendThread || id == LocalApplyThread
 }
@@ -97,16 +109,10 @@ func DescribeSoftState(ss SoftState) string {
 	return fmt.Sprintf("State:%s", ss.RaftState)
 }

-func DescribeConfState(state pb.ConfState) string {
-	return fmt.Sprintf(
-		"Voters:%v VotersOutgoing:%v Learners:%v LearnersNext:%v AutoLeave:%v",
-		state.Voters, state.VotersOutgoing, state.Learners, state.LearnersNext, state.AutoLeave,
-	)
-}
-
 func DescribeSnapshot(snap pb.Snapshot) string {
 	m := snap.Metadata
-	return fmt.Sprintf("Index:%d Term:%d ConfState:%s", m.Index, m.Term, DescribeConfState(m.ConfState))
+	return fmt.Sprintf("Index:%d Term:%d ConfState:%s",
+		m.Index, m.Term, m.ConfState.Describe())
 }

 func DescribeReady(rd Ready, f EntryFormatter) string {
diff --git a/pkg/raft/util_test.go b/pkg/raft/util_test.go
index f6b1419a721b..9b54fb1723c0 100644
--- a/pkg/raft/util_test.go
+++ b/pkg/raft/util_test.go
@@ -147,6 +147,45 @@ func TestIsResponseMsg(t *testing.T) {
 	}
 }

+func TestMsgFromLeader(t *testing.T) {
+	tests := []struct {
+		msgt         pb.MessageType
+		isFromLeader bool
+	}{
+		{pb.MsgHup, false},
+		{pb.MsgBeat, false},
+		{pb.MsgUnreachable, false},
+		{pb.MsgSnapStatus, false},
+		{pb.MsgCheckQuorum, false},
+		{pb.MsgTransferLeader, false},
+		{pb.MsgProp, false},
+		{pb.MsgApp, true},
+		{pb.MsgAppResp, false},
+		{pb.MsgVote, false},
+		{pb.MsgVoteResp, false},
+		{pb.MsgSnap, true},
+		{pb.MsgHeartbeat, true},
+		{pb.MsgHeartbeatResp, false},
+		{pb.MsgTimeoutNow, true},
+		{pb.MsgPreVote, false},
+		{pb.MsgPreVoteResp, false},
+		{pb.MsgStorageAppend, false},
+		{pb.MsgStorageAppendResp, false},
+		{pb.MsgStorageApply, false},
+		{pb.MsgStorageApplyResp, false},
+		{pb.MsgForgetLeader, false},
+		{pb.MsgFortifyLeader, true},
+		{pb.MsgFortifyLeaderResp, false},
+	}
+
+	for i, tt := range tests {
+		got := IsMsgFromLeader(tt.msgt)
+		if got != tt.isFromLeader {
+			t.Errorf("#%d: got %v, want %v", i, got, tt.isFromLeader)
+		}
+	}
+}
+
 // TestPayloadSizeOfEmptyEntry ensures that
payloadSize of empty entry is always zero. // This property is important because new leaders append an empty entry to their log, // and we don't want this to count towards the uncommitted log quota. diff --git a/pkg/roachpb/BUILD.bazel b/pkg/roachpb/BUILD.bazel index 1f81819e935c..d19f00f37bad 100644 --- a/pkg/roachpb/BUILD.bazel +++ b/pkg/roachpb/BUILD.bazel @@ -75,7 +75,6 @@ go_test( "//pkg/keys", "//pkg/kv/kvserver/concurrency/isolation", "//pkg/kv/kvserver/concurrency/lock", - "//pkg/raft", "//pkg/raft/confchange", "//pkg/raft/quorum", "//pkg/raft/raftpb", diff --git a/pkg/roachpb/metadata.proto b/pkg/roachpb/metadata.proto index 3f41c4b864da..58b1ec5a8576 100644 --- a/pkg/roachpb/metadata.proto +++ b/pkg/roachpb/metadata.proto @@ -463,6 +463,10 @@ message Version { // cycle. They are subversions that are never the end versions of a release, // i.e. users of stable, public release will only use binaries with the // internal version set to 0. + // This field will be odd for fence versions, which allow the upgrades + // infrastructure to safely step through consecutive cluster versions. Note + // that -1 is a legal value, as that is the fence version for all final + // releases, which have an internal version of 0. optional int32 internal = 4 [(gogoproto.nullable) = false]; } diff --git a/pkg/roachpb/metadata_replicas_test.go b/pkg/roachpb/metadata_replicas_test.go index 479766fcbf3b..c90603fc3af3 100644 --- a/pkg/roachpb/metadata_replicas_test.go +++ b/pkg/roachpb/metadata_replicas_test.go @@ -16,7 +16,6 @@ import ( "testing" "time" - "github.com/cockroachdb/cockroach/pkg/raft" "github.com/cockroachdb/cockroach/pkg/raft/confchange" "github.com/cockroachdb/cockroach/pkg/raft/quorum" "github.com/cockroachdb/cockroach/pkg/raft/raftpb" @@ -188,8 +187,8 @@ func TestReplicaDescriptorsConfState(t *testing.T) { for _, test := range tests { t.Run("", func(t *testing.T) { r := MakeReplicaSet(test.in) - cs := r.ConfState() - require.Equal(t, test.out, raft.DescribeConfState(cs)) + cs := r.ConfState().Describe() + require.Equal(t, test.out, cs) }) } } diff --git a/pkg/roachpb/span_stats.proto b/pkg/roachpb/span_stats.proto index 1c67bc13f41d..c7d6b493a4fd 100644 --- a/pkg/roachpb/span_stats.proto +++ b/pkg/roachpb/span_stats.proto @@ -70,7 +70,11 @@ message SpanStats { // stale. cockroach.storage.enginepb.MVCCStats approximate_total_stats = 7 [(gogoproto.nullable) = false]; - // NEXT ID: 8. + + // Unique store ids for the requested span. + repeated int32 store_ids = 8 [(gogoproto.customname) = "StoreIDs", (gogoproto.casttype) = "StoreID"]; + + // NEXT ID: 9. } message SpanStatsResponse { diff --git a/pkg/roachpb/version.go b/pkg/roachpb/version.go index 1947c368d49a..27b4e09bff0e 100644 --- a/pkg/roachpb/version.go +++ b/pkg/roachpb/version.go @@ -14,7 +14,9 @@ import ( "fmt" "regexp" "strconv" + "strings" + "github.com/cockroachdb/cockroach/pkg/util/buildutil" "github.com/cockroachdb/errors" "github.com/cockroachdb/redact" ) @@ -69,6 +71,16 @@ func (v Version) SafeFormat(p redact.SafePrinter, _ rune) { p.Printf("%d.%d", v.Major, v.Minor) return } + // NB: Internal may be -1. This is the case for all fence versions for final + // versions of a release. Handle it specially to avoid printing the -1, which + // is confusable with the `-` separator. 
+ if v.Internal < 0 { + if buildutil.CrdbTestBuild && v.Internal != -1 { + panic(errors.Newf("%s should not have Internal less than -1", v)) + } + p.Printf("%d.%d-upgrading-final-step", v.Major, v.Minor) + return + } // If the version is offset, remove the offset and add it back to the result. We want // 1000023.1-upgrading-to-1000023.2-step-002, not 1000023.1-upgrading-to-23.2-step-002. noOffsetVersion := v @@ -94,22 +106,52 @@ func (v Version) IsFinal() bool { return v.Internal == 0 } +// IsFence returns true if this is a fence version. +// +// A version is a fence version iff Internal is odd. +func (v Version) IsFence() bool { + // NB: Internal may be -1. This is the case for all fence versions for final + // versions of a release. + return v.Internal%2 != 0 +} + // PrettyPrint returns the value in a format that makes it apparent whether or // not it is a fence version. func (v Version) PrettyPrint() string { - // If we're a version greater than v20.2 and have an odd internal version, - // we're a fence version. See fenceVersionFor in pkg/upgrade to understand - // what these are. - fenceVersion := !v.LessEq(Version{Major: 20, Minor: 2}) && (v.Internal%2) == 1 - if !fenceVersion { + if !v.IsFence() { return v.String() } return fmt.Sprintf("%v(fence)", v) } +// FenceVersion is the fence version -- the internal immediately prior -- for +// the given version. +// +// Fence versions allow the upgrades infrastructure to safely step through +// consecutive cluster versions in the presence of Nodes (running any binary +// version) being added to the cluster. See the upgrademanager package for +// intended usage. +// +// Fence versions (and the upgrades infrastructure entirely) were introduced in +// the 21.1 release cycle. In the same release cycle, we introduced the +// invariant that new user-defined versions (users being crdb engineers) must +// always have even-numbered Internal versions, thus reserving the odd numbers +// to slot in fence versions for each cluster version. See top-level +// documentation in the clusterversion package for more details. +func (v Version) FenceVersion() Version { + if v.IsFence() { + panic(errors.Newf("%s already is a fence version", v)) + } + // NB: Internal may be -1 after this. This is the case for all final versions + // for a release. + fenceV := v + fenceV.Internal-- + return fenceV +} + var ( verPattern = regexp.MustCompile( - `^(?P[0-9]+)\.(?P[0-9]+)(|(-|-upgrading(|-to-[0-9]+.[0-9]+)-step-)(?P[0-9]+))$`, + `^(?P[0-9]+)\.(?P[0-9]+)(|-upgrading-final-step|(-|-upgrading(|-to-[0-9]+.[0-9]+)-step-)(?P[-0-9]+))$`, ) verPatternMajorIdx = verPattern.SubexpIndex("major") verPatternMinorIdx = verPattern.SubexpIndex("minor") @@ -138,9 +180,14 @@ func ParseVersion(s string) (Version, error) { return int32(n) } v := Version{ - Major: toInt(matches[verPatternMajorIdx]), - Minor: toInt(matches[verPatternMinorIdx]), - Internal: toInt(matches[verPatternInternalIdx]), + Major: toInt(matches[verPatternMajorIdx]), + Minor: toInt(matches[verPatternMinorIdx]), + } + // NB: Internal is -1 for all fence versions for final versions of a release. + if strings.Contains(s, "-upgrading-final-step") { + v.Internal = -1 + } else { + v.Internal = toInt(matches[verPatternInternalIdx]) } if err != nil { return Version{}, errors.Wrapf(err, "invalid version %s", s) @@ -198,7 +245,9 @@ var successorSeries = map[ReleaseSeries]ReleaseSeries{ // ReleaseSeries obtains the release series for the given version. 
Specifically: // - if the version is final (Internal=0), the ReleaseSeries has the same major/minor. // - if the version is a transitional version during upgrade (e.g. v23.1-8), -// the result is the next final version (e.g. v23.1). +// the result is the next final version (e.g. v23.2). +// - if the internal version is -1 (which is the case for the fence +// version of a final version), the result has the same major/minor. // // For non-final versions (which indicate an update to the next series), this // requires knowledge of the next series; unknown non-final versions will return @@ -211,6 +260,14 @@ func (v Version) ReleaseSeries() (s ReleaseSeries, ok bool) { if v.IsFinal() { return base, true } + // NB: Internal may be -1. This is the case for all fence versions for final + // versions of a release. + if v.Internal < 0 { + if buildutil.CrdbTestBuild && v.Internal != -1 { + panic(errors.Newf("%s should not have Internal less than -1", v)) + } + return base, true + } s, ok = base.Successor() return s, ok } diff --git a/pkg/roachpb/version_test.go b/pkg/roachpb/version_test.go index fbfb880aa3ad..89ccbf2b96ea 100644 --- a/pkg/roachpb/version_test.go +++ b/pkg/roachpb/version_test.go @@ -29,6 +29,11 @@ func TestParseVersion(t *testing.T) { {s: "1000023.1-upgrading-to-1000023.2-step-004", v: Version{Major: 1000023, Minor: 1, Internal: 4}, roundtrip: true}, {s: "23.1-4", v: Version{Major: 23, Minor: 1, Internal: 4}}, {s: "23.1-upgrading-step-004", v: Version{Major: 23, Minor: 1, Internal: 4}}, + // NB: The fence version for a final version will have Internal=-1. + {s: "23.2-upgrading-final-step", v: Version{Major: 23, Minor: 2, Internal: -1}, roundtrip: true}, + // We used to have unintuitive formatting logic for the -1 internal version. + // See https://github.com/cockroachdb/cockroach/issues/129460. 
+ {s: "23.2-upgrading-step--01", v: Version{Major: 23, Minor: 2, Internal: -1}}, } for _, tc := range testData { t.Run("", func(t *testing.T) { diff --git a/pkg/roachprod/BUILD.bazel b/pkg/roachprod/BUILD.bazel index 14d9606fde5b..e4af1abd5327 100644 --- a/pkg/roachprod/BUILD.bazel +++ b/pkg/roachprod/BUILD.bazel @@ -36,6 +36,7 @@ go_library( "@com_github_cockroachdb_errors//:errors", "@com_github_cockroachdb_errors//oserror", "@com_github_dataexmachina_dev_side_eye_go//sideeyeclient", + "@org_golang_x_sys//unix", ], ) diff --git a/pkg/roachprod/cloud/BUILD.bazel b/pkg/roachprod/cloud/BUILD.bazel index eeffb4d90db9..d3490e75953d 100644 --- a/pkg/roachprod/cloud/BUILD.bazel +++ b/pkg/roachprod/cloud/BUILD.bazel @@ -17,6 +17,7 @@ go_library( "//pkg/roachprod/ui", "//pkg/roachprod/vm", "//pkg/roachprod/vm/aws", + "//pkg/roachprod/vm/azure", "//pkg/roachprod/vm/gce", "//pkg/util/timeutil", "@com_github_aws_aws_sdk_go//aws", diff --git a/pkg/roachprod/cloud/gc.go b/pkg/roachprod/cloud/gc.go index ae73bcf9dd37..eade5d839597 100644 --- a/pkg/roachprod/cloud/gc.go +++ b/pkg/roachprod/cloud/gc.go @@ -25,6 +25,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/roachprod/config" "github.com/cockroachdb/cockroach/pkg/roachprod/logger" "github.com/cockroachdb/cockroach/pkg/roachprod/vm" + "github.com/cockroachdb/cockroach/pkg/roachprod/vm/azure" "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/errors" "github.com/cockroachdb/errors/oserror" @@ -564,3 +565,39 @@ func GCDNS(l *logger.Logger, cloud *Cloud, dryrun bool) error { } return nil } + +// GCAzure iterates through subscription IDs passed in --azure-subscription-names +// and performs GC on them. +// N.B. this function does not preserve the existing subscription ID set in the +// provider. +func GCAzure(l *logger.Logger, dryrun bool) error { + provider := vm.Providers[azure.ProviderName] + var azureSubscriptions []string + p, ok := provider.(*azure.Provider) + if ok { + azureSubscriptions = p.SubscriptionNames + } + + if len(azureSubscriptions) == 0 { + // If no subscription names were specified, then fall back to cleaning up + // the subscription ID specified in the env or the default subscription. + cld, _ := ListCloud(l, vm.ListOptions{IncludeEmptyClusters: true, IncludeProviders: []string{azure.ProviderName}}) + return GCClusters(l, cld, dryrun) + } + + ctx, cancel := context.WithTimeout(context.Background(), p.OperationTimeout) + defer cancel() + var combinedErrors error + for _, subscription := range azureSubscriptions { + if err := p.SetSubscription(ctx, subscription); err != nil { + combinedErrors = errors.CombineErrors(combinedErrors, err) + continue + } + + cld, _ := ListCloud(l, vm.ListOptions{IncludeEmptyClusters: true, IncludeProviders: []string{azure.ProviderName}}) + if err := GCClusters(l, cld, dryrun); err != nil { + combinedErrors = errors.CombineErrors(combinedErrors, err) + } + } + return combinedErrors +} diff --git a/pkg/roachprod/config/config.go b/pkg/roachprod/config/config.go index b20e6677c8a3..54211b500589 100644 --- a/pkg/roachprod/config/config.go +++ b/pkg/roachprod/config/config.go @@ -149,6 +149,10 @@ const ( // DefaultNumFilesLimit is the default limit on the number of files that can // be opened by the process. DefaultNumFilesLimit = 65 << 13 + + // DisableMetamorphicTestingEnvVar is the env var needed to disable metamorphic testing + // from being eligible. 
+ DisableMetamorphicTestingEnvVar = "COCKROACH_INTERNAL_DISABLE_METAMORPHIC_TESTING=true" ) // DefaultEnvVars returns default environment variables used in conjunction with CLI and MakeClusterSettings. @@ -162,6 +166,13 @@ func DefaultEnvVars() []string { // in testing the upgrade logic that users would actually run when // they upgrade from one release to another. "COCKROACH_TESTING_FORCE_RELEASE_BRANCH=true", + // Disable metamorphic testing to reduce flakiness as most metamorphic + // constants are not fully tested for compatibility in roachtests. + // Passing this in when the cluster is started would suffice in terms + // of correctness, but the metamorphic framework logs constants during + // init. This leads to a lot of noise in the logs, even if metamorphic + // constants aren't used in the test itself. + DisableMetamorphicTestingEnvVar, } } diff --git a/pkg/roachprod/install/cluster_synced.go b/pkg/roachprod/install/cluster_synced.go index 9537d9958af0..772302959a8b 100644 --- a/pkg/roachprod/install/cluster_synced.go +++ b/pkg/roachprod/install/cluster_synced.go @@ -392,14 +392,15 @@ func (c *SyncedCluster) newSession( // for shared-process configurations.) // // When Stop needs to kill a process without other flags, the signal -// is 9 (SIGKILL) and wait is true. If maxWait is non-zero, Stop stops -// waiting after that approximate number of seconds. +// is 9 (SIGKILL) and wait is true. If gracePeriod is non-zero, Stop +// stops waiting after that approximate number of seconds, sending a +// SIGKILL if the process is still running after that time. func (c *SyncedCluster) Stop( ctx context.Context, l *logger.Logger, sig int, wait bool, - maxWait int, + gracePeriod int, virtualClusterLabel string, ) error { // virtualClusterDisplay includes information about the virtual @@ -443,7 +444,7 @@ func (c *SyncedCluster) Stop( if wait { display += " and waiting" } - return c.kill(ctx, l, "stop", display, sig, wait, maxWait, virtualClusterLabel) + return c.kill(ctx, l, "stop", display, sig, wait, gracePeriod, virtualClusterLabel) } else { cmd := fmt.Sprintf("ALTER TENANT '%s' STOP SERVICE", virtualClusterName) res, err := c.ExecSQL(ctx, l, c.Nodes[:1], "", 0, DefaultAuthMode(), "", /* database */ @@ -463,25 +464,25 @@ func (c *SyncedCluster) Stop( // Signal sends a signal to the CockroachDB process. func (c *SyncedCluster) Signal(ctx context.Context, l *logger.Logger, sig int) error { display := fmt.Sprintf("%s: sending signal %d", c.Name, sig) - return c.kill(ctx, l, "signal", display, sig, false /* wait */, 0 /* maxWait */, "") + return c.kill(ctx, l, "signal", display, sig, false /* wait */, 0 /* gracePeriod */, "") } // kill sends the signal sig to all nodes in the cluster using the kill command. // cmdName and display specify the roachprod subcommand and a status message, // for output/logging. If wait is true, the command will wait for the processes -// to exit, up to maxWait seconds. +// to exit, up to gracePeriod seconds. func (c *SyncedCluster) kill( ctx context.Context, l *logger.Logger, cmdName, display string, sig int, wait bool, - maxWait int, + gracePeriod int, virtualClusterLabel string, ) error { const timedOutMessage = "timed out" - if sig == 9 { + if sig == int(unix.SIGKILL) { // `kill -9` without wait is never what a caller wants. See #77334. 
wait = true } @@ -508,7 +509,7 @@ func (c *SyncedCluster) kill( echo "${pid}: dead" >> %[1]s/roachprod.log done`, c.LogDir(node, "", 0), // [1] - maxWait, // [2] + gracePeriod, // [2] timedOutMessage, // [3] ) } @@ -549,10 +550,13 @@ fi`, return res, err } - if wait && strings.Contains(res.CombinedOut, timedOutMessage) { - return res, fmt.Errorf( - "timed out after %ds waiting for n%d to drain and shutdown", - maxWait, node, + // If the process has not terminated after the grace period, + // perform a forceful termination. + if wait && sig != int(unix.SIGKILL) && strings.Contains(res.CombinedOut, timedOutMessage) { + l.Printf("n%d did not shutdown after %ds, performing a SIGKILL", node, gracePeriod) + return res, errors.Wrapf( + c.kill(ctx, l, cmdName, display, int(unix.SIGKILL), true, 0, virtualClusterLabel), + "failed to forcefully terminate n%d", node, ) } @@ -563,7 +567,7 @@ fi`, // Wipe TODO(peter): document func (c *SyncedCluster) Wipe(ctx context.Context, l *logger.Logger, preserveCerts bool) error { display := fmt.Sprintf("%s: wiping", c.Name) - if err := c.Stop(ctx, l, 9, true /* wait */, 0 /* maxWait */, ""); err != nil { + if err := c.Stop(ctx, l, int(unix.SIGKILL), true /* wait */, 0 /* gracePeriod */, ""); err != nil { return err } return c.Parallel(ctx, l, WithNodes(c.Nodes).WithDisplay(display), func(ctx context.Context, node Node) (*RunResultDetails, error) { @@ -1147,6 +1151,7 @@ type RunCmdOptions struct { stdin io.Reader stdout, stderr io.Writer remoteOptions []remoteSessionOption + expanderConfig ExpanderConfig } // Default RunCmdOptions enable combining output (stdout and stderr) and capturing ssh (verbose) debug output. @@ -1183,7 +1188,7 @@ func (c *SyncedCluster) runCmdOnSingleNode( var noResult RunResultDetails // Argument template expansion is node specific (e.g. for {store-dir}). 
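The escalation path added to kill above boils down to: send the requested signal, wait out the grace period, then fall back to SIGKILL. A condensed standalone sketch, with sendSignal and waitForExit as hypothetical stand-ins for the remote shell script:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/sys/unix"
)

// stopWithGrace mirrors the control flow above: deliver the signal,
// wait up to gracePeriod seconds, and escalate to SIGKILL if the
// process is still alive (unless SIGKILL was the original signal).
func stopWithGrace(
	ctx context.Context,
	sendSignal func(sig int) error,
	waitForExit func(ctx context.Context) bool, // true if the process exited
	sig, gracePeriod int,
) error {
	if err := sendSignal(sig); err != nil {
		return err
	}
	waitCtx, cancel := context.WithTimeout(ctx, time.Duration(gracePeriod)*time.Second)
	defer cancel()
	if exited := waitForExit(waitCtx); !exited && sig != int(unix.SIGKILL) {
		fmt.Printf("did not shut down after %ds, performing a SIGKILL\n", gracePeriod)
		return sendSignal(int(unix.SIGKILL))
	}
	return nil
}

func main() {
	_ = stopWithGrace(context.Background(),
		func(int) error { return nil },
		func(context.Context) bool { return false },
		int(unix.SIGTERM), 1)
}
```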
e := expander{node: node} - expandedCmd, err := e.expand(ctx, l, c, cmd) + expandedCmd, err := e.expand(ctx, l, c, opts.expanderConfig, cmd) if err != nil { return &noResult, errors.WithDetailf(err, "error expanding command: %s", cmd) } @@ -1291,6 +1296,7 @@ func (c *SyncedCluster) Run( includeRoachprodEnvVars: true, stdout: stdout, stderr: stderr, + expanderConfig: options.ExpanderConfig, } result, err := c.runCmdOnSingleNode(ctx, l, node, cmd, opts) return result, err @@ -1672,6 +1678,7 @@ func (c *SyncedCluster) DistributeCerts( cmd = fmt.Sprintf(`cd %s ; `, c.localVMDir(1)) } cmd += fmt.Sprintf(` +%[6]s rm -fr %[2]s mkdir -p %[2]s VERSION=$(%[1]s version --build-tag) @@ -1685,7 +1692,7 @@ fi %[1]s cert create-client %[3]s --certs-dir=%[2]s --ca-key=%[2]s/ca.key $TENANT_SCOPE_OPT %[1]s cert create-node %[4]s --certs-dir=%[2]s --ca-key=%[2]s/ca.key tar cvf %[5]s %[2]s -`, cockroachNodeBinary(c, 1), CockroachNodeCertsDir, DefaultUser, strings.Join(nodeNames, " "), certsTarName) +`, cockroachNodeBinary(c, 1), CockroachNodeCertsDir, DefaultUser, strings.Join(nodeNames, " "), certsTarName, SuppressMetamorphicConstantsEnvVar()) return c.runCmdOnSingleNode(ctx, l, node, cmd, defaultCmdOpts("init-certs")) }, @@ -1729,11 +1736,12 @@ func (c *SyncedCluster) RedistributeNodeCert(ctx context.Context, l *logger.Logg cmd = fmt.Sprintf(`cd %s ; `, c.localVMDir(1)) } cmd += fmt.Sprintf(` +%[6]s rm -fr %[2]s/node* mkdir -p %[2]s %[1]s cert create-node %[4]s --certs-dir=%[2]s --ca-key=%[2]s/ca.key tar cvf %[5]s %[2]s -`, cockroachNodeBinary(c, 1), CockroachNodeCertsDir, DefaultUser, strings.Join(nodeNames, " "), certsTarName) +`, cockroachNodeBinary(c, 1), CockroachNodeCertsDir, DefaultUser, strings.Join(nodeNames, " "), certsTarName, SuppressMetamorphicConstantsEnvVar()) return c.runCmdOnSingleNode(ctx, l, node, cmd, defaultCmdOpts("redist-node-cert")) }, @@ -1811,6 +1819,7 @@ func (c *SyncedCluster) createTenantCertBundle( cmd += fmt.Sprintf(`cd %s ; `, c.localVMDir(1)) } cmd += fmt.Sprintf(` +%[7]s CERT_DIR=%[1]s-%[5]d/certs CA_KEY=%[2]s/ca.key @@ -1835,6 +1844,7 @@ tar cvf %[6]s $CERT_DIR strings.Join(nodeNames, " "), virtualClusterID, bundleName, + SuppressMetamorphicConstantsEnvVar(), ) return c.runCmdOnSingleNode(ctx, l, node, cmd, defaultCmdOpts("create-tenant-cert-bundle")) @@ -2076,6 +2086,10 @@ func (c *SyncedCluster) Put( var wg sync.WaitGroup wg.Add(len(nodes)) + // We currently don't accept any custom expander configurations in + // this function. + var expanderConfig ExpanderConfig + // Each destination for the copy needs a source to copy from. We create a // channel that has capacity for each destination. If we try to add a source // and the channel is full we can simply drop that source as we know we won't @@ -2108,7 +2122,7 @@ func (c *SyncedCluster) Put( e := expander{ node: nodes[i], } - dest, err := e.expand(ctx, l, c, dest) + dest, err := e.expand(ctx, l, c, expanderConfig, dest) if err != nil { return "", err } @@ -2131,7 +2145,7 @@ func (c *SyncedCluster) Put( node: nodes[i], } var err error - dest, err = e.expand(ctx, l, c, dest) + dest, err = e.expand(ctx, l, c, expanderConfig, dest) if err != nil { results <- result{i, err} return @@ -2421,6 +2435,10 @@ func (c *SyncedCluster) Get( spinner.TaskStatus(nodeID, fmt.Sprintf(" %2d: %s", nodeID, msg), done) } + // We currently don't accept any custom expander configurations in + // this function. 
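The expand call above is the entry point into a chain of expanderFuncs, where the first function that recognizes a token wins. A toy version of that dispatch (the token and its value are illustrative, not the real template set):

```go
package main

import "fmt"

// expanderFunc mirrors the shape used in expander.go: each function may
// expand a template token, and reports whether it matched.
type expanderFunc func(token string) (expanded string, didExpand bool)

func expand(token string, expanders []expanderFunc) string {
	for _, f := range expanders {
		if v, ok := f(token); ok {
			return v // first match wins
		}
	}
	return token // no expander matched; leave the token alone
}

func main() {
	expanders := []expanderFunc{
		func(s string) (string, bool) {
			if s == "{store-dir}" {
				return "/mnt/data1/cockroach", true // illustrative value
			}
			return "", false
		},
	}
	fmt.Println("du -sh " + expand("{store-dir}", expanders))
}
```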
+ var expanderConfig ExpanderConfig + var wg sync.WaitGroup for i := range nodes { nodeTaskStatus(nodes[i], "copying", false) @@ -2439,7 +2457,7 @@ func (c *SyncedCluster) Get( e := expander{ node: nodes[i], } - src, err := e.expand(ctx, l, c, src) + src, err := e.expand(ctx, l, c, expanderConfig, src) if err != nil { results <- result{i, err} return @@ -2674,8 +2692,11 @@ func (c *SyncedCluster) SSH(ctx context.Context, l *logger.Logger, sshArgs, args node: targetNode, } var expandedArgs []string + // We currently don't accept any custom expander configurations in + // this function. + var expanderConfig ExpanderConfig for _, arg := range args { - expandedArg, err := e.expand(ctx, l, c, arg) + expandedArg, err := e.expand(ctx, l, c, expanderConfig, arg) if err != nil { return err } diff --git a/pkg/roachprod/install/cockroach.go b/pkg/roachprod/install/cockroach.go index d7a22865a8f5..671211cd9dd6 100644 --- a/pkg/roachprod/install/cockroach.go +++ b/pkg/roachprod/install/cockroach.go @@ -139,7 +139,7 @@ type StartOpts struct { StorageCluster *SyncedCluster // IsRestart allows skipping steps that are used during initial start like - // initialization and sequential node starts. + // initialization and sequential node starts and also reuses the previous start script. IsRestart bool // EnableFluentSink determines whether to enable the fluent-servers attribute @@ -750,7 +750,7 @@ func (c *SyncedCluster) ExecSQL( if c.IsLocal() { cmd = fmt.Sprintf(`cd %s ; `, c.localVMDir(node)) } - cmd += cockroachNodeBinary(c, node) + " sql --url " + + cmd += SuppressMetamorphicConstantsEnvVar() + " " + cockroachNodeBinary(c, node) + " sql --url " + c.NodeURL("localhost", desc.Port, virtualClusterName, desc.ServiceMode, authMode, database) + " " + ssh.Escape(args) return c.runCmdOnSingleNode(ctx, l, node, cmd, defaultCmdOpts("run-sql")) @@ -762,30 +762,35 @@ func (c *SyncedCluster) ExecSQL( func (c *SyncedCluster) startNodeWithResult( ctx context.Context, l *logger.Logger, node Node, startOpts StartOpts, ) (*RunResultDetails, error) { - startCmd, err := c.generateStartCmd(ctx, l, node, startOpts) - if err != nil { - return newRunResultDetails(node, err), err - } - var uploadCmd string - if c.IsLocal() { - uploadCmd = fmt.Sprintf(`cd %s ; `, c.localVMDir(node)) - } startScriptPath := StartScriptPath(startOpts.VirtualClusterName, startOpts.SQLInstance) - uploadCmd += fmt.Sprintf(`cat > %[1]s && chmod +x %[1]s`, startScriptPath) - - var res = &RunResultDetails{} - uploadOpts := defaultCmdOpts("upload-start-script") - uploadOpts.stdin = strings.NewReader(startCmd) - res, err = c.runCmdOnSingleNode(ctx, l, node, uploadCmd, uploadOpts) - if err != nil || res.Err != nil { - return res, err - } - var runScriptCmd string if c.IsLocal() { runScriptCmd = fmt.Sprintf(`cd %s ; `, c.localVMDir(node)) } runScriptCmd += "./" + startScriptPath + + // If we are performing a restart, the start script should already + // exist, and we are going to reuse it. 
+ if !startOpts.IsRestart { + startCmd, err := c.generateStartCmd(ctx, l, node, startOpts) + if err != nil { + return newRunResultDetails(node, err), err + } + var uploadCmd string + if c.IsLocal() { + uploadCmd = fmt.Sprintf(`cd %s ; `, c.localVMDir(node)) + } + uploadCmd += fmt.Sprintf(`cat > %[1]s && chmod +x %[1]s`, startScriptPath) + + var res = &RunResultDetails{} + uploadOpts := defaultCmdOpts("upload-start-script") + uploadOpts.stdin = strings.NewReader(startCmd) + res, err = c.runCmdOnSingleNode(ctx, l, node, uploadCmd, uploadOpts) + if err != nil || res.Err != nil { + return res, err + } + } + return c.runCmdOnSingleNode(ctx, l, node, runScriptCmd, defaultCmdOpts("run-start-script")) } @@ -1068,8 +1073,11 @@ func (c *SyncedCluster) generateStartArgs( e := expander{ node: node, } + // We currently don't accept any custom expander configurations in + // this function. + var expanderConfig ExpanderConfig for i, arg := range args { - expandedArg, err := e.expand(ctx, l, c, arg) + expandedArg, err := e.expand(ctx, l, c, expanderConfig, arg) if err != nil { return nil, err } @@ -1312,8 +1320,8 @@ func (c *SyncedCluster) generateClusterSettingCmd( // store is used. clusterSettingsCmd += fmt.Sprintf(` if ! test -e %s ; then - COCKROACH_CONNECT_TIMEOUT=%d %s sql --url %s -e "%s" && mkdir -p %s && touch %s - fi`, path, startSQLTimeout, binary, url, clusterSettingsString, c.NodeDir(node, 1 /* storeIndex */), path) + %s COCKROACH_CONNECT_TIMEOUT=%d %s sql --url %s -e "%s" && mkdir -p %s && touch %s + fi`, path, SuppressMetamorphicConstantsEnvVar(), startSQLTimeout, binary, url, clusterSettingsString, c.NodeDir(node, 1 /* storeIndex */), path) return clusterSettingsCmd, nil } @@ -1332,8 +1340,8 @@ func (c *SyncedCluster) generateInitCmd(ctx context.Context, node Node) (string, binary := cockroachNodeBinary(c, node) initCmd += fmt.Sprintf(` if ! test -e %[1]s ; then - COCKROACH_CONNECT_TIMEOUT=%[4]d %[2]s init --url %[3]s && touch %[1]s - fi`, path, binary, url, startSQLTimeout) + %[2]s COCKROACH_CONNECT_TIMEOUT=%[5]d %[3]s init --url %[4]s && touch %[1]s + fi`, path, SuppressMetamorphicConstantsEnvVar(), binary, url, startSQLTimeout) return initCmd, nil } @@ -1371,7 +1379,10 @@ func (c *SyncedCluster) generateKeyCmd( } e := expander{node: node} - expanded, err := e.expand(ctx, l, c, keyCmd.String()) + // We currently don't accept any custom expander configurations in + // this function. + var expanderConfig ExpanderConfig + expanded, err := e.expand(ctx, l, c, expanderConfig, keyCmd.String()) if err != nil { return "", err } @@ -1530,8 +1541,8 @@ func (c *SyncedCluster) createFixedBackupSchedule( } url := c.NodeURL("localhost", port, startOpts.VirtualClusterName, serviceMode, AuthRootCert, "" /* database */) - fullCmd := fmt.Sprintf(`COCKROACH_CONNECT_TIMEOUT=%d %s sql --url %s -e %q`, - startSQLTimeout, binary, url, createScheduleCmd) + fullCmd := fmt.Sprintf(`%s COCKROACH_CONNECT_TIMEOUT=%d %s sql --url %s -e %q`, + SuppressMetamorphicConstantsEnvVar(), startSQLTimeout, binary, url, createScheduleCmd) // Instead of using `c.ExecSQL()`, use `c.runCmdOnSingleNode()`, which allows us to // 1) prefix the schedule backup cmd with COCKROACH_CONNECT_TIMEOUT. // 2) run the command against the first node in the cluster target. @@ -1561,3 +1572,16 @@ func getEnvVars() []string { } return sl } + +// SuppressMetamorphicConstantsEnvVar returns the env var to disable metamorphic testing. 
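The restart fast path above can be summarized as: generate and upload the start script only on a fresh start, and always finish by executing whatever script is on the node. A sketch of that control flow, where runRemote is a hypothetical executor for the remote shell commands:

```go
package main

import "fmt"

// startNode regenerates the start script only when this is not a
// restart; a restart re-runs the script already on the node.
func startNode(isRestart bool, scriptPath, startCmd string, runRemote func(cmd, stdin string) error) error {
	if !isRestart {
		// Fresh start: write the script and mark it executable.
		upload := fmt.Sprintf("cat > %[1]s && chmod +x %[1]s", scriptPath)
		if err := runRemote(upload, startCmd); err != nil {
			return err
		}
	}
	// Both paths end by executing the (new or preexisting) script.
	return runRemote("./"+scriptPath, "")
}

func main() {
	_ = startNode(true, "cockroach.sh", "", func(cmd, _ string) error {
		fmt.Println(cmd)
		return nil
	})
}
```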
+// This doesn't actually disable metamorphic constants for a test **unless** it is used +// when starting the cluster. This does however suppress the metamorphic constants being +// logged for every cockroach invocation, which while benign, can be very confusing as the +// constants will be different than what the cockroach cluster is actually using. +// +// TODO(darrylwong): Ideally, the metamorphic constants framework would be smarter and only +// log constants when asked for instead of unconditionally on init(). That way we can remove +// this workaround and just log the constants once when the cluster is started. +func SuppressMetamorphicConstantsEnvVar() string { + return config.DisableMetamorphicTestingEnvVar +} diff --git a/pkg/roachprod/install/expander.go b/pkg/roachprod/install/expander.go index 19114c4a0942..05d5c0ec563f 100644 --- a/pkg/roachprod/install/expander.go +++ b/pkg/roachprod/install/expander.go @@ -46,6 +46,10 @@ var storeDirRe = regexp.MustCompile(`{store-dir(:[0-9]+)?}`) var logDirRe = regexp.MustCompile(`{log-dir(:[a-z0-9\-]+)?(:[0-9]+)?}`) var certsDirRe = regexp.MustCompile(`{certs-dir}`) +type ExpanderConfig struct { + DefaultVirtualCluster string +} + // expander expands a string which contains templated parameters for cluster // attributes like pgurl, pghost, pgport, uiport, store-dir, and log-dir with // the corresponding values. @@ -59,11 +63,11 @@ type expander struct { } // expanderFunc is a function which may expand a string with a templated value. -type expanderFunc func(context.Context, *logger.Logger, *SyncedCluster, string) (expanded string, didExpand bool, err error) +type expanderFunc func(context.Context, *logger.Logger, *SyncedCluster, ExpanderConfig, string) (expanded string, didExpand bool, err error) // expand will expand arg if it contains an expander template. func (e *expander) expand( - ctx context.Context, l *logger.Logger, c *SyncedCluster, arg string, + ctx context.Context, l *logger.Logger, c *SyncedCluster, cfg ExpanderConfig, arg string, ) (string, error) { var err error s := parameterRe.ReplaceAllStringFunc(arg, func(s string) string { @@ -80,7 +84,7 @@ func (e *expander) expand( e.maybeExpandCertsDir, } for _, f := range expanders { - v, expanded, fErr := f(ctx, l, c, s) + v, expanded, fErr := f(ctx, l, c, cfg, s) if fErr != nil { err = fErr return "" @@ -127,12 +131,15 @@ func (e *expander) maybeExpandMap( } // extractVirtualClusterInfo extracts the virtual cluster name and -// instance from the given group match, if available. If no -// information is provided, the system interface is assumed and if no -// instance is provided, the first instance is assumed. -func extractVirtualClusterInfo(matches []string) (string, int, error) { +// instance from the given group match, if available. If no default or +// custom tenant is provided, an empty string is returned. During +// service discovery, this will mean that the service for the system +// tenant is used. +func extractVirtualClusterInfo( + matches []string, defaultVirtualCluster string, +) (string, int, error) { // Defaults if the passed in group match is empty. - var virtualClusterName string + virtualClusterName := defaultVirtualCluster var sqlInstance int // Extract the cluster name and instance matches. 
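The defaulting rule in extractVirtualClusterInfo is small but subtle: an explicit template match wins, then the configured default virtual cluster, and an empty result means the system tenant during service discovery. Condensed into a sketch:

```go
package main

import "fmt"

// virtualClusterName applies the precedence described above: explicit
// match, then the configured default, then empty (system tenant).
func virtualClusterName(match, defaultVirtualCluster string) string {
	if match != "" {
		return match
	}
	return defaultVirtualCluster
}

func main() {
	fmt.Println(virtualClusterName("", "demo-tenant")) // -> demo-tenant
	fmt.Println(virtualClusterName("", ""))            // -> "" (system tenant)
}
```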
@@ -158,7 +165,7 @@ func extractVirtualClusterInfo(matches []string) (string, int, error) { // maybeExpandPgURL is an expanderFunc for {pgurl:[:virtualCluster[:sqlInstance]]} func (e *expander) maybeExpandPgURL( - ctx context.Context, l *logger.Logger, c *SyncedCluster, s string, + ctx context.Context, l *logger.Logger, c *SyncedCluster, cfg ExpanderConfig, s string, ) (string, bool, error) { var err error m := pgURLRe.FindStringSubmatch(s) @@ -169,7 +176,7 @@ func (e *expander) maybeExpandPgURL( if e.pgURLs == nil { e.pgURLs = make(map[string]map[Node]string) } - virtualClusterName, sqlInstance, err := extractVirtualClusterInfo(m[2:]) + virtualClusterName, sqlInstance, err := extractVirtualClusterInfo(m[2:], cfg.DefaultVirtualCluster) if err != nil { return "", false, err } @@ -191,13 +198,13 @@ func (e *expander) maybeExpandPgURL( // maybeExpandPgHost is an expanderFunc for {pghost:} func (e *expander) maybeExpandPgHost( - ctx context.Context, l *logger.Logger, c *SyncedCluster, s string, + ctx context.Context, l *logger.Logger, c *SyncedCluster, cfg ExpanderConfig, s string, ) (string, bool, error) { m := pgHostRe.FindStringSubmatch(s) if m == nil { return s, false, nil } - virtualClusterName, sqlInstance, err := extractVirtualClusterInfo(m[2:]) + virtualClusterName, sqlInstance, err := extractVirtualClusterInfo(m[2:], cfg.DefaultVirtualCluster) if err != nil { return "", false, err } @@ -235,13 +242,13 @@ func (e *expander) maybeExpandPgHost( // maybeExpandPgURL is an expanderFunc for {pgport:} func (e *expander) maybeExpandPgPort( - ctx context.Context, l *logger.Logger, c *SyncedCluster, s string, + ctx context.Context, l *logger.Logger, c *SyncedCluster, cfg ExpanderConfig, s string, ) (string, bool, error) { m := pgPortRe.FindStringSubmatch(s) if m == nil { return s, false, nil } - virtualClusterName, sqlInstance, err := extractVirtualClusterInfo(m[2:]) + virtualClusterName, sqlInstance, err := extractVirtualClusterInfo(m[2:], cfg.DefaultVirtualCluster) if err != nil { return "", false, err } @@ -263,7 +270,7 @@ func (e *expander) maybeExpandPgPort( // maybeExpandPgURL is an expanderFunc for {uiport:} func (e *expander) maybeExpandUIPort( - ctx context.Context, l *logger.Logger, c *SyncedCluster, s string, + ctx context.Context, l *logger.Logger, c *SyncedCluster, _ ExpanderConfig, s string, ) (string, bool, error) { m := uiPortRe.FindStringSubmatch(s) if m == nil { @@ -286,7 +293,7 @@ func (e *expander) maybeExpandUIPort( // where storeIndex is optional and defaults to 1. Note that storeIndex is the // store's index on multi-store nodes, not the store ID. 
func (e *expander) maybeExpandStoreDir( - ctx context.Context, l *logger.Logger, c *SyncedCluster, s string, + ctx context.Context, l *logger.Logger, c *SyncedCluster, _ ExpanderConfig, s string, ) (string, bool, error) { m := storeDirRe.FindStringSubmatch(s) if m == nil { @@ -305,13 +312,13 @@ func (e *expander) maybeExpandStoreDir( // maybeExpandLogDir is an expanderFunc for "{log-dir}" func (e *expander) maybeExpandLogDir( - ctx context.Context, l *logger.Logger, c *SyncedCluster, s string, + ctx context.Context, l *logger.Logger, c *SyncedCluster, cfg ExpanderConfig, s string, ) (string, bool, error) { m := logDirRe.FindStringSubmatch(s) if m == nil { return s, false, nil } - virtualClusterName, sqlInstance, err := extractVirtualClusterInfo(m[1:]) + virtualClusterName, sqlInstance, err := extractVirtualClusterInfo(m[1:], cfg.DefaultVirtualCluster) if err != nil { return "", false, err } @@ -320,7 +327,7 @@ func (e *expander) maybeExpandLogDir( // maybeExpandCertsDir is an expanderFunc for "{certs-dir}" func (e *expander) maybeExpandCertsDir( - ctx context.Context, l *logger.Logger, c *SyncedCluster, s string, + ctx context.Context, l *logger.Logger, c *SyncedCluster, _ ExpanderConfig, s string, ) (string, bool, error) { if !certsDirRe.MatchString(s) { return s, false, nil diff --git a/pkg/roachprod/install/run_options.go b/pkg/roachprod/install/run_options.go index e85d5b86eed9..1f02bd335465 100644 --- a/pkg/roachprod/install/run_options.go +++ b/pkg/roachprod/install/run_options.go @@ -27,6 +27,9 @@ type RunOptions struct { // recommended to check the documentation of the function you are using to see // what the default behaviour is. FailOption FailOption + // ExpanderConfig configures the behaviour of the roachprod expander + // during a run. 
+ ExpanderConfig ExpanderConfig // These are private to roachprod Nodes Nodes @@ -103,3 +106,8 @@ func (r RunOptions) WithDisplay(display string) RunOptions { r.Display = display return r } + +func (r RunOptions) WithExpanderConfig(cfg ExpanderConfig) RunOptions { + r.ExpanderConfig = cfg + return r +} diff --git a/pkg/roachprod/k8s/roachprod-gc.yaml b/pkg/roachprod/k8s/roachprod-gc.yaml index 9f0c2600ce51..1251df0af8aa 100644 --- a/pkg/roachprod/k8s/roachprod-gc.yaml +++ b/pkg/roachprod/k8s/roachprod-gc.yaml @@ -10,7 +10,7 @@ # --from-literal azure_password=XYZZY # --from-literal azure_tenant_id=XYZZY # --from-literal slack_token=XYZZY -apiVersion: batch/v1beta1 +apiVersion: batch/v1 kind: CronJob metadata: name: roachprod-gc-cronjob @@ -31,13 +31,14 @@ spec: spec: containers: - name: roachprod-gc-cronjob - image: gcr.io/cockroach-dev-inf/cockroachlabs/roachprod:485975b3a82 + image: gcr.io/cockroach-dev-inf/cockroachlabs/roachprod:1d01263b3a4 args: - gc - --gce-project=cockroach-ephemeral,cockroach-roachstress - --slack-token - $(SLACK_TOKEN) - --aws-account-ids=541263489771,337380398238 + - --azure-subscription-names=e2e-adhoc,e2e-infra,Microsoft Azure Sponsorship env: - name: SLACK_TOKEN valueFrom: diff --git a/pkg/roachprod/multitenant.go b/pkg/roachprod/multitenant.go index 8524e245538e..f8d8d3e7555d 100644 --- a/pkg/roachprod/multitenant.go +++ b/pkg/roachprod/multitenant.go @@ -66,5 +66,5 @@ func StopServiceForVirtualCluster( } label := install.VirtualClusterLabel(stopOpts.VirtualClusterName, stopOpts.SQLInstance) - return c.Stop(ctx, l, stopOpts.Sig, stopOpts.Wait, stopOpts.MaxWait, label) + return c.Stop(ctx, l, stopOpts.Sig, stopOpts.Wait, stopOpts.GracePeriod, label) } diff --git a/pkg/roachprod/roachprod.go b/pkg/roachprod/roachprod.go index f9fdd02f47e7..6137355039b5 100644 --- a/pkg/roachprod/roachprod.go +++ b/pkg/roachprod/roachprod.go @@ -58,6 +58,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/errors" "github.com/cockroachdb/errors/oserror" + "golang.org/x/sys/unix" ) // MalformedClusterNameError is returned when the cluster name passed to Create is invalid. @@ -209,7 +210,7 @@ func newCluster( // userClusterNameRegexp returns a regexp that matches all clusters owned by the // current user. -func userClusterNameRegexp(l *logger.Logger) (*regexp.Regexp, error) { +func userClusterNameRegexp(l *logger.Logger, optionalUsername string) (*regexp.Regexp, error) { // In general, we expect that users will have the same // account name across the services they're using, // but we still want to function even if this is not @@ -219,7 +220,11 @@ func userClusterNameRegexp(l *logger.Logger) (*regexp.Regexp, error) { if err != nil { return nil, err } - pattern := "" + + var pattern string + if optionalUsername != "" { + pattern += fmt.Sprintf(`(^%s-)`, regexp.QuoteMeta(optionalUsername)) + } for _, account := range accounts { if !seenAccounts[account] { seenAccounts[account] = true @@ -364,7 +369,7 @@ func List( if clusterNamePattern == "" { if listMine { var err error - listPattern, err = userClusterNameRegexp(l) + listPattern, err = userClusterNameRegexp(l, opts.Username) if err != nil { return cloud.Cloud{}, err } @@ -881,9 +886,10 @@ type StopOpts struct { // If Wait is set, roachprod waits until the PID disappears (i.e. the // process has terminated). Wait bool // forced to true when Sig == 9 - // If MaxWait is set, roachprod waits that approximate number of seconds - // until the PID disappears. 
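A sketch of how a caller might use the new builder method to thread an ExpanderConfig through a run, assuming the surrounding roachprod packages as shown in this diff (not a complete program):

```go
package example

import (
	"github.com/cockroachdb/cockroach/pkg/roachprod/install"
)

// workloadRunOptions builds RunOptions whose {pgurl}/{pghost} templates
// resolve against the "app" virtual cluster unless a template names one
// explicitly. The nodes value comes from the caller's cluster.
func workloadRunOptions(nodes install.Nodes) install.RunOptions {
	return install.WithNodes(nodes).
		WithDisplay("running workload").
		WithExpanderConfig(install.ExpanderConfig{DefaultVirtualCluster: "app"})
}
```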
- MaxWait int + // GracePeriod is the amount of time (in seconds) roachprod will wait + // until the PID disappears. If the process is not terminated after + // that time, a hard stop (SIGKILL) is performed. + GracePeriod int // Options that only apply to StopServiceForVirtualCluster VirtualClusterID int @@ -894,10 +900,10 @@ // DefaultStopOpts returns StopOpts populated with the default values used by Stop. func DefaultStopOpts() StopOpts { return StopOpts{ - ProcessTag: "", - Sig: 9, - Wait: false, - MaxWait: 0, + ProcessTag: "", + Sig: int(unix.SIGKILL), + Wait: false, + GracePeriod: 0, } } @@ -908,7 +914,7 @@ func Stop(ctx context.Context, l *logger.Logger, clusterName string, opts StopOp return err } - return c.Stop(ctx, l, opts.Sig, opts.Wait, opts.MaxWait, "") + return c.Stop(ctx, l, opts.Sig, opts.Wait, opts.GracePeriod, "") } // Signal sends a signal to nodes in the cluster. @@ -994,7 +1000,7 @@ func Install(ctx context.Context, l *logger.Logger, clusterName string, software err := install.Install(ctx, l, c, software) err = errors.Wrapf(err, "retryable infrastructure error: could not install %s", software) if err != nil { - l.Printf(err.Error()) + l.Printf("%s", err) } return err }) @@ -1385,7 +1391,11 @@ func Pprof(ctx context.Context, l *logger.Logger, clusterName string, opts Pprof // Destroy TODO func Destroy( - l *logger.Logger, destroyAllMine bool, destroyAllLocal bool, clusterNames ...string, + l *logger.Logger, + optionalUsername string, + destroyAllMine bool, + destroyAllLocal bool, + clusterNames ...string, ) error { if err := LoadClusters(); err != nil { return errors.Wrap(err, "problem loading clusters") } @@ -1402,7 +1412,7 @@ func Destroy( if destroyAllLocal { return errors.New("--all-mine cannot be combined with --all-local") } - destroyPattern, err := userClusterNameRegexp(l) + destroyPattern, err := userClusterNameRegexp(l, optionalUsername) if err != nil { return err } @@ -1731,9 +1741,13 @@ func GC(l *logger.Logger, dryrun bool) error { return cloud.GCAWS(l, dryrun) }) + addOpFn(func() error { + return cloud.GCAzure(l, dryrun) + }) + // ListCloud may fail for a provider, but we can still attempt GC on // the clusters we do have. - cld, _ := cloud.ListCloud(l, vm.ListOptions{IncludeEmptyClusters: true, IncludeProviders: []string{gce.ProviderName, azure.ProviderName}}) + cld, _ := cloud.ListCloud(l, vm.ListOptions{IncludeEmptyClusters: true, IncludeProviders: []string{gce.ProviderName}}) addOpFn(func() error { return cloud.GCClusters(l, cld, dryrun) }) @@ -2796,7 +2810,7 @@ func Deploy( pauseDuration time.Duration, sig int, wait bool, - maxWait int, + gracePeriod int, secure bool, ) error { // Stage supports `workload` as well, so it needs to be excluded here. This @@ -2825,7 +2839,7 @@ for _, node := range c.TargetNodes() { curNode := []install.Node{node} - err = c.WithNodes(curNode).Stop(ctx, l, sig, wait, maxWait, "") + err = c.WithNodes(curNode).Stop(ctx, l, sig, wait, gracePeriod, "") if err != nil { return err } diff --git a/pkg/roachprod/vm/aws/aws.go b/pkg/roachprod/vm/aws/aws.go index 19f330cd1fc4..a55f6808effd 100644 --- a/pkg/roachprod/vm/aws/aws.go +++ b/pkg/roachprod/vm/aws/aws.go @@ -406,7 +406,7 @@ var defaultConfig = func() (cfg *awsConfig) { return cfg }() -// defaultZones is the list of availability zones used by default for +// DefaultZones is the list of availability zones used by default for // cluster creation. If the geo flag is specified, nodes are // distributed between zones.
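With the rename in place, a drain-friendly stop reads naturally: request SIGTERM, wait, and rely on the escalation added in cluster_synced.go for the hard kill. A sketch against the APIs shown in this diff (not a complete program):

```go
package example

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/roachprod"
	"github.com/cockroachdb/cockroach/pkg/roachprod/logger"
	"golang.org/x/sys/unix"
)

// gracefulStop sends SIGTERM and waits up to five minutes; after that,
// the kill path performs the hard SIGKILL automatically.
func gracefulStop(ctx context.Context, l *logger.Logger, clusterName string) error {
	opts := roachprod.DefaultStopOpts()
	opts.Sig = int(unix.SIGTERM)
	opts.Wait = true
	opts.GracePeriod = 300 // seconds
	return roachprod.Stop(ctx, l, clusterName, opts)
}
```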
// @@ -415,7 +415,7 @@ var defaultConfig = func() (cfg *awsConfig) { // doesn't support multi-regional buckets, thus resulting in material // egress cost if the test loads from a different region. See // https://github.com/cockroachdb/cockroach/issues/105968. -var defaultZones = []string{ +var DefaultZones = []string{ "us-east-2a", "us-west-2b", "eu-west-2b", @@ -482,7 +482,7 @@ func (o *ProviderOpts) ConfigureCreateFlags(flags *pflag.FlagSet) { fmt.Sprintf("aws availability zones to use for cluster creation. If zones are formatted\n"+ "as AZ:N where N is an integer, the zone will be repeated N times. If > 1\n"+ "zone specified, the cluster will be spread out evenly by zone regardless\n"+ - "of geo (default [%s])", strings.Join(defaultZones, ","))) + "of geo (default [%s])", strings.Join(DefaultZones, ","))) flags.StringVar(&o.ImageAMI, ProviderName+"-image-ami", o.ImageAMI, "Override image AMI to use. See https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/describe-images.html") flags.BoolVar(&o.UseMultipleDisks, ProviderName+"-enable-multiple-stores", @@ -657,7 +657,7 @@ func (p *Provider) Create( } if len(expandedZones) == 0 { - expandedZones = defaultZones + expandedZones = DefaultZones } // We need to make sure that the SSH keys have been distributed to all regions. diff --git a/pkg/roachprod/vm/azure/azure.go b/pkg/roachprod/vm/azure/azure.go index a3079ed6f61c..edbcfc11b869 100644 --- a/pkg/roachprod/vm/azure/azure.go +++ b/pkg/roachprod/vm/azure/azure.go @@ -39,7 +39,8 @@ import ( ) const ( - defaultSubscription = "e2e-infra" + defaultSubscription = "e2e-adhoc" + SubscriptionIDEnvVar = "AZURE_SUBSCRIPTION_ID" // ProviderName is "azure". ProviderName = "azure" remoteUser = "ubuntu" @@ -85,6 +86,9 @@ type Provider struct { OperationTimeout time.Duration // Wait for deletions to finish before returning. SyncDelete bool + // The list of subscription names to use. Currently only used by GC. + // If left empty then falls back to env var then default subscription. + SubscriptionNames []string mu struct { syncutil.Mutex @@ -315,9 +319,9 @@ func (p *Provider) Create( if len(providerOpts.Locations) == 0 { if opts.GeoDistributed { - providerOpts.Locations = defaultLocations + providerOpts.Locations = DefaultLocations } else { - providerOpts.Locations = []string{defaultLocations[0]} + providerOpts.Locations = []string{DefaultLocations[0]} } } @@ -499,7 +503,7 @@ func (p *Provider) DeleteCluster(l *logger.Logger, name string) error { // We have seen occurrences of Azure resource groups losing the necessary tags // needed for roachprod to find them. The cluster may need to be manually deleted // through the Azure portal. - return errors.Newf("**** MANUAL INTERVENTION REQUIRED ****\nDeleteCluster: Found no azure resource groups with tag cluster: %s", name) + return errors.Newf("**** MANUAL INTERVENTION REQUIRED IF ERROR SEEN MULTIPLE TIMES ****\nDeleteCluster: Found no azure resource groups with tag cluster: %s", name) } if !p.SyncDelete { @@ -1546,6 +1550,57 @@ func (p *Provider) createUltraDisk( return disk, err } +// SetSubscription takes in a subscription name then finds and stores the ID +// in the Provider instance. +func (p *Provider) SetSubscription(ctx context.Context, subscription string) error { + subscriptionId, err := p.findSubscriptionID(ctx, subscription) + if err != nil { + return err + } + p.mu.Lock() + defer p.mu.Unlock() + p.mu.subscriptionId = subscriptionId + + return nil +} + +// findSubscriptionID takes in a subscription name and returns the ID. 
+func (p *Provider) findSubscriptionID(ctx context.Context, subscription string) (string, error) { + authorizer, err := p.getAuthorizer() + if err != nil { + return "", err + } + sc := subscriptions.NewClient() + sc.Authorizer = authorizer + + it, err := sc.ListComplete(ctx) + if err != nil { + return "", errors.Wrapf(err, "error listing Azure subscriptions") + } + + var subscriptionId string + + // Iterate through all subscriptions to find the matching subscription name. + for it.NotDone() { + s := it.Value().SubscriptionID + name := it.Value().DisplayName + if s != nil && name != nil { + if *name == subscription { + subscriptionId = *s + break + } + } + if err = it.NextWithContext(ctx); err != nil { + return "", err + } + } + if subscriptionId == "" { + return "", errors.Newf("could not find Azure subscription: %s", subscription) + } + + return subscriptionId, nil +} + // getSubscription returns env.AZURE_SUBSCRIPTION_ID if it exists // or the ID of the defaultSubscription. // The value is memoized in the Provider instance. @@ -1556,43 +1611,19 @@ func (p *Provider) getSubscription(ctx context.Context) (string, error) { return p.mu.subscriptionId }() + // Use the saved subscriptionID. if subscriptionId != "" { return subscriptionId, nil } - subscriptionId = os.Getenv("AZURE_SUBSCRIPTION_ID") + subscriptionId = os.Getenv(SubscriptionIDEnvVar) // Fallback to retrieving the defaultSubscription. if subscriptionId == "" { - authorizer, err := p.getAuthorizer() - if err != nil { - return "", err - } - sc := subscriptions.NewClient() - sc.Authorizer = authorizer - - it, err := sc.ListComplete(ctx) + var err error + subscriptionId, err = p.findSubscriptionID(ctx, defaultSubscription) if err != nil { - return "", errors.Wrapf(err, "error listing Azure subscriptions") - } - - // Iterate through all subscriptions to find the defaultSubscription. - // We have to do this as Azure requires the ID not just the name. - for it.NotDone() { - s := it.Value().SubscriptionID - name := it.Value().DisplayName - if s != nil && name != nil { - if *name == defaultSubscription { - subscriptionId = *s - break - } - } - if err = it.NextWithContext(ctx); err != nil { - return "", err - } - } - if subscriptionId == "" { - return "", errors.Newf("Could not find default subscription: %s", defaultSubscription) + return "", errors.Wrapf(err, "Error finding default Azure subscription. Check that you have permission to view the subscription or use a different subscription by specifying the %s env var", SubscriptionIDEnvVar) } } diff --git a/pkg/roachprod/vm/azure/flags.go b/pkg/roachprod/vm/azure/flags.go index 36addb0b0768..7bc85bba2284 100644 --- a/pkg/roachprod/vm/azure/flags.go +++ b/pkg/roachprod/vm/azure/flags.go @@ -34,7 +34,7 @@ type ProviderOpts struct { // These default locations support availability zones. At the time of // this comment, `westus` did not and `westus2` is consistently out of // capacity. 
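findSubscriptionID exists because Azure APIs want the subscription ID while humans pass display names, so the lookup pages through the subscription list until a name matches. The same shape in isolation, with the SDK iterator replaced by a simplified stand-in:

```go
package main

import (
	"context"
	"fmt"

	"github.com/cockroachdb/errors"
)

// subscription and subscriptionIterator are simplified stand-ins for
// the Azure SDK types iterated via ListComplete above.
type subscription struct{ id, displayName string }

type subscriptionIterator interface {
	NotDone() bool
	Value() subscription
	NextWithContext(ctx context.Context) error
}

// findByName pages through subscriptions until the display name matches.
func findByName(ctx context.Context, it subscriptionIterator, name string) (string, error) {
	for it.NotDone() {
		if s := it.Value(); s.displayName == name {
			return s.id, nil
		}
		if err := it.NextWithContext(ctx); err != nil {
			return "", err
		}
	}
	return "", errors.Newf("could not find Azure subscription: %s", name)
}

// sliceIterator is an in-memory iterator for the example.
type sliceIterator struct {
	subs []subscription
	i    int
}

func (s *sliceIterator) NotDone() bool                         { return s.i < len(s.subs) }
func (s *sliceIterator) Value() subscription                   { return s.subs[s.i] }
func (s *sliceIterator) NextWithContext(context.Context) error { s.i++; return nil }

func main() {
	it := &sliceIterator{subs: []subscription{{"abc-123", "e2e-adhoc"}}}
	fmt.Println(findByName(context.Background(), it, "e2e-adhoc"))
}
```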
-var defaultLocations = []string{ +var DefaultLocations = []string{ "eastus", "canadacentral", "westus3", @@ -72,7 +72,7 @@ func (o *ProviderOpts) ConfigureCreateFlags(flags *pflag.FlagSet) { "Machine type (see https://azure.microsoft.com/en-us/pricing/details/virtual-machines/linux/)") flags.StringSliceVar(&o.Locations, ProviderName+"-locations", nil, fmt.Sprintf("Locations for cluster (see `az account list-locations`) (default\n[%s])", - strings.Join(defaultLocations, ","))) + strings.Join(DefaultLocations, ","))) flags.StringVar(&o.VnetName, ProviderName+"-vnet-name", "common", "The name of the VNet to use") flags.StringVar(&o.Zone, ProviderName+"-availability-zone", "", "Availability Zone to create VMs in") @@ -90,6 +90,8 @@ func (o *ProviderOpts) ConfigureCreateFlags(flags *pflag.FlagSet) { func (o *ProviderOpts) ConfigureClusterFlags(*pflag.FlagSet, vm.MultipleProjectsOption) { } -// ConfigureClusterCleanupFlags is part of ProviderOpts. This implementation is a no-op. +// ConfigureClusterCleanupFlags is part of ProviderOpts. func (o *ProviderOpts) ConfigureClusterCleanupFlags(flags *pflag.FlagSet) { + flags.StringSliceVar(&providerInstance.SubscriptionNames, ProviderName+"-subscription-names", []string{}, + "Azure subscription names as a comma-separated string") } diff --git a/pkg/roachprod/vm/gce/gcloud.go b/pkg/roachprod/vm/gce/gcloud.go index aa11f04fe39c..73cc393c035b 100644 --- a/pkg/roachprod/vm/gce/gcloud.go +++ b/pkg/roachprod/vm/gce/gcloud.go @@ -346,6 +346,10 @@ type ProviderOpts struct { // Use an instance template and a managed instance group to create VMs. This // enables cluster resizing, load balancing, and health monitoring. Managed bool + // This specifies a subset of the Zones above that will run on spot instances. + // VMs running in Zones not in this list will be provisioned on-demand. This + // is only used by managed instance groups. + ManagedSpotZones []string // Enable the cron service. It is disabled by default. EnableCron bool @@ -954,7 +958,7 @@ type ProjectsVal struct { AcceptMultipleProjects bool } -// defaultZones is the list of zones used by default for cluster creation. +// DefaultZones is the list of zones used by default for cluster creation. // If the geo flag is specified, nodes are distributed between zones. // These are GCP zones available according to this page: // https://cloud.google.com/compute/docs/regions-zones#available @@ -964,7 +968,7 @@ type ProjectsVal struct { // ARM64 builds), but we randomize the specific zone. This is to avoid // "zone exhausted" errors in one particular zone, especially during // nightly roachtest runs. -func defaultZones(arch string) []string { +func DefaultZones(arch string) []string { zones := []string{"us-east1-b", "us-east1-c", "us-east1-d"} if vm.ParseArch(arch) == vm.ArchARM64 { // T2A instances are only available in us-central1 in NA. 
@@ -1033,6 +1037,8 @@ func (o *ProviderOpts) ConfigureCreateFlags(flags *pflag.FlagSet) { _ = flags.MarkDeprecated("machine-type", "use "+ProviderName+"-machine-type instead") flags.StringSliceVar(&o.Zones, "zones", nil, "DEPRECATED") _ = flags.MarkDeprecated("zones", "use "+ProviderName+"-zones instead") + flags.StringSliceVar(&o.ManagedSpotZones, ProviderName+"-managed-spot-zones", nil, + "subset of zones in managed instance groups that will use spot instances") flags.StringVar(&providerInstance.ServiceAccount, ProviderName+"-service-account", providerInstance.ServiceAccount, "Service account to use") @@ -1064,7 +1070,7 @@ func (o *ProviderOpts) ConfigureCreateFlags(flags *pflag.FlagSet) { fmt.Sprintf("Zones for cluster. If zones are formatted as AZ:N where N is an integer, the zone\n"+ "will be repeated N times. If > 1 zone specified, nodes will be geo-distributed\n"+ "regardless of geo (default [%s])", - strings.Join(defaultZones(string(vm.ArchAMD64)), ","))) + strings.Join(DefaultZones(string(vm.ArchAMD64)), ","))) flags.BoolVar(&o.preemptible, ProviderName+"-preemptible", false, "use preemptible GCE instances (lifetime cannot exceed 24h)") flags.BoolVar(&o.UseSpot, ProviderName+"-use-spot", false, @@ -1279,9 +1285,9 @@ func computeZones(opts vm.CreateOpts, providerOpts *ProviderOpts) ([]string, err } if len(zones) == 0 { if opts.GeoDistributed { - zones = defaultZones(opts.Arch) + zones = DefaultZones(opts.Arch) } else { - zones = []string{defaultZones(opts.Arch)[0]} + zones = []string{DefaultZones(opts.Arch)[0]} } } if providerOpts.useArmAMI() { @@ -1437,29 +1443,37 @@ func (p *Provider) computeInstanceArgs( return args, cleanUpFn, nil } -func instanceTemplateName(clusterName string) string { - return fmt.Sprintf("%s-template", clusterName) +func instanceTemplateName(clusterName string, zone string) string { + return fmt.Sprintf("%s-template-%s", clusterName, zone) } func instanceGroupName(clusterName string) string { return fmt.Sprintf("%s-group", clusterName) } -// createInstanceTemplate creates an instance template for the cluster. This is -// currently only used for managed instance group clusters. -func createInstanceTemplate(clusterName string, instanceArgs []string, labelsArg string) error { - templateName := instanceTemplateName(clusterName) - createTemplateArgs := []string{"compute", "instance-templates", "create"} - createTemplateArgs = append(createTemplateArgs, instanceArgs...) - createTemplateArgs = append(createTemplateArgs, "--labels", labelsArg) - createTemplateArgs = append(createTemplateArgs, templateName) - - cmd := exec.Command("gcloud", createTemplateArgs...) - output, err := cmd.CombinedOutput() - if err != nil { - return errors.Wrapf(err, "Command: gcloud %s\nOutput: %s", createTemplateArgs, output) +// createInstanceTemplates creates instance templates for a cluster for each +// zone with the specified instance args for each template. This is currently +// only used for managed instance group clusters. +func createInstanceTemplates( + l *logger.Logger, clusterName string, zoneToInstanceArgs map[string][]string, labelsArg string, +) error { + g := ui.NewDefaultSpinnerGroup(l, "creating instance templates", len(zoneToInstanceArgs)) + for zone, args := range zoneToInstanceArgs { + templateName := instanceTemplateName(clusterName, zone) + createTemplateArgs := []string{"compute", "instance-templates", "create"} + createTemplateArgs = append(createTemplateArgs, args...) 
+ createTemplateArgs = append(createTemplateArgs, "--labels", labelsArg) + createTemplateArgs = append(createTemplateArgs, templateName) + g.Go(func() error { + cmd := exec.Command("gcloud", createTemplateArgs...) + output, err := cmd.CombinedOutput() + if err != nil { + return errors.Wrapf(err, "Command: gcloud %s\nOutput: %s", createTemplateArgs, output) + } + return nil + }) } - return nil + return g.Wait() } // createInstanceGroups creates an instance group in each zone, for the cluster @@ -1467,11 +1481,9 @@ func createInstanceGroups( l *logger.Logger, project, clusterName string, zones []string, opts vm.CreateOpts, ) error { groupName := instanceGroupName(clusterName) - templateName := instanceTemplateName(clusterName) // Note that we set the IP addresses to be stateful, so that they remain the // same when instances are auto-healed, updated, or recreated. createGroupArgs := []string{"compute", "instance-groups", "managed", "create", - "--template", templateName, "--size", "0", "--stateful-external-ip", "enabled,auto-delete=on-permanent-instance-deletion", "--stateful-internal-ip", "enabled,auto-delete=on-permanent-instance-deletion", @@ -1498,7 +1510,11 @@ func createInstanceGroups( g := ui.NewDefaultSpinnerGroup(l, "creating instance groups", len(zones)) for _, zone := range zones { - argsWithZone := append(createGroupArgs[:len(createGroupArgs):len(createGroupArgs)], "--zone", zone) + templateName := instanceTemplateName(clusterName, zone) + argsWithZone := make([]string, len(createGroupArgs)) + copy(argsWithZone, createGroupArgs) + argsWithZone = append(argsWithZone, "--zone", zone) + argsWithZone = append(argsWithZone, "--template", templateName) g.Go(func() error { cmd := exec.Command("gcloud", argsWithZone...) output, err := cmd.CombinedOutput() @@ -1585,7 +1601,34 @@ func (p *Provider) Create( switch { case providerOpts.Managed: - err = createInstanceTemplate(opts.ClusterName, instanceArgs, labels) + zoneToInstanceArgs := make(map[string][]string) + for _, zone := range usedZones { + zoneToInstanceArgs[zone] = instanceArgs + } + // If spot instances are requested for specific zones, set the instance args + // for those zones to use spot instances. + if len(providerOpts.ManagedSpotZones) > 0 { + if providerOpts.UseSpot { + return errors.Newf("Use either --%[1]s-use-spot or --%[1]s-managed-spot-zones, not both", ProviderName) + } + spotProviderOpts := *providerOpts + spotProviderOpts.UseSpot = true + spotInstanceArgs, spotCleanUpFn, err := p.computeInstanceArgs(l, opts, &spotProviderOpts) + if spotCleanUpFn != nil { + defer spotCleanUpFn() + } + if err != nil { + return err + } + for _, zone := range providerOpts.ManagedSpotZones { + if _, ok := zoneToInstanceArgs[zone]; !ok { + return errors.Newf("the managed spot zone %q is not in the list of zones for the cluster", zone) + } + zoneToInstanceArgs[zone] = spotInstanceArgs + } + } + + err = createInstanceTemplates(l, opts.ClusterName, zoneToInstanceArgs, labels) if err != nil { return err } @@ -2339,8 +2382,7 @@ func listManagedInstanceGroups(project, groupName string) ([]jsonManagedInstance } // deleteInstanceTemplate deletes the instance template for the cluster. -func deleteInstanceTemplate(project, clusterName string) error { - templateName := instanceTemplateName(clusterName) +func deleteInstanceTemplate(project, templateName string) error { args := []string{"compute", "instance-templates", "delete", "--project", project, "--quiet", templateName} cmd := exec.Command("gcloud", args...)
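The managed-spot logic above reduces to building a zone-to-args map and overriding the entries named in the spot flag, rejecting spot zones that are not part of the cluster. In isolation (the argument values are hypothetical):

```go
package main

import (
	"fmt"

	"github.com/cockroachdb/errors"
)

// zoneArgs starts every zone from the on-demand args and switches the
// requested zones to the spot variant, validating membership first.
func zoneArgs(zones, spotZones []string, onDemandArgs, spotArgs []string) (map[string][]string, error) {
	out := make(map[string][]string, len(zones))
	for _, z := range zones {
		out[z] = onDemandArgs
	}
	for _, z := range spotZones {
		if _, ok := out[z]; !ok {
			return nil, errors.Newf("the managed spot zone %q is not in the list of zones for the cluster", z)
		}
		out[z] = spotArgs
	}
	return out, nil
}

func main() {
	m, err := zoneArgs(
		[]string{"us-east1-b", "us-east1-c"},
		[]string{"us-east1-c"},
		[]string{"--machine-type=n2-standard-4"},
		[]string{"--machine-type=n2-standard-4", "--provisioning-model=SPOT"},
	)
	fmt.Println(m, err)
}
```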
output, err := cmd.CombinedOutput() @@ -2418,9 +2460,19 @@ func (p *Provider) deleteManaged(l *logger.Logger, vms vm.List) error { // deleted. g = errgroup.Group{} for cluster, project := range clusterProjectMap { - g.Go(func() error { - return deleteInstanceTemplate(project /* project */, cluster /* cluster */) - }) + templates, err := listInstanceTemplates(project) + if err != nil { + return err + } + for _, template := range templates { + // Only delete templates that are part of the cluster. + if template.Properties.Labels[vm.TagCluster] != cluster { + continue + } + g.Go(func() error { + return deleteInstanceTemplate(project, template.Name) + }) + } } return g.Wait() } @@ -2617,6 +2669,7 @@ func (p *Provider) List(l *logger.Logger, opts vm.ListOptions) (vm.List, error) // Cluster (VM marked as empty) for it. This allows `Delete` to clean up // any MIG or instance template resources when there are no VMs to // derive it from. + clusterSeen := make(map[string]struct{}) for _, prj := range p.GetProjects() { projTemplatesInUse := templatesInUse[prj] if projTemplatesInUse == nil { @@ -2631,10 +2684,20 @@ func (p *Provider) List(l *logger.Logger, opts vm.ListOptions) (vm.List, error) if managed, ok := template.Properties.Labels[ManagedLabel]; !(ok && managed == "true") { continue } + // There can be multiple dangling templates for the same cluster. We + // only need to create one `EmptyCluster` VM for each cluster. + clusterName := template.Properties.Labels[vm.TagCluster] + if clusterName == "" { + continue + } + if _, ok := clusterSeen[clusterName]; ok { + continue + } + clusterSeen[clusterName] = struct{}{} // Create an `EmptyCluster` VM for templates that are not in use. if _, ok := projTemplatesInUse[template.Name]; !ok { vms = append(vms, vm.VM{ - Name: template.Name, + Name: vm.Name(clusterName, 0), Provider: ProviderName, Project: prj, Labels: template.Properties.Labels, @@ -2775,7 +2838,8 @@ func populateCostPerHour(l *logger.Logger, vms vm.List) error { } series, cpus, memory, err := decodeCustomType() if err != nil { - l.Errorf("Error estimating VM costs (will continue without): %v", err) + l.Errorf("Error estimating VM costs, "+ + "continuing without (consider ROACHPROD_NO_COST_ESTIMATES=true): %v", err) continue } workload.ComputeVmWorkload.MachineType = &cloudbilling.MachineType{ diff --git a/pkg/roachprod/vm/vm.go b/pkg/roachprod/vm/vm.go index cb27793f8061..1a6bd33125a1 100644 --- a/pkg/roachprod/vm/vm.go +++ b/pkg/roachprod/vm/vm.go @@ -441,6 +441,7 @@ type VolumeCreateOpts struct { } type ListOptions struct { + Username string // if set, -.* clusters are detected as 'mine' IncludeVolumes bool IncludeEmptyClusters bool ComputeEstimatedCost bool diff --git a/pkg/rpc/auth_tenant.go b/pkg/rpc/auth_tenant.go index 927973192030..84f9c055d98a 100644 --- a/pkg/rpc/auth_tenant.go +++ b/pkg/rpc/auth_tenant.go @@ -64,7 +64,7 @@ func (a tenantAuthorizer) authorize( return a.authBatch(ctx, sv, tenID, req.(*kvpb.BatchRequest)) case "/cockroach.roachpb.Internal/RangeLookup": - return a.authRangeLookup(tenID, req.(*kvpb.RangeLookupRequest)) + return a.authRangeLookup(ctx, tenID, req.(*kvpb.RangeLookupRequest)) case "/cockroach.roachpb.Internal/RangeFeed", "/cockroach.roachpb.Internal/MuxRangeFeed": return a.authRangeFeed(tenID, req.(*kvpb.RangeFeedRequest)) @@ -123,22 +123,22 @@ func (a tenantAuthorizer) authorize( return a.authTenant(tenID) case "/cockroach.server.serverpb.Status/SpanStats": - return a.authSpanStats(tenID, req.(*roachpb.SpanStatsRequest)) + return a.authSpanStats(ctx, 
tenID, req.(*roachpb.SpanStatsRequest)) case "/cockroach.roachpb.Internal/GetSpanConfigs": - return a.authGetSpanConfigs(tenID, req.(*roachpb.GetSpanConfigsRequest)) + return a.authGetSpanConfigs(ctx, tenID, req.(*roachpb.GetSpanConfigsRequest)) case "/cockroach.roachpb.Internal/SpanConfigConformance": - return a.authSpanConfigConformance(tenID, req.(*roachpb.SpanConfigConformanceRequest)) + return a.authSpanConfigConformance(ctx, tenID, req.(*roachpb.SpanConfigConformanceRequest)) case "/cockroach.roachpb.Internal/GetAllSystemSpanConfigsThatApply": return a.authGetAllSystemSpanConfigsThatApply(tenID, req.(*roachpb.GetAllSystemSpanConfigsThatApplyRequest)) case "/cockroach.roachpb.Internal/UpdateSpanConfigs": - return a.authUpdateSpanConfigs(tenID, req.(*roachpb.UpdateSpanConfigsRequest)) + return a.authUpdateSpanConfigs(ctx, tenID, req.(*roachpb.UpdateSpanConfigsRequest)) case "/cockroach.roachpb.Internal/GetRangeDescriptors": - return a.authGetRangeDescriptors(tenID, req.(*kvpb.GetRangeDescriptorsRequest)) + return a.authGetRangeDescriptors(ctx, tenID, req.(*kvpb.GetRangeDescriptorsRequest)) case "/cockroach.server.serverpb.Status/HotRangesV2": return a.authHotRangesV2(tenID) @@ -159,18 +159,32 @@ func (a tenantAuthorizer) authorize( "/cockroach.blobs.Blob/PutStream": return a.capabilitiesAuthorizer.HasNodelocalStorageCapability(ctx, tenID) + case "/cockroach.server.serverpb.Admin/ReadFromTenantInfo": + // NB: we don't check anything here as every tenant, even those who do not + // have HasCrossTenantRead, will call this even if only to learn that they + // are not a reader tenant. + return nil + default: return authErrorf("unknown method %q", fullMethod) } } func checkSpanBounds(rSpan, tenSpan roachpb.RSpan) error { - if !tenSpan.ContainsKeyRange(rSpan.Key, rSpan.EndKey) { - return authErrorf("requested key span %s not fully contained in tenant keyspace %s", rSpan, tenSpan) + if outsideTenant(rSpan, tenSpan) { + return spanErr(rSpan, tenSpan) } return nil } +func outsideTenant(rSpan, tenSpan roachpb.RSpan) bool { + return !tenSpan.ContainsKeyRange(rSpan.Key, rSpan.EndKey) +} + +func spanErr(rSpan, tenSpan roachpb.RSpan) error { + return authErrorf("requested key span %s not fully contained in tenant keyspace %s", rSpan, tenSpan) +} + // authBatch authorizes the provided tenant to invoke the Batch RPC with the // provided args. 
func (a tenantAuthorizer) authBatch( @@ -189,20 +203,27 @@ func (a tenantAuthorizer) authBatch( return authError(err.Error()) } tenSpan := tenantPrefix(tenID) - return checkSpanBounds(rSpan, tenSpan) + + if outsideTenant(rSpan, tenSpan) { + if args.IsReadOnly() && a.capabilitiesAuthorizer.HasCrossTenantRead(ctx, tenID) { + return nil + } + return spanErr(rSpan, tenSpan) + } + return nil } func (a tenantAuthorizer) authGetRangeDescriptors( - tenID roachpb.TenantID, args *kvpb.GetRangeDescriptorsRequest, + ctx context.Context, tenID roachpb.TenantID, args *kvpb.GetRangeDescriptorsRequest, ) error { - return validateSpan(tenID, args.Span) + return validateSpan(ctx, tenID, args.Span, true, a) } func (a tenantAuthorizer) authSpanStats( - tenID roachpb.TenantID, args *roachpb.SpanStatsRequest, + ctx context.Context, tenID roachpb.TenantID, args *roachpb.SpanStatsRequest, ) error { for _, span := range args.Spans { - err := validateSpan(tenID, span) + err := validateSpan(ctx, tenID, span, true, a) if err != nil { return err } @@ -213,10 +234,14 @@ func (a tenantAuthorizer) authSpanStats( // authRangeLookup authorizes the provided tenant to invoke the RangeLookup RPC // with the provided args. func (a tenantAuthorizer) authRangeLookup( - tenID roachpb.TenantID, args *kvpb.RangeLookupRequest, + ctx context.Context, tenID roachpb.TenantID, args *kvpb.RangeLookupRequest, ) error { tenSpan := tenantPrefix(tenID) if !tenSpan.ContainsKey(args.Key) { + // Allow it anyway if the tenant can read other tenants. + if a.capabilitiesAuthorizer.HasCrossTenantRead(ctx, tenID) { + return nil + } return authErrorf("requested key %s not fully contained in tenant keyspace %s", args.Key, tenSpan) } return nil @@ -332,10 +357,10 @@ func (a tenantAuthorizer) authGetAllSystemSpanConfigsThatApply( // authGetSpanConfigs authorizes the provided tenant to invoke the // GetSpanConfigs RPC with the provided args. func (a tenantAuthorizer) authGetSpanConfigs( - tenID roachpb.TenantID, args *roachpb.GetSpanConfigsRequest, + ctx context.Context, tenID roachpb.TenantID, args *roachpb.GetSpanConfigsRequest, ) error { for _, target := range args.Targets { - if err := validateSpanConfigTarget(tenID, target); err != nil { + if err := validateSpanConfigTarget(ctx, tenID, target, true, a); err != nil { return err } } @@ -345,15 +370,15 @@ func (a tenantAuthorizer) authGetSpanConfigs( // authUpdateSpanConfigs authorizes the provided tenant to invoke the // UpdateSpanConfigs RPC with the provided args. func (a tenantAuthorizer) authUpdateSpanConfigs( - tenID roachpb.TenantID, args *roachpb.UpdateSpanConfigsRequest, + ctx context.Context, tenID roachpb.TenantID, args *roachpb.UpdateSpanConfigsRequest, ) error { for _, entry := range args.ToUpsert { - if err := validateSpanConfigTarget(tenID, entry.Target); err != nil { + if err := validateSpanConfigTarget(ctx, tenID, entry.Target, false, a); err != nil { return err } } for _, target := range args.ToDelete { - if err := validateSpanConfigTarget(tenID, target); err != nil { + if err := validateSpanConfigTarget(ctx, tenID, target, false, a); err != nil { return err } } @@ -374,10 +399,10 @@ func (a tenantAuthorizer) authHotRangesV2(tenID roachpb.TenantID) error { // authSpanConfigConformance authorizes the provided tenant to invoke the // SpanConfigConformance RPC with the provided args. 
func (a tenantAuthorizer) authSpanConfigConformance( - tenID roachpb.TenantID, args *roachpb.SpanConfigConformanceRequest, + ctx context.Context, tenID roachpb.TenantID, args *roachpb.SpanConfigConformanceRequest, ) error { for _, sp := range args.Spans { - if err := validateSpan(tenID, sp); err != nil { + if err := validateSpan(ctx, tenID, sp, false, a); err != nil { return err } } @@ -415,7 +440,11 @@ func (a tenantAuthorizer) authTSDBQuery( // wholly contained within the tenant keyspace and system span config targets // must be well-formed. func validateSpanConfigTarget( - tenID roachpb.TenantID, spanConfigTarget roachpb.SpanConfigTarget, + ctx context.Context, + tenID roachpb.TenantID, + spanConfigTarget roachpb.SpanConfigTarget, + read bool, + a tenantAuthorizer, ) error { validateSystemTarget := func(target roachpb.SystemSpanConfigTarget) error { if target.SourceTenantID != tenID { @@ -444,7 +473,7 @@ func validateSpanConfigTarget( switch spanConfigTarget.Union.(type) { case *roachpb.SpanConfigTarget_Span: - return validateSpan(tenID, *spanConfigTarget.GetSpan()) + return validateSpan(ctx, tenID, *spanConfigTarget.GetSpan(), read, a) case *roachpb.SpanConfigTarget_SystemSpanConfigTarget: return validateSystemTarget(*spanConfigTarget.GetSystemSpanConfigTarget()) default: @@ -452,13 +481,22 @@ func validateSpanConfigTarget( } } -func validateSpan(tenID roachpb.TenantID, sp roachpb.Span) error { +func validateSpan( + ctx context.Context, tenID roachpb.TenantID, sp roachpb.Span, isRead bool, a tenantAuthorizer, +) error { tenSpan := tenantPrefix(tenID) rSpan, err := keys.SpanAddr(sp) if err != nil { return authError(err.Error()) } - return checkSpanBounds(rSpan, tenSpan) + if outsideTenant(rSpan, tenSpan) { + // Allow it anyway if the tenant can read other tenants. 
+ if isRead && a.capabilitiesAuthorizer.HasCrossTenantRead(ctx, tenID) { + return nil + } + return spanErr(rSpan, tenSpan) + } + return nil } const tenantLoggingTag = "client-tenant" diff --git a/pkg/rpc/auth_test.go b/pkg/rpc/auth_test.go index 2a032441407e..f656522965ba 100644 --- a/pkg/rpc/auth_test.go +++ b/pkg/rpc/auth_test.go @@ -32,6 +32,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/settings" "github.com/cockroachdb/cockroach/pkg/spanconfig" + "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/ts/tspb" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/errors" @@ -920,20 +921,46 @@ func TestTenantAuthRequest(t *testing.T) { {req: "req", expErr: `unknown method "/cockroach.rpc.Testing/Foo"`}, }, } { - t.Run(method, func(t *testing.T) { + t.Run(strings.ReplaceAll(method, "/", "_"), func(t *testing.T) { ctx := context.Background() for _, tc := range tests { t.Run("", func(t *testing.T) { - err := rpc.TestingAuthorizeTenantRequest( - ctx, &settings.Values{}, tenID, method, tc.req, tenantcapabilitiesauthorizer.NewAllowEverythingAuthorizer(), - ) - if tc.expErr == noError { - require.NoError(t, err) - } else { - require.Error(t, err) - require.Equal(t, codes.Unauthenticated, status.Code(err)) - require.Regexp(t, tc.expErr, err) - } + testutils.RunTrueAndFalse(t, "cross", func(t *testing.T, canCrossRead bool) { + err := rpc.TestingAuthorizeTenantRequest(ctx, &settings.Values{}, tenID, method, tc.req, mockAuthorizer{ + hasCrossTenantRead: canCrossRead, + hasCapabilityForBatch: true, + hasNodestatusCapability: true, + hasTSDBQueryCapability: true, + hasNodelocalStorageCapability: true, + hasExemptFromRateLimiterCapability: true, + hasTSDBAllCapability: true, + }) + + // If the "expected" error is about tenant bounds but the tenant has + // cross-read capability and the request is a read, expect no error. + if canCrossRead && strings.Contains(tc.expErr, "fully contained") { + switch method { + case "/cockroach.roachpb.Internal/Batch": + if tc.req.(*kvpb.BatchRequest).IsReadOnly() { + tc.expErr = noError + } + case "/cockroach.roachpb.Internal/GetRangeDescriptors": + tc.expErr = noError + case "/cockroach.roachpb.Internal/RangeLookup": + tc.expErr = noError + case "/cockroach.roachpb.Internal/GetSpanConfigs": + tc.expErr = noError + } + } + + if tc.expErr == noError { + require.NoError(t, err) + } else { + require.Error(t, err) + require.Equal(t, codes.Unauthenticated, status.Code(err)) + require.Regexp(t, tc.expErr, err) + } + }) }) } }) @@ -1069,6 +1096,7 @@ func TestTenantAuthCapabilityChecks(t *testing.T) { } type mockAuthorizer struct { + hasCrossTenantRead bool hasCapabilityForBatch bool hasNodestatusCapability bool hasTSDBQueryCapability bool @@ -1092,6 +1120,10 @@ func (m mockAuthorizer) HasProcessDebugCapability( return errors.New("tenant does not have capability") } +func (m mockAuthorizer) HasCrossTenantRead(ctx context.Context, tenID roachpb.TenantID) bool { + return m.hasCrossTenantRead +} + var _ tenantcapabilities.Authorizer = &mockAuthorizer{} // HasCapabilityForBatch implements the tenantcapabilities.Authorizer interface. 
diff --git a/pkg/rpc/context.go b/pkg/rpc/context.go index afd3351d932d..3d9ec0bc69ac 100644 --- a/pkg/rpc/context.go +++ b/pkg/rpc/context.go @@ -288,6 +288,16 @@ func (c *Context) SetLoopbackDialer(loopbackDialFn func(context.Context) (net.Co c.loopbackDialFn = loopbackDialFn } +// StoreLivenessWithdrawalGracePeriod computes the grace period after a store restarts before which it will +// not withdraw support from other stores. +func (c *Context) StoreLivenessWithdrawalGracePeriod() time.Duration { + // RPCHeartbeatInterval and RPCHeartbeatTimeout ensure the remote store + // probes the RPC connection to the local store. DialTimeout ensures the + // remote store has enough time to dial the local store, and NetworkTimeout + // ensures the remote store's heartbeat is received by the local store. + return c.RPCHeartbeatInterval + c.RPCHeartbeatTimeout + base.DialTimeout + base.NetworkTimeout +} + // ContextOptions are passed to NewContext to set up a new *Context. // All pointer fields and TenantID are required. type ContextOptions struct { diff --git a/pkg/rpc/context_test.go b/pkg/rpc/context_test.go index 8313e27f4dec..82589ae62946 100644 --- a/pkg/rpc/context_test.go +++ b/pkg/rpc/context_test.go @@ -235,8 +235,12 @@ func testClockOffsetInPingRequestInternal(t *testing.T, clientOnly bool) { t.Logf("client dial") // Dial: this causes the heartbeats to start. remoteAddr := ln.Addr().String() - _, err = rpcCtxClient.GRPCDialNode(remoteAddr, 1, roachpb.Locality{}, SystemClass).Connect(ctx) - require.NoError(t, err) + testutils.SucceedsSoon(t, func() error { + _, err = rpcCtxClient.GRPCDialNode( + remoteAddr, 1, roachpb.Locality{}, SystemClass, + ).Connect(ctx) + return err + }) // The first ping establishes the TCP+TLS connection and uses a blocking dialback, // so it's usually pretty noisy in terms of detecting clock offsets. The second @@ -288,12 +292,12 @@ func (s *rangefeedEventSink) Context() context.Context { return s.ctx } -// Note that Send itself is not thread-safe (grpc stream is not thread-safe), -// but tests were written in a way that sends sequentially, ensuring -// thread-safety for Send. -func (s *rangefeedEventSink) SendIsThreadSafe() {} +// Note that SendUnbuffered itself is not thread-safe (grpc stream is not +// thread-safe), but tests were written in a way that sends sequentially, +// ensuring thread-safety for SendUnbuffered.
+func (s *rangefeedEventSink) SendUnbufferedIsThreadSafe() {} -func (s *rangefeedEventSink) Send(event *kvpb.RangeFeedEvent) error { +func (s *rangefeedEventSink) SendUnbuffered(event *kvpb.RangeFeedEvent) error { return s.stream.Send(&kvpb.MuxRangeFeedEvent{RangeFeedEvent: *event}) } @@ -320,7 +324,7 @@ func (*internalServer) RangeLookup( func (s *internalServer) singleRangeFeed(sink kvpb.RangeFeedEventSink) error { for _, ev := range s.rangeFeedEvents { evCpy := ev - if err := sink.Send(&evCpy); err != nil { + if err := sink.SendUnbuffered(&evCpy); err != nil { return err } } diff --git a/pkg/rpc/metrics.go b/pkg/rpc/metrics.go index aff0edd72448..dbb331fb7a64 100644 --- a/pkg/rpc/metrics.go +++ b/pkg/rpc/metrics.go @@ -147,8 +147,10 @@ func (m *Metrics) makeLabels(k peerKey, remoteLocality roachpb.Locality) []strin } childLabels := []string{} + matching := true for i := 0; i < length; i++ { + childLabels = append(childLabels, m.locality.Tiers[i].Value) if matching { childLabels = append(childLabels, remoteLocality.Tiers[i].Value) if m.locality.Tiers[i].Value != remoteLocality.Tiers[i].Value { @@ -161,6 +163,7 @@ func (m *Metrics) makeLabels(k peerKey, remoteLocality roachpb.Locality) []strin } // Pad with empty strings if the remote locality is shorter than ours. for i := length; i < localLen; i++ { + childLabels = append(childLabels, m.locality.Tiers[i].Value) childLabels = append(childLabels, "") } return childLabels @@ -170,7 +173,8 @@ func newMetrics(locality roachpb.Locality) *Metrics { childLabels := []string{"remote_node_id", "remote_addr", "class"} localityLabels := []string{} for _, tier := range locality.Tiers { - localityLabels = append(localityLabels, tier.Key) + localityLabels = append(localityLabels, "source_"+tier.Key) + localityLabels = append(localityLabels, "destination_"+tier.Key) } m := Metrics{ locality: locality, diff --git a/pkg/security/auth.go b/pkg/security/auth.go index 5fb991f3f9af..f3c12a1bf506 100644 --- a/pkg/security/auth.go +++ b/pkg/security/auth.go @@ -97,8 +97,9 @@ func UnsetNodeSubject() { // GetCertificateUserScope function expands a cert into a set of "scopes" with // each possible username (and tenant ID). type CertificateUserScope struct { - Username string - TenantID roachpb.TenantID + Username string + TenantID roachpb.TenantID + TenantName roachpb.TenantName // global is set to true to indicate that the certificate unscoped to // any tenant is a global client certificate which can authenticate // on any tenant. 
This is ONLY for backward compatibility with old @@ -212,15 +213,26 @@ func GetCertificateUserScope( peerCert *x509.Certificate, ) (userScopes []CertificateUserScope, _ error) { for _, uri := range peerCert.URIs { - uriString := uri.String() - if URISANHasCRDBPrefix(uriString) { - tenantID, user, err := ParseTenantURISAN(uriString) - if err != nil { - return nil, err - } - scope := CertificateUserScope{ - Username: user, - TenantID: tenantID, + if isCRDBSANURI(uri) { + var scope CertificateUserScope + if isTenantNameSANURI(uri) { + tenantName, user, err := parseTenantNameURISAN(uri) + if err != nil { + return nil, err + } + scope = CertificateUserScope{ + Username: user, + TenantName: tenantName, + } + } else { + tenantID, user, err := parseTenantURISAN(uri) + if err != nil { + return nil, err + } + scope = CertificateUserScope{ + Username: user, + TenantID: tenantID, + } } userScopes = append(userScopes, scope) } diff --git a/pkg/security/auth_test.go b/pkg/security/auth_test.go index 4f6d9633df13..a702e5de9b04 100644 --- a/pkg/security/auth_test.go +++ b/pkg/security/auth_test.go @@ -249,6 +249,50 @@ func TestGetCertificateUserScope(t *testing.T) { require.True(t, userScopes[0].Global) } }) + + t.Run("extracts username, tenantName from tenant-name URI SAN", func(t *testing.T) { + state := makeFakeTLSState(t, "(CN=foo)uri:crdb://tenant-name/tenant10/user/foo;(CN=CA)") + cert := state.PeerCertificates[0] + if userScopes, err := security.GetCertificateUserScope(cert); err != nil { + t.Error(err) + } else { + require.Equal(t, 1, len(userScopes)) + require.Equal(t, "foo", userScopes[0].Username) + require.Equal(t, roachpb.TenantName("tenant10"), userScopes[0].TenantName) + require.False(t, userScopes[0].Global) + } + }) + + t.Run("extracts username, tenantName from tenant-name URI SAN with URI scheme in upper case", func(t *testing.T) { + state := makeFakeTLSState(t, "(CN=foo)uri:CRDB://tenant-name/tenant10/user/foo;(CN=CA)") + cert := state.PeerCertificates[0] + if userScopes, err := security.GetCertificateUserScope(cert); err != nil { + t.Error(err) + } else { + require.Equal(t, 1, len(userScopes)) + require.Equal(t, "foo", userScopes[0].Username) + require.Equal(t, roachpb.TenantName("tenant10"), userScopes[0].TenantName) + require.False(t, userScopes[0].Global) + } + }) + + t.Run("extracts both tenant URI SAN and tenant name URI SAN when both are present", func(t *testing.T) { + state := makeFakeTLSState(t, "(CN=foo)uri:crdb://tenant-name/tenant10/user/bar,uri:crdb://tenant/123/user/foo;(CN=CA)") + cert := state.PeerCertificates[0] + if userScopes, err := security.GetCertificateUserScope(cert); err != nil { + t.Error(err) + } else { + require.Equal(t, 2, len(userScopes)) + + require.Equal(t, "bar", userScopes[0].Username) + require.Equal(t, roachpb.TenantName("tenant10"), userScopes[0].TenantName) + require.False(t, userScopes[0].Global) + + require.Equal(t, "foo", userScopes[1].Username) + require.Equal(t, roachpb.MustMakeTenantID(123), userScopes[1].TenantID) + require.False(t, userScopes[1].Global) + } + }) } func TestSetCertPrincipalMap(t *testing.T) { diff --git a/pkg/security/certificate_manager_test.go b/pkg/security/certificate_manager_test.go index bec0af204117..d7ebbfeb7091 100644 --- a/pkg/security/certificate_manager_test.go +++ b/pkg/security/certificate_manager_test.go @@ -96,7 +96,13 @@ func TestManagerWithPrincipalMap(t *testing.T) { certsDir, caKey, testKeySize, time.Hour*96, true, true, )) require.NoError(t, security.CreateClientPair( - certsDir, caKey, testKeySize, 
time.Hour*48, true, username.TestUserName(), []roachpb.TenantID{roachpb.SystemTenantID}, false, + certsDir, caKey, testKeySize, time.Hour*48, true, username.TestUserName(), + []roachpb.TenantID{roachpb.SystemTenantID}, nil /* tenantNameScope */, false, + )) + require.NoError(t, security.CreateClientPair( + certsDir, caKey, testKeySize, time.Hour*48, true, username.TestUserName(), + nil /* tenantScope */, []roachpb.TenantName{roachpb.TenantName(roachpb.SystemTenantID.String())}, + false, )) require.NoError(t, security.CreateNodePair( certsDir, caKey, testKeySize, time.Hour*48, true, []string{"127.0.0.1", "foo"}, diff --git a/pkg/security/certs.go b/pkg/security/certs.go index c6c4f19ab61a..409684b27fdf 100644 --- a/pkg/security/certs.go +++ b/pkg/security/certs.go @@ -375,6 +375,7 @@ func CreateClientPair( overwrite bool, user username.SQLUsername, tenantIDs []roachpb.TenantID, + tenantNames []roachpb.TenantName, wantPKCS8Key bool, ) error { if len(caKeyPath) == 0 { @@ -411,7 +412,7 @@ func CreateClientPair( return errors.Wrap(err, "could not generate new client key") } - clientCert, err := GenerateClientCert(caCert, caPrivateKey, clientKey.Public(), lifetime, user, tenantIDs) + clientCert, err := GenerateClientCert(caCert, caPrivateKey, clientKey.Public(), lifetime, user, tenantIDs, tenantNames) if err != nil { return errors.Wrap(err, "error creating client certificate and key") } diff --git a/pkg/security/certs_test.go b/pkg/security/certs_test.go index f6a10fde5a91..e0a68af34526 100644 --- a/pkg/security/certs_test.go +++ b/pkg/security/certs_test.go @@ -183,47 +183,84 @@ func TestGenerateClientCerts(t *testing.T) { securityassets.ResetLoader() defer ResetTest() - certsDir := t.TempDir() - - caKeyFile := certsDir + "/ca.key" - // Generate CA key and crt. - require.NoError(t, security.CreateCAPair(certsDir, caKeyFile, testKeySize, - time.Hour*72, false /* allowReuse */, false /* overwrite */)) - user := username.MakeSQLUsernameFromPreNormalizedString("user") - tenantIDs := []roachpb.TenantID{roachpb.SystemTenantID, roachpb.MustMakeTenantID(123)} - // Create tenant-scoped client cert. - require.NoError(t, security.CreateClientPair( - certsDir, - caKeyFile, - testKeySize, - 48*time.Hour, - false, /*overwrite */ - user, - tenantIDs, - false /* wantPKCS8Key */)) + type testCase struct { + desc string + tenantIDs []uint64 + tenantNames []string + } - // Load and verify the certificates. - cl := security.NewCertificateLoader(certsDir) - require.NoError(t, cl.Load()) - infos := cl.Certificates() - for _, info := range infos { - require.NoError(t, info.Error) + testCases := []testCase{ + { + desc: "test_with_tenant_id_scope", + tenantIDs: []uint64{123}, + }, + { + desc: "test_with_tenant_name_scope", + tenantNames: []string{"tenant10"}, + }, + { + desc: "test_with_tenant_id_and_tenant_name_scope", + tenantIDs: []uint64{123}, + tenantNames: []string{"tenant10"}, + }, } - // We expect two certificates: the CA certificate and the tenant scoped client certificate. - require.Equal(t, 2, len(infos)) - expectedClientCrtName := fmt.Sprintf("client.%s.crt", user) - expectedSANs, err := security.MakeTenantURISANs(user, tenantIDs) - require.NoError(t, err) - for _, info := range infos { - if info.Filename == "ca.crt" { - continue + for _, tc := range testCases { + certsDir := t.TempDir() + + caKeyFile := certsDir + "/ca.key" + // Generate CA key and crt.
+ require.NoError(t, security.CreateCAPair(certsDir, caKeyFile, testKeySize, + time.Hour*72, false /* allowReuse */, false /* overwrite */)) + + tenantIDs := []roachpb.TenantID{roachpb.SystemTenantID} + for _, tenantID := range tc.tenantIDs { + tenantIDs = append(tenantIDs, roachpb.MustMakeTenantID(tenantID)) + } + var tenantNames []roachpb.TenantName + for _, tenantName := range tc.tenantNames { + tenantNames = append(tenantNames, roachpb.TenantName(tenantName)) + } + + // Create tenant-scoped client cert. + user := username.MakeSQLUsernameFromPreNormalizedString("user") + require.NoError(t, security.CreateClientPair( + certsDir, + caKeyFile, + testKeySize, + 48*time.Hour, + false, /*overwrite */ + user, + tenantIDs, + tenantNames, + false /* wantPKCS8Key */)) + + // Load and verify the certificates. + cl := security.NewCertificateLoader(certsDir) + require.NoError(t, cl.Load()) + infos := cl.Certificates() + for _, info := range infos { + require.NoError(t, info.Error) + } + + // We expect two certificates: the CA certificate and the tenant scoped client certificate. + require.Equal(t, 2, len(infos)) + expectedClientCrtName := fmt.Sprintf("client.%s.crt", user) + expectedTenantIDSANs, err := security.MakeTenantURISANs(user, tenantIDs) + require.NoError(t, err) + expectedTenantNameSANs, err := security.MakeTenantNameURISANs(user, tenantNames) + require.NoError(t, err) + expectedSANs := append(expectedTenantIDSANs, expectedTenantNameSANs...) + for _, info := range infos { + if info.Filename == "ca.crt" { + continue + } + require.Equal(t, security.ClientPem, info.FileUsage) + require.Equal(t, expectedClientCrtName, info.Filename) + require.Equal(t, 1, len(info.ParsedCertificates)) + require.Equal(t, len(tenantIDs)+len(tenantNames), len(info.ParsedCertificates[0].URIs)) + require.Equal(t, expectedSANs, info.ParsedCertificates[0].URIs) } - require.Equal(t, security.ClientPem, info.FileUsage) - require.Equal(t, expectedClientCrtName, info.Filename) - require.Equal(t, 1, len(info.ParsedCertificates)) - require.Equal(t, len(tenantIDs), len(info.ParsedCertificates[0].URIs)) - require.Equal(t, expectedSANs, info.ParsedCertificates[0].URIs) } } @@ -290,6 +327,7 @@ func generateBaseCerts(certsDir string, clientCertLifetime time.Duration) error true, username.RootUserName(), []roachpb.TenantID{roachpb.SystemTenantID}, + nil, /* tenantNames */ false, ); err != nil { return err @@ -344,14 +382,16 @@ func generateSplitCACerts(certsDir string) error { if err := security.CreateClientPair( certsDir, filepath.Join(certsDir, certnames.EmbeddedClientCAKey), - testKeySize, time.Hour*48, true, username.NodeUserName(), []roachpb.TenantID{roachpb.SystemTenantID}, false, + testKeySize, time.Hour*48, true, username.NodeUserName(), + []roachpb.TenantID{roachpb.SystemTenantID}, nil /* tenantNames */, false, ); err != nil { return errors.Wrap(err, "could not generate Client pair") } if err := security.CreateClientPair( certsDir, filepath.Join(certsDir, certnames.EmbeddedClientCAKey), - testKeySize, time.Hour*48, true, username.RootUserName(), []roachpb.TenantID{roachpb.SystemTenantID}, false, + testKeySize, time.Hour*48, true, username.RootUserName(), + []roachpb.TenantID{roachpb.SystemTenantID}, nil, false, ); err != nil { return errors.Wrap(err, "could not generate Client pair") } diff --git a/pkg/security/x509.go b/pkg/security/x509.go index 9089dadead87..f89424910659 100644 --- a/pkg/security/x509.go +++ b/pkg/security/x509.go @@ -35,11 +35,13 @@ import ( const ( // Make certs valid a day before to handle clock 
issues, specifically // boot2docker: https://github.com/boot2docker/boot2docker/issues/69 - validFrom = -time.Hour * 24 - maxPathLength = 1 - caCommonName = "Cockroach CA" - tenantURISANPrefixString = "crdb://" - tenantURISANFormatString = tenantURISANPrefixString + "tenant/%d/user/%s" + validFrom = -time.Hour * 24 + maxPathLength = 1 + caCommonName = "Cockroach CA" + tenantURISANSchemeString = "crdb" + tenantNamePrefixString = "tenant-name" + tenantURISANFormatString = tenantURISANSchemeString + "://" + "tenant/%d/user/%s" + tenantNameURISANFormatString = tenantURISANSchemeString + "://" + tenantNamePrefixString + "/%s/user/%s" // TenantsOU is the OrganizationalUnit that determines a client certificate should be treated as a tenant client // certificate (as opposed to a KV node client certificate). @@ -253,7 +255,8 @@ func GenerateClientCert( clientPublicKey crypto.PublicKey, lifetime time.Duration, user username.SQLUsername, - tenantID []roachpb.TenantID, + tenantIDs []roachpb.TenantID, + tenantNames []roachpb.TenantName, ) ([]byte, error) { // TODO(marc): should we add extra checks? @@ -275,11 +278,17 @@ func GenerateClientCert( // Set client-specific fields. // Client authentication only. template.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth} - urls, err := MakeTenantURISANs(user, tenantID) + tenantIDURLs, err := MakeTenantURISANs(user, tenantIDs) if err != nil { return nil, err } - template.URIs = append(template.URIs, urls...) + tenantNameURLs, err := MakeTenantNameURISANs(user, tenantNames) + if err != nil { + return nil, err + } + template.URIs = append(template.URIs, tenantIDURLs...) + template.URIs = append(template.URIs, tenantNameURLs...) + certBytes, err := x509.CreateCertificate(rand.Reader, template, caCert, clientPublicKey, caPrivateKey) if err != nil { return nil, err @@ -334,13 +343,34 @@ func MakeTenantURISANs( return urls, nil } -// URISANHasCRDBPrefix indicates whether a URI string has the tenant URI SAN prefix. -func URISANHasCRDBPrefix(rawlURI string) bool { - return strings.HasPrefix(rawlURI, tenantURISANPrefixString) +// MakeTenantNameURISANs constructs the tenant name SAN URI for the client certificate. +func MakeTenantNameURISANs( + username username.SQLUsername, tenantNames []roachpb.TenantName, +) ([]*url.URL, error) { + urls := make([]*url.URL, 0, len(tenantNames)) + for _, tenantName := range tenantNames { + uri, err := url.Parse(fmt.Sprintf(tenantNameURISANFormatString, tenantName, username.Normalized())) + if err != nil { + return nil, err + } + urls = append(urls, uri) + } + return urls, nil +} + +// isCRDBSANURI indicates whether the URI uses CRDB scheme. +func isCRDBSANURI(uri *url.URL) bool { + return uri.Scheme == tenantURISANSchemeString +} + +// isTenantNameSANURI indicates whether the URI is using tenant name to identify the tenant. +func isTenantNameSANURI(uri *url.URL) bool { + return uri.Host == tenantNamePrefixString } -// ParseTenantURISAN extracts the user and tenant ID contained within a tenant URI SAN. -func ParseTenantURISAN(rawURL string) (roachpb.TenantID, string, error) { +// parseTenantURISAN extracts the user and tenant ID contained within a tenant URI SAN. 
+func parseTenantURISAN(uri *url.URL) (roachpb.TenantID, string, error) { + rawURL := uri.String() r := strings.NewReader(rawURL) var tID uint64 var username string @@ -350,3 +380,16 @@ func ParseTenantURISAN(rawURL string) (roachpb.TenantID, string, error) { } return roachpb.MustMakeTenantID(tID), username, nil } + +// parseTenantNameURISAN extracts the user and tenant name contained within a tenant URI SAN. +func parseTenantNameURISAN(uri *url.URL) (roachpb.TenantName, string, error) { + if !isCRDBSANURI(uri) || !isTenantNameSANURI(uri) { + return roachpb.TenantName(""), "", errors.Errorf("invalid tenant-name URI SAN %q", uri.String()) + } + parts := strings.Split(uri.Path, "/") + if len(parts) != 4 { + return roachpb.TenantName(""), "", errors.Errorf("invalid tenant-name URI SAN %q", uri.String()) + } + tenantName, username := parts[1], parts[3] + return roachpb.TenantName(tenantName), username, nil +} diff --git a/pkg/security/x509_test.go b/pkg/security/x509_test.go index 987da23352d5..ba1cc85e3c1e 100644 --- a/pkg/security/x509_test.go +++ b/pkg/security/x509_test.go @@ -88,15 +88,16 @@ func TestGenerateCertLifetime(t *testing.T) { // Create a Client certificate expiring in 4 days. Should get reduced to the CA lifetime. clientDuration := time.Hour * 96 - _, err = security.GenerateClientCert(caCert, testKey, testKey.Public(), clientDuration, username.TestUserName(), []roachpb.TenantID{roachpb.SystemTenantID}) + _, err = security.GenerateClientCert(caCert, testKey, testKey.Public(), clientDuration, username.TestUserName(), + []roachpb.TenantID{roachpb.SystemTenantID}, nil) if !testutils.IsError(err, "CA lifetime is .*, shorter than the requested .*") { t.Fatal(err) } // Try again, but expiring before the CA cert. clientDuration = time.Hour * 24 - clientBytes, err := security.GenerateClientCert(caCert, testKey, testKey.Public(), clientDuration, username.TestUserName(), []roachpb.TenantID{roachpb.SystemTenantID}) - + clientBytes, err := security.GenerateClientCert(caCert, testKey, testKey.Public(), clientDuration, + username.TestUserName(), []roachpb.TenantID{roachpb.SystemTenantID}, nil) if err != nil { t.Fatal(err) } @@ -109,5 +110,4 @@ func TestGenerateCertLifetime(t *testing.T) { if a, e := clientCert.NotAfter, now.Add(clientDuration); !timesFuzzyEqual(a, e) { t.Fatalf("client expiration differs from requested: %s vs %s", a, e) } - } diff --git a/pkg/server/BUILD.bazel b/pkg/server/BUILD.bazel index 63261e6a65e2..3f31f134d047 100644 --- a/pkg/server/BUILD.bazel +++ b/pkg/server/BUILD.bazel @@ -132,6 +132,7 @@ go_library( "//pkg/kv/kvserver/kvflowcontrol/kvflowdispatch", "//pkg/kv/kvserver/kvflowcontrol/kvflowhandle", "//pkg/kv/kvserver/kvflowcontrol/node_rac2", + "//pkg/kv/kvserver/kvflowcontrol/rac2", "//pkg/kv/kvserver/kvserverbase", "//pkg/kv/kvserver/kvserverpb", "//pkg/kv/kvserver/kvstorage", @@ -146,6 +147,7 @@ go_library( "//pkg/kv/kvserver/rangefeed", "//pkg/kv/kvserver/rangelog", "//pkg/kv/kvserver/reports", + "//pkg/kv/kvserver/storeliveness", "//pkg/multitenant", "//pkg/multitenant/mtinfopb", "//pkg/multitenant/multitenantcpu", @@ -180,6 +182,7 @@ go_library( "//pkg/server/diagnostics", "//pkg/server/diagnostics/diagnosticspb", "//pkg/server/goroutinedumper", + "//pkg/server/license", "//pkg/server/pgurl", "//pkg/server/privchecker", "//pkg/server/profiler", diff --git a/pkg/server/admin.go b/pkg/server/admin.go index 9e44240abcdc..d3dbe3e3545e 100644 --- a/pkg/server/admin.go +++ b/pkg/server/admin.go @@ -37,6 +37,7 @@ import ( 
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverpb" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness/livenesspb" + "github.com/cockroachdb/cockroach/pkg/multitenant/mtinfopb" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/rpc" "github.com/cockroachdb/cockroach/pkg/security/username" @@ -81,6 +82,7 @@ import ( gwutil "github.com/grpc-ecosystem/grpc-gateway/utilities" "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" grpcstatus "google.golang.org/grpc/status" ) @@ -275,13 +277,15 @@ func (s *adminServer) RegisterGateway( http.Error(w, "invalid id", http.StatusBadRequest) return } - // Add default user when running in Insecure mode because we don't - // retrieve the user from gRPC metadata (which falls back to `root`) - // but from HTTP metadata (which does not). - if s.sqlServer.cfg.Insecure { - ctx := req.Context() - ctx = authserver.ContextWithHTTPAuthInfo(ctx, username.RootUser, 0) - req = req.WithContext(ctx) + + // The privilege checks in the privilege checker below checks the user in the incoming + // gRPC metadata. + md := authserver.TranslateHTTPAuthInfoToGRPCMetadata(req.Context(), req) + authCtx := metadata.NewIncomingContext(req.Context(), md) + authCtx = s.AnnotateCtx(authCtx) + if err := s.privilegeChecker.RequireViewActivityAndNoViewActivityRedactedPermission(authCtx); err != nil { + http.Error(w, err.Error(), http.StatusForbidden) + return } s.getStatementBundle(req.Context(), id, w) }) @@ -2601,12 +2605,12 @@ func (s *adminServer) QueryPlan( } // getStatementBundle retrieves the statement bundle with the given id and -// writes it out as an attachment. +// writes it out as an attachment. Note this function assumes the user has +// permission to access the statement bundle. func (s *adminServer) getStatementBundle(ctx context.Context, id int64, w http.ResponseWriter) { - sqlUsername := authserver.UserFromHTTPAuthInfoContext(ctx) row, err := s.internalExecutor.QueryRowEx( ctx, "admin-stmt-bundle", nil, /* txn */ - sessiondata.InternalExecutorOverride{User: sqlUsername}, + sessiondata.NodeUserSessionDataOverride, "SELECT bundle_chunks FROM system.statement_diagnostics WHERE id=$1 AND bundle_chunks IS NOT NULL", id, ) @@ -2625,7 +2629,7 @@ func (s *adminServer) getStatementBundle(ctx context.Context, id int64, w http.R for _, chunkID := range chunkIDs { chunkRow, err := s.internalExecutor.QueryRowEx( ctx, "admin-stmt-bundle", nil, /* txn */ - sessiondata.InternalExecutorOverride{User: sqlUsername}, + sessiondata.NodeUserSessionDataOverride, "SELECT data FROM system.statement_bundle_chunks WHERE id=$1", chunkID, ) @@ -4019,3 +4023,53 @@ func (s *systemAdminServer) ListTenants( Tenants: tenantList, }, nil } + +// ReadFromTenantInfo returns the read-from info for a tenant, if configured. 
+func (s *systemAdminServer) ReadFromTenantInfo( + ctx context.Context, req *serverpb.ReadFromTenantInfoRequest, +) (*serverpb.ReadFromTenantInfoResponse, error) { + tenantID, ok := roachpb.ClientTenantFromContext(ctx) + if ok && req.TenantID != tenantID { + return nil, errors.Errorf("mismatched tenant IDs") + } + tenantID = req.TenantID + if tenantID.IsSystem() { + return &serverpb.ReadFromTenantInfoResponse{}, nil + } + + var dstID roachpb.TenantID + var dstTenant *mtinfopb.TenantInfo + if err := s.sqlServer.internalDB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + found, err := sql.GetTenantRecordByID(ctx, txn, tenantID, s.st) + if err != nil { + return err + } + if found.ReadFromTenant == nil || !found.ReadFromTenant.IsSet() { + return nil + } + dstID = *found.ReadFromTenant + target, err := sql.GetTenantRecordByID(ctx, txn, dstID, s.st) + if err != nil { + return err + } + dstTenant = target + return nil + }); err != nil { + return nil, err + } + + if dstTenant == nil { + return &serverpb.ReadFromTenantInfoResponse{}, nil + } + + if dstTenant.PhysicalReplicationConsumerJobID == 0 { + return nil, errors.Errorf("missing job ID") + } + + progress, err := jobs.LoadJobProgress(ctx, s.sqlServer.internalDB, dstTenant.PhysicalReplicationConsumerJobID) + if err != nil { + return nil, err + } + + return &serverpb.ReadFromTenantInfoResponse{ReadFrom: dstID, ReadAt: progress.GetStreamIngest().ReplicatedTime}, nil +} diff --git a/pkg/server/admission.go b/pkg/server/admission.go index 9b957f81fe9c..a8ec7cc9e28b 100644 --- a/pkg/server/admission.go +++ b/pkg/server/admission.go @@ -15,43 +15,42 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvflowcontrol" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvflowcontrol/kvflowcontrolpb" - "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/util/admission" - "github.com/cockroachdb/cockroach/pkg/util/admission/admissionpb" ) type admittedLogEntryAdaptor struct { - dispatchWriter kvflowcontrol.DispatchWriter + dispatchWriterV1 kvflowcontrol.DispatchWriter + callbackV2 admission.OnLogEntryAdmitted } var _ admission.OnLogEntryAdmitted = &admittedLogEntryAdaptor{} func newAdmittedLogEntryAdaptor( - dispatchWriter kvflowcontrol.DispatchWriter, + dispatchWriterV1 kvflowcontrol.DispatchWriter, callbackV2 admission.OnLogEntryAdmitted, ) *admittedLogEntryAdaptor { return &admittedLogEntryAdaptor{ - dispatchWriter: dispatchWriter, + dispatchWriterV1: dispatchWriterV1, + callbackV2: callbackV2, } } // AdmittedLogEntry implements the admission.OnLogEntryAdmitted interface. func (a *admittedLogEntryAdaptor) AdmittedLogEntry( - ctx context.Context, - origin roachpb.NodeID, - pri admissionpb.WorkPriority, - storeID roachpb.StoreID, - rangeID roachpb.RangeID, - pos admission.LogPosition, + ctx context.Context, cbState admission.LogEntryAdmittedCallbackState, ) { + if cbState.IsV2Protocol { + a.callbackV2.AdmittedLogEntry(ctx, cbState) + return + } // TODO(irfansharif,aaditya): This contributes to a high count of // inuse_objects. Look to address it as part of #104154. 
- a.dispatchWriter.Dispatch(ctx, origin, kvflowcontrolpb.AdmittedRaftLogEntries{ - RangeID: rangeID, - AdmissionPriority: int32(pri), + a.dispatchWriterV1.Dispatch(ctx, cbState.Origin, kvflowcontrolpb.AdmittedRaftLogEntries{ + RangeID: cbState.RangeID, + AdmissionPriority: int32(cbState.Pri), UpToRaftLogPosition: kvflowcontrolpb.RaftLogPosition{ - Term: pos.Term, - Index: pos.Index, + Term: cbState.Pos.Term, + Index: cbState.Pos.Index, }, - StoreID: storeID, + StoreID: cbState.StoreID, }) } diff --git a/pkg/server/application_api/stmtdiag_test.go b/pkg/server/application_api/stmtdiag_test.go index 637ab1ef00b4..0ff3e3b3ca91 100644 --- a/pkg/server/application_api/stmtdiag_test.go +++ b/pkg/server/application_api/stmtdiag_test.go @@ -13,6 +13,7 @@ package application_api_test import ( "context" "fmt" + "net/http" "testing" "time" @@ -34,39 +35,58 @@ func TestAdminAPIStatementDiagnosticsBundle(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - s, db, _ := serverutils.StartServer(t, base.TestServerArgs{}) + s := serverutils.StartServerOnly(t, base.TestServerArgs{}) defer s.Stopper().Stop(context.Background()) - ts := s.ApplicationLayer() + conn := sqlutils.MakeSQLRunner(ts.SQLConn(t)) query := "EXPLAIN ANALYZE (DEBUG) SELECT 'secret'" - _, err := db.Exec(query) - require.NoError(t, err) + conn.Exec(t, query) query = "SELECT id FROM system.statement_diagnostics LIMIT 1" - idRow, err := db.Query(query) - require.NoError(t, err) + idRow := conn.Query(t, query) var diagnosticRow string if idRow.Next() { - err = idRow.Scan(&diagnosticRow) + err := idRow.Scan(&diagnosticRow) require.NoError(t, err) } else { t.Fatal("no results") } - client, err := ts.GetAuthenticatedHTTPClient(false, serverutils.SingleTenantSession) - require.NoError(t, err) - resp, err := client.Get(ts.AdminURL().WithPath("/_admin/v1/stmtbundle/" + diagnosticRow).String()) - require.NoError(t, err) - defer resp.Body.Close() - require.Equal(t, 500, resp.StatusCode) + reqBundle := func(isAdmin bool, expectedStatusCode int) { + client, err := ts.GetAuthenticatedHTTPClient(isAdmin, serverutils.SingleTenantSession) + require.NoError(t, err) + resp, err := client.Get(ts.AdminURL().WithPath("/_admin/v1/stmtbundle/" + diagnosticRow).String()) + require.NoError(t, err) + defer func() { + err := resp.Body.Close() + require.NoError(t, err) + }() + require.Equal(t, expectedStatusCode, resp.StatusCode) + } - adminClient, err := ts.GetAuthenticatedHTTPClient(true, serverutils.SingleTenantSession) - require.NoError(t, err) - adminResp, err := adminClient.Get(ts.AdminURL().WithPath("/_admin/v1/stmtbundle/" + diagnosticRow).String()) - require.NoError(t, err) - defer adminResp.Body.Close() - require.Equal(t, 200, adminResp.StatusCode) + t.Run("with admin role", func(t *testing.T) { + reqBundle(true, http.StatusOK) + }) + + t.Run("no permissions", func(t *testing.T) { + reqBundle(false, http.StatusForbidden) + }) + + t.Run("with VIEWACTIVITYREDACTED role", func(t *testing.T) { + // VIEWACTIVITYREDACTED cannot download bundles due to PII. + // This will be revisited once we allow requesting and downloading redacted bundles. + conn.Exec(t, fmt.Sprintf("GRANT SYSTEM VIEWACTIVITYREDACTED TO %s", apiconstants.TestingUserNameNoAdmin().Normalized())) + reqBundle(false, http.StatusForbidden) + conn.Exec(t, fmt.Sprintf("REVOKE SYSTEM VIEWACTIVITYREDACTED FROM %s", apiconstants.TestingUserNameNoAdmin().Normalized())) + }) + + t.Run("with VIEWACTIVITY role", func(t *testing.T) { + // VIEWACTIVITY users can download bundles. 
+ conn.Exec(t, fmt.Sprintf("GRANT SYSTEM VIEWACTIVITY TO %s", apiconstants.TestingUserNameNoAdmin().Normalized())) + reqBundle(false, http.StatusOK) + conn.Exec(t, fmt.Sprintf("REVOKE SYSTEM VIEWACTIVITY FROM %s", apiconstants.TestingUserNameNoAdmin().Normalized())) + }) } func TestCreateStatementDiagnosticsReport(t *testing.T) { diff --git a/pkg/server/config.go b/pkg/server/config.go index db03b3e1c58f..d48a6b809949 100644 --- a/pkg/server/config.go +++ b/pkg/server/config.go @@ -336,7 +336,7 @@ func (cfg *BaseConfig) SetDefaults( cfg.Config.InitDefaults() cfg.InitTestingKnobs() cfg.CidrLookup = cidr.NewLookup(&st.SV) - cfg.EarlyBootExternalStorageAccessor = cloud.NewEarlyBootExternalStorageAccessor(st, cfg.ExternalIODirConfig) + cfg.EarlyBootExternalStorageAccessor = cloud.NewEarlyBootExternalStorageAccessor(st, cfg.ExternalIODirConfig, cfg.CidrLookup) cfg.DiskMonitorManager = disk.NewMonitorManager(vfs.Default) cfg.DiskWriteStats = disk.NewWriteStatsManager(vfs.Default) } diff --git a/pkg/server/decommission.go b/pkg/server/decommission.go index 52dde12df3cb..072ec55c2941 100644 --- a/pkg/server/decommission.go +++ b/pkg/server/decommission.go @@ -257,7 +257,7 @@ func (s *topLevelServer) DecommissionPreCheck( }) if err != nil { - return decommissioning.PreCheckResult{}, grpcstatus.Errorf(codes.Internal, err.Error()) + return decommissioning.PreCheckResult{}, grpcstatus.Error(codes.Internal, err.Error()) } return decommissioning.PreCheckResult{ @@ -358,7 +358,7 @@ func (s *topLevelServer) Decommission( return grpcstatus.Error(codes.NotFound, liveness.ErrMissingRecord.Error()) } log.Errorf(ctx, "%+s", err) - return grpcstatus.Errorf(codes.Internal, err.Error()) + return grpcstatus.Error(codes.Internal, err.Error()) } if statusChanged { event, nodeDetails := newEvent() diff --git a/pkg/server/diagnostics/reporter.go b/pkg/server/diagnostics/reporter.go index 0e476788b230..4905d11a2149 100644 --- a/pkg/server/diagnostics/reporter.go +++ b/pkg/server/diagnostics/reporter.go @@ -17,6 +17,7 @@ import ( "net/http" "net/url" "reflect" + "sync/atomic" "time" "github.com/cockroachdb/cockroach/pkg/base" @@ -94,6 +95,13 @@ type Reporter struct { // TestingKnobs is used for internal test controls only. TestingKnobs *TestingKnobs + + // LastSuccessfulTelemetryPing records the current timestamp in + // seconds since the Unix epoch whenever we successfully make contact + // with the registration server. This timestamp will be updated + // regardless of whether the response we get back is successful or + // not. + LastSuccessfulTelemetryPing atomic.Int64 } // PeriodicallyReportDiagnostics starts a background worker that periodically @@ -160,11 +168,22 @@ func (r *Reporter) ReportDiagnostics(ctx context.Context) { } defer res.Body.Close() b, err = io.ReadAll(res.Body) - if err != nil || res.StatusCode != http.StatusOK { + if err != nil { log.Warningf(ctx, "failed to report node usage metrics: status: %s, body: %s, "+ "error: %v", res.Status, b, err) return } + + // If `err` == nil then we assume that we've made successful contact + // with the telemetry server and any further problems are not the + // customer's fault. We update the telemetry timestamp before moving + // on with other request handling. 
+ r.LastSuccessfulTelemetryPing.Store(timeutil.Now().Unix()) + + if res.StatusCode != http.StatusOK { + log.Warningf(ctx, "failed to report node usage metrics: status: %s, body: %s", res.Status, b) + return + } r.SQLServer.GetReportedSQLStatsController().ResetLocalSQLStats(ctx) } diff --git a/pkg/server/drain.go b/pkg/server/drain.go index 18255c5e9fc0..618014005b33 100644 --- a/pkg/server/drain.go +++ b/pkg/server/drain.go @@ -101,7 +101,7 @@ func (s *adminServer) Drain(req *serverpb.DrainRequest, stream serverpb.Admin_Dr // Which node is this request for? nodeID, local, err := s.serverIterator.parseServerID(req.NodeId) if err != nil { - return status.Errorf(codes.InvalidArgument, err.Error()) + return status.Error(codes.InvalidArgument, err.Error()) } if !local { // This request is for another node. Forward it. @@ -332,6 +332,14 @@ func (s *drainServer) runDrain( func (s *drainServer) drainInner( ctx context.Context, reporter func(int, redact.SafeString), verbose bool, ) (err error) { + if s.sqlServer.sqlLivenessSessionID != "" { + // Set draining only if SQL instance was initialized + if err := s.sqlServer.sqlInstanceStorage.SetInstanceDraining( + ctx, s.sqlServer.sqlLivenessSessionID, s.sqlServer.SQLInstanceID()); err != nil { + return err + } + } + if s.serverCtl != nil { // We are on a KV node, with a server controller. // diff --git a/pkg/server/initial_sql.go b/pkg/server/initial_sql.go index f03a32f70fde..4a79561184ec 100644 --- a/pkg/server/initial_sql.go +++ b/pkg/server/initial_sql.go @@ -59,6 +59,9 @@ func (s *topLevelServer) RunInitialSQL( } log.Ops.Infof(ctx, "Replication was disabled for this cluster.\n"+ "When/if adding nodes in the future, update zone configurations to increase the replication factor.") + + // Disable license enforcement too + s.sqlServer.disableLicenseEnforcement(ctx) } if adminUser != "" && !s.Insecure() { diff --git a/pkg/server/job_profiler.go b/pkg/server/job_profiler.go index a68683034307..b11c947bcbcf 100644 --- a/pkg/server/job_profiler.go +++ b/pkg/server/job_profiler.go @@ -58,7 +58,7 @@ func (s *statusServer) RequestJobProfilerExecutionDetails( nodeID, local, err := s.parseNodeID(strconv.Itoa(int(coordinatorID))) if err != nil { - return nil, status.Errorf(codes.InvalidArgument, err.Error()) + return nil, status.Error(codes.InvalidArgument, err.Error()) } // If this node is the current coordinator of the job then we can collect the diff --git a/pkg/server/license/BUILD.bazel b/pkg/server/license/BUILD.bazel new file mode 100644 index 000000000000..45d93a42a259 --- /dev/null +++ b/pkg/server/license/BUILD.bazel @@ -0,0 +1,45 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "license", + srcs = [ + "cclbridge.go", + "enforcer.go", + ], + importpath = "github.com/cockroachdb/cockroach/pkg/server/license", + visibility = ["//visibility:public"], + deps = [ + "//pkg/keys", + "//pkg/settings/cluster", + "//pkg/sql/isql", + "//pkg/sql/pgwire/pgcode", + "//pkg/sql/pgwire/pgerror", + "//pkg/util/envutil", + "//pkg/util/log", + "//pkg/util/timeutil", + "@com_github_cockroachdb_errors//:errors", + ], +) + +go_test( + name = "license_test", + srcs = [ + "enforcer_test.go", + "main_test.go", + ], + deps = [ + ":license", + "//pkg/base", + "//pkg/security/securityassets", + "//pkg/security/securitytest", + "//pkg/server", + "//pkg/sql", + "//pkg/sql/catalog/descs", + "//pkg/testutils/serverutils", + "//pkg/testutils/testcluster", + "//pkg/util/leaktest", + "//pkg/util/log", + "//pkg/util/timeutil", + 
"@com_github_stretchr_testify//require", + ], +) diff --git a/pkg/server/license/cclbridge.go b/pkg/server/license/cclbridge.go new file mode 100644 index 000000000000..72b8f8dc40eb --- /dev/null +++ b/pkg/server/license/cclbridge.go @@ -0,0 +1,38 @@ +// Copyright 2024 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package license + +import ( + "context" + + "github.com/cockroachdb/cockroach/pkg/settings/cluster" +) + +// This file serves as a bridge to the license code in the CCL packages. +// Directly importing CCL is not possible, so this file maps functions +// and types from that package to something usable in this package. + +// RegisterCallbackOnLicenseChange is a pointer to a function that will register +// a callback when the license changes. This is initially empty here. When +// initializing the ccl package, this variable will be set to a valid function. +var RegisterCallbackOnLicenseChange = func(ctx context.Context, st *cluster.Settings) {} + +// LicType is the type to define the license type, as needed by the license +// enforcer. +type LicType int + +const ( + LicTypeNone LicType = iota + LicTypeTrial + LicTypeFree + LicTypeEnterprise + LicTypeEvaluation +) diff --git a/pkg/server/license/enforcer.go b/pkg/server/license/enforcer.go new file mode 100644 index 000000000000..4cf0fc59347c --- /dev/null +++ b/pkg/server/license/enforcer.go @@ -0,0 +1,407 @@ +// Copyright 2024 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package license + +import ( + "context" + "fmt" + "sync" + "sync/atomic" + "time" + + "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/settings/cluster" + "github.com/cockroachdb/cockroach/pkg/sql/isql" + "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" + "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" + "github.com/cockroachdb/cockroach/pkg/util/envutil" + "github.com/cockroachdb/cockroach/pkg/util/log" + "github.com/cockroachdb/cockroach/pkg/util/timeutil" + "github.com/cockroachdb/errors" +) + +const ( + defaultMaxOpenTransactions = 5 + defaultMaxTelemetryInterval = 7 * 24 * time.Hour +) + +// Enforcer is responsible for enforcing license policies. +type Enforcer struct { + // TestingKnobs are used to control the behavior of the enforcer for testing. + TestingKnobs *TestingKnobs + + // telemetryStatusReporter is an interface for getting the timestamp of the + // last successful ping to the telemetry server. For some licenses, sending + // telemetry data is required to avoid throttling. + telemetryStatusReporter TelemetryStatusReporter + + // clusterInitGracePeriodEndTS marks the end of the grace period when a + // license is required. It is set during the cluster's initial startup. The + // timestamp is stored as seconds since the Unix epoch and is read/written to + // the KV. 
+ clusterInitGracePeriodEndTS atomic.Int64 + + // startTime is the time when the enforcer was created. This is used to seed + // the clusterInitGracePeriodEndTS if it's not set in the KV layer. + startTime time.Time + + // licenseRequiresTelemetry will be true if the license requires that we send + // periodic telemetry data. + licenseRequiresTelemetry atomic.Bool + + // gracePeriodEndTS tracks when the grace period ends and throttling begins. + // For licenses without throttling, this value will be 0. The value stored + // is the number of seconds since the unix epoch. + gracePeriodEndTS atomic.Int64 + + // hasLicense is true if any license is installed. + hasLicense atomic.Bool + + // lastLicenseThrottlingLogTime keeps track of the last time we logged a + // message because we had to throttle due to a license issue. The value + // stored is the number of seconds since the unix epoch. + lastLicenseThrottlingLogTime atomic.Int64 + + // lastTelemetryThrottlingLogTime keeps track of the last time we logged a + // message because we had to throttle due to a telemetry issue. The value + // stored is the number of seconds since the unix epoch. + lastTelemetryThrottlingLogTime atomic.Int64 + + // isDisabled is a global override that completely disables license enforcement. + // When enabled, all checks, including telemetry and expired license validation, + // are bypassed. This is typically used to disable enforcement for single-node deployments. + isDisabled atomic.Bool +} + +type TestingKnobs struct { + // EnableGracePeriodInitTSWrite is a control knob for writing the grace period + // initialization timestamp. It is currently set to opt-in for writing the + // timestamp as a way to stage these changes. This ensures that the timestamp + // isn't written before the other license enforcement changes are complete. + // TODO(spilchen): Change this knob to opt-out as we approach the final stages + // of the core licensing deprecation work. This will be handled in CRDB-41758. + EnableGracePeriodInitTSWrite bool + + // OverrideStartTime if set, overrides the time that's used to seed the + // grace period init timestamp. + OverrideStartTime *time.Time + + // OverrideThrottleCheckTime if set, overrides the timestamp used when + // checking if throttling is active. + OverrideThrottleCheckTime *time.Time +} + +// TelemetryStatusReporter is the interface we use to find the last ping +// time for telemetry reporting. +type TelemetryStatusReporter interface { + // GetLastSuccessfulTelemetryPing returns the time of the last time the + // telemetry data got back an acknowledgement from Cockroach Labs. + GetLastSuccessfulTelemetryPing() time.Time +} + +var instance *Enforcer +var once sync.Once + +// GetEnforcerInstance returns the singleton instance of the Enforcer. The +// Enforcer is responsible for license enforcement policies. +func GetEnforcerInstance() *Enforcer { + once.Do( + func() { + instance = newEnforcer() + }) + return instance +} + +// newEnforcer creates a new Enforcer object. +func newEnforcer() *Enforcer { + return &Enforcer{ + startTime: timeutil.Now(), + } +} + +// SetTelemetryStatusReporter will set the pointer to the telemetry status reporter. +func (e *Enforcer) SetTelemetryStatusReporter(reporter TelemetryStatusReporter) { + e.telemetryStatusReporter = reporter +} + +// Start will load the necessary metadata for the enforcer. It reads from the +// KV license metadata and will populate any missing data as needed. The DB +// passed in must have access to the system tenant. 
+func (e *Enforcer) Start( + ctx context.Context, st *cluster.Settings, db isql.DB, initialStart bool, +) error { + e.maybeLogActiveOverrides(ctx) + + // Add a hook into the license setting so that we refresh our state whenever + // the license changes. + RegisterCallbackOnLicenseChange(ctx, st) + + // Writing the grace period initialization timestamp is currently opt-in. See + // the EnableGracePeriodInitTSWrite comment for details. + if e.TestingKnobs != nil && e.TestingKnobs.EnableGracePeriodInitTSWrite { + return e.maybeWriteClusterInitGracePeriodTS(ctx, db, initialStart) + } + return nil +} + +// maybeWriteClusterInitGracePeriodTS checks if the cluster init grace period +// timestamp needs to be written to the KV layer and writes it if needed. +func (e *Enforcer) maybeWriteClusterInitGracePeriodTS( + ctx context.Context, db isql.DB, initialStart bool, +) error { + return db.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + // We could use a conditional put for this logic. However, we want to read + // and cache the value, and the common case is that the value will be read. + // Only during the initialization of the first node in the cluster will we + // need to write a new timestamp. So, we optimize for the case where the + // timestamp already exists. + val, err := txn.KV().Get(ctx, keys.GracePeriodInitTimestamp) + if err != nil { + return err + } + if val.Value == nil { + // The length of the grace period without a license varies based on the + // cluster's creation time. Older databases built when we had a + // CockroachDB core license are given more time. + gracePeriodLength := 30 * 24 * time.Hour + if initialStart { + gracePeriodLength = 7 * 24 * time.Hour + } + end := e.getStartTime().Add(gracePeriodLength) + log.Infof(ctx, "generated new cluster init grace period end time: %s", end.UTC().String()) + e.clusterInitGracePeriodEndTS.Store(end.Unix()) + return txn.KV().Put(ctx, keys.GracePeriodInitTimestamp, e.clusterInitGracePeriodEndTS.Load()) + } + e.clusterInitGracePeriodEndTS.Store(val.ValueInt()) + log.Infof(ctx, "fetched existing cluster init grace period end time: %s", e.GetClusterInitGracePeriodEndTS().String()) + return nil + }) +} + +// GetClusterInitGracePeriodEndTS will return the ending time of the grace +// period as stored during cluster init. This is intended to be used as the +// grace period for when no license is installed. +func (e *Enforcer) GetClusterInitGracePeriodEndTS() time.Time { + // In the rare case that the timestamp has not been cached yet, we will return + // an approximate value, the start time of the server plus 7 days. This should + // only happen if we are in the process of caching the grace period init + // timestamp, or we failed to cache it. This is preferable to returning an + // error or a zero value. + if e.clusterInitGracePeriodEndTS.Load() == 0 { + return e.getStartTime().Add(7 * 24 * time.Hour) + } + return timeutil.Unix(e.clusterInitGracePeriodEndTS.Load(), 0) +} + +// GetHasLicense returns true if a license is installed. +func (e *Enforcer) GetHasLicense() bool { + return e.hasLicense.Load() +} + +// GetGracePeriodEndTS returns the timestamp indicating the end of the grace period. +// Some licenses provide a grace period after expiration or when no license is present. +// If no grace period is defined, the second return value will be false.
+func (e *Enforcer) GetGracePeriodEndTS() (time.Time, bool) { + if e.gracePeriodEndTS.Load() == 0 { + return time.Time{}, false + } + ts := timeutil.Unix(e.gracePeriodEndTS.Load(), 0) + return ts, true +} + +// GetTelemetryDeadline returns the deadline by which telemetry +// data must be received before we start to throttle. If the license doesn't +// require telemetry, then false is returned for the second return value. +func (e *Enforcer) GetTelemetryDeadline() (time.Time, bool) { + if !e.licenseRequiresTelemetry.Load() || e.telemetryStatusReporter == nil { + return time.Time{}, false + } + + lastTelemetryDataReceived := e.telemetryStatusReporter.GetLastSuccessfulTelemetryPing() + throttleTS := lastTelemetryDataReceived.Add(e.getMaxTelemetryInterval()) + return throttleTS, true +} + +// MaybeFailIfThrottled evaluates the current transaction count and license state, +// returning an error if throttling conditions are met. Throttling may be triggered +// if the maximum number of open transactions is exceeded and the grace period has +// ended or if required diagnostic reporting has not been received. +func (e *Enforcer) MaybeFailIfThrottled(ctx context.Context, txnsOpened int64) (err error) { + // Early out if the number of transactions is below the max allowed or + // everything has been disabled. + if txnsOpened < e.getMaxOpenTransactions() || e.isDisabled.Load() { + return + } + + now := e.getThrottleCheckTS() + if gracePeriodEnd, ok := e.GetGracePeriodEndTS(); ok && now.After(gracePeriodEnd) { + if e.GetHasLicense() { + err = errors.WithHintf(pgerror.Newf(pgcode.CCLValidLicenseRequired, + "License expired. The maximum number of open transactions has been reached."), + "Obtain and install a new license to continue.") + } else { + err = errors.WithHintf(pgerror.Newf(pgcode.CCLValidLicenseRequired, + "No license installed. The maximum number of open transactions has been reached."), + "Obtain and install a valid license to continue.") + } + e.maybeLogError(ctx, err, &e.lastLicenseThrottlingLogTime, + fmt.Sprintf(", license expired with a grace period that ended at %s", gracePeriodEnd)) + return + } + + if ts, ok := e.GetTelemetryDeadline(); ok && now.After(ts) { + err = errors.WithHintf(pgerror.Newf(pgcode.CCLValidLicenseRequired, + "The maximum number of open transactions has been reached because the license requires "+ + "diagnostic reporting, but none has been received by Cockroach Labs."), + "Ensure diagnostic reporting is enabled and verify that nothing is blocking network access to the "+ + "Cockroach Labs reporting server. You can also consider changing your license to one that doesn't "+ + "require diagnostic reporting to be emitted.") + e.maybeLogError(ctx, err, &e.lastTelemetryThrottlingLogTime, + fmt.Sprintf("due to no telemetry data received, last received at %s", + e.telemetryStatusReporter.GetLastSuccessfulTelemetryPing())) + return + } + return +} + +// RefreshForLicenseChange resets the state when the license changes. We cache certain +// information to optimize enforcement. Instead of reading the license from the +// settings, unmarshaling it, and checking its type and expiry each time, +// caching the information improves efficiency since licenses change infrequently.
+func (e *Enforcer) RefreshForLicenseChange(licType LicType, licenseExpiry time.Time) { + e.hasLicense.Store(licType != LicTypeNone) + + switch licType { + case LicTypeNone: + e.storeNewGracePeriodEndDate(e.GetClusterInitGracePeriodEndTS(), 0) + e.licenseRequiresTelemetry.Store(false) + case LicTypeFree: + e.storeNewGracePeriodEndDate(licenseExpiry, e.getGracePeriodDuration(30*24*time.Hour)) + e.licenseRequiresTelemetry.Store(true) + case LicTypeTrial: + e.storeNewGracePeriodEndDate(licenseExpiry, e.getGracePeriodDuration(7*24*time.Hour)) + e.licenseRequiresTelemetry.Store(true) + case LicTypeEvaluation: + e.storeNewGracePeriodEndDate(licenseExpiry, e.getGracePeriodDuration(30*24*time.Hour)) + e.licenseRequiresTelemetry.Store(false) + case LicTypeEnterprise: + e.storeNewGracePeriodEndDate(timeutil.UnixEpoch, 0) + e.licenseRequiresTelemetry.Store(false) + } +} + +// Disable turns off all license enforcement for the lifetime of this object. +func (e *Enforcer) Disable(ctx context.Context) { + // We provide an override so that we can continue to test license enforcement + // policies in single-node clusters. + skipDisable := envutil.EnvOrDefaultBool("COCKROACH_SKIP_LICENSE_ENFORCEMENT_DISABLE", false) + if skipDisable { + return + } + log.Infof(ctx, "disable all license enforcement") + e.isDisabled.Store(true) +} + +// getStartTime returns the time when the enforcer was created. This accounts +// for testing knobs that may override the time. +func (e *Enforcer) getStartTime() time.Time { + if e.TestingKnobs != nil && e.TestingKnobs.OverrideStartTime != nil { + return *e.TestingKnobs.OverrideStartTime + } + return e.startTime +} + +// getThrottleCheckTS returns the time to use when checking if we should +// throttle the new transaction. +func (e *Enforcer) getThrottleCheckTS() time.Time { + if e.TestingKnobs != nil && e.TestingKnobs.OverrideThrottleCheckTime != nil { + return *e.TestingKnobs.OverrideThrottleCheckTime + } + return timeutil.Now() +} + +func (e *Enforcer) storeNewGracePeriodEndDate(start time.Time, duration time.Duration) { + ts := start.Add(duration) + e.gracePeriodEndTS.Store(ts.Unix()) +} + +// getGracePeriodDuration is a helper to pick the grace period length, while +// accounting for testing knobs and/or environment variables. +func (e *Enforcer) getGracePeriodDuration(defaultAndMaxLength time.Duration) time.Duration { + newLength := envutil.EnvOrDefaultDuration("COCKROACH_LICENSE_GRACE_PERIOD_DURATION", defaultAndMaxLength) + // We only allow shortening of the grace period for testing purposes. Ensure + // it can never increase. + if defaultAndMaxLength < newLength { + return defaultAndMaxLength + } + return newLength +} + +// getMaxOpenTransactions returns the number of open transactions allowed before +// throttling takes effect. +func (e *Enforcer) getMaxOpenTransactions() int64 { + newLimit := envutil.EnvOrDefaultInt64("COCKROACH_MAX_OPEN_TXNS_DURING_THROTTLE", defaultMaxOpenTransactions) + // Ensure we can never increase the number of open transactions allowed. + if newLimit > defaultMaxOpenTransactions { + return defaultMaxOpenTransactions + } + return newLimit +} + +// getMaxTelemetryInterval returns the maximum duration allowed before telemetry +// data must be sent to comply with license policies that require telemetry. +func (e *Enforcer) getMaxTelemetryInterval() time.Duration { + newTimeframe := envutil.EnvOrDefaultDuration("COCKROACH_MAX_TELEMETRY_INTERVAL", defaultMaxTelemetryInterval) + // Ensure we can never extend beyond the default interval.
+ if newTimeframe > defaultMaxTelemetryInterval { + return defaultMaxTelemetryInterval + } + return newTimeframe +} + +// maybeLogError logs a throttling error message if one hasn't been logged +// recently. This helps alert about throttling issues without flooding the +// CockroachDB log. It also serves as a useful breadcrumb for debugging, +// particularly in automated test runs where client responses may not be fully +// examined. +func (e *Enforcer) maybeLogError( + ctx context.Context, err error, lastLogTimestamp *atomic.Int64, additionalMsg string, +) { + nextLogMessage := timeutil.Unix(lastLogTimestamp.Load(), 0).Add(5 * time.Minute) + + now := timeutil.Now() + if now.After(nextLogMessage) { + lastLogTimestamp.Store(now.Unix()) + log.Infof(ctx, "throttling for license enforcement is active %s: %s", additionalMsg, err.Error()) + } +} + +// maybeLogActiveOverrides is a debug tool to indicate any env var overrides. +func (e *Enforcer) maybeLogActiveOverrides(ctx context.Context) { + maxOpenTxns := e.getMaxOpenTransactions() + if maxOpenTxns != defaultMaxOpenTransactions { + log.Infof(ctx, "max open transactions before throttling overridden to %d", maxOpenTxns) + } + + // The grace period may vary depending on the license type. We'll select the + // maximum grace period across all licenses and compare it with the value + // from the getter to determine if an override is applied. + maxGracePeriod := 30 * 24 * time.Hour + curGracePeriod := e.getGracePeriodDuration(maxGracePeriod) + if curGracePeriod != maxGracePeriod { + log.Infof(ctx, "grace period has changed to %v", curGracePeriod) + } + + curTelemetryInterval := e.getMaxTelemetryInterval() + if curTelemetryInterval != defaultMaxTelemetryInterval { + log.Infof(ctx, "max telemetry interval has changed to %v", curTelemetryInterval) + } +} diff --git a/pkg/server/license/enforcer_test.go b/pkg/server/license/enforcer_test.go new file mode 100644 index 000000000000..461dc40099a8 --- /dev/null +++ b/pkg/server/license/enforcer_test.go @@ -0,0 +1,174 @@ +// Copyright 2024 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package license_test + +import ( + "context" + "fmt" + "regexp" + "testing" + "time" + + "github.com/cockroachdb/cockroach/pkg/base" + "github.com/cockroachdb/cockroach/pkg/server" + "github.com/cockroachdb/cockroach/pkg/server/license" + "github.com/cockroachdb/cockroach/pkg/sql" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" + "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" + "github.com/cockroachdb/cockroach/pkg/util/leaktest" + "github.com/cockroachdb/cockroach/pkg/util/log" + "github.com/cockroachdb/cockroach/pkg/util/timeutil" + "github.com/stretchr/testify/require" +) + +type mockTelemetryStatusReporter struct { + lastPingTime time.Time +} + +func (m mockTelemetryStatusReporter) GetLastSuccessfulTelemetryPing() time.Time { + return m.lastPingTime +} + +func TestGracePeriodInitTSCache(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + // This is the timestamp that we'll override the grace period init timestamp with. + // This will be set when bringing up the server.
+	ts1 := timeutil.Unix(1724329716, 0)
+	ts1End := ts1.Add(7 * 24 * time.Hour) // Calculate the end of the grace period based on ts1
+
+	ctx := context.Background()
+	srv := serverutils.StartServerOnly(t, base.TestServerArgs{
+		Knobs: base.TestingKnobs{
+			Server: &server.TestingKnobs{
+				LicenseTestingKnobs: license.TestingKnobs{
+					EnableGracePeriodInitTSWrite: true,
+					OverrideStartTime:            &ts1,
+				},
+			},
+		},
+	})
+	defer srv.Stopper().Stop(ctx)
+
+	// Create a new enforcer, to test that it won't overwrite the grace period init
+	// timestamp that was already set up.
+	enforcer := &license.Enforcer{}
+	ts2 := ts1.Add(1)
+	ts2End := ts2.Add(7 * 24 * time.Hour) // Calculate the end of the grace period
+	enforcer.TestingKnobs = &license.TestingKnobs{
+		EnableGracePeriodInitTSWrite: true,
+		OverrideStartTime:            &ts2,
+	}
+	// Ensure that requesting the grace period init timestamp before Start just
+	// returns a value derived from the start time used when the enforcer was
+	// created.
+	require.Equal(t, ts2End, enforcer.GetClusterInitGracePeriodEndTS())
+	// Start the enforcer to read the timestamp from the KV.
+	enforcer.SetTelemetryStatusReporter(&mockTelemetryStatusReporter{lastPingTime: ts1})
+	err := enforcer.Start(ctx, srv.ClusterSettings(), srv.SystemLayer().InternalDB().(descs.DB), false /* initialStart */)
+	require.NoError(t, err)
+	require.Equal(t, ts1End, enforcer.GetClusterInitGracePeriodEndTS())
+
+	// Access the enforcer that is cached in the executor config to make sure they
+	// work for the system tenant and secondary tenant.
+	require.Equal(t, ts1End, srv.SystemLayer().ExecutorConfig().(sql.ExecutorConfig).LicenseEnforcer.GetClusterInitGracePeriodEndTS())
+	require.Equal(t, ts1End, srv.ApplicationLayer().ExecutorConfig().(sql.ExecutorConfig).LicenseEnforcer.GetClusterInitGracePeriodEndTS())
+}
+
+func TestThrottle(t *testing.T) {
+	defer leaktest.AfterTest(t)()
+	defer log.Scope(t).Close(t)
+
+	ctx := context.Background()
+
+	const UnderTxnThreshold = 3
+	const AtTxnThreshold = 5
+	const OverTxnThreshold = 7
+
+	t0 := timeutil.Unix(1724884362, 0)
+	t1d := t0.Add(24 * time.Hour)
+	t8d := t0.Add(8 * 24 * time.Hour)
+	t10d := t0.Add(10 * 24 * time.Hour)
+	t15d := t0.Add(15 * 24 * time.Hour)
+	t17d := t0.Add(17 * 24 * time.Hour)
+	t18d := t0.Add(18 * 24 * time.Hour)
+	t30d := t0.Add(30 * 24 * time.Hour)
+	t45d := t0.Add(45 * 24 * time.Hour)
+	t46d := t0.Add(46 * 24 * time.Hour)
+
+	for i, tc := range []struct {
+		openTxnsCount         int64
+		licType               license.LicType
+		gracePeriodInit       time.Time
+		lastTelemetryPingTime time.Time
+		licExpiry             time.Time
+		checkTs               time.Time
+		expectedErrRegex      string
+	}{
+		// Expired free license but under the transaction threshold
+		{UnderTxnThreshold, license.LicTypeFree, t0, t1d, t8d, t45d, ""},
+		// Expired trial license but under the transaction threshold
+		{UnderTxnThreshold, license.LicTypeTrial, t0, t30d, t8d, t45d, ""},
+		// Over the transaction threshold but not expired
+		{OverTxnThreshold, license.LicTypeFree, t0, t10d, t45d, t10d, ""},
+		// Expired free license, past the grace period
+		{AtTxnThreshold, license.LicTypeFree, t0, t30d, t10d, t45d, "License expired"},
+		// Expired free license, but not past the grace period
+		{OverTxnThreshold, license.LicTypeFree, t0, t30d, t10d, t17d, ""},
+		// Valid free license, but telemetry ping hasn't been received in 7 days.
+		{AtTxnThreshold, license.LicTypeFree, t0, t10d, t45d, t17d, ""},
+		// Valid free license, but telemetry ping hasn't been received in 8 days.
+		{OverTxnThreshold, license.LicTypeFree, t0, t10d, t45d, t18d, "diagnostic reporting"},
+		// No license but within grace period still
+		{AtTxnThreshold, license.LicTypeNone, t0, t0, t0, t1d, ""},
+		// No license but beyond grace period
+		{OverTxnThreshold, license.LicTypeNone, t0, t0, t0, t8d, "No license installed"},
+		// Trial license has expired but still within grace period
+		{AtTxnThreshold, license.LicTypeTrial, t0, t30d, t10d, t15d, ""},
+		// Trial license has expired and just at the edge of the grace period.
+		{OverTxnThreshold, license.LicTypeTrial, t0, t45d, t10d, t17d, ""},
+		// Trial license has expired and just beyond the grace period.
+		{AtTxnThreshold, license.LicTypeTrial, t0, t45d, t10d, t18d, "License expired"},
+		// No throttling if past the expiry of an enterprise license
+		{OverTxnThreshold, license.LicTypeEnterprise, t0, t0, t8d, t46d, ""},
+		// Telemetry isn't needed for enterprise license
+		{AtTxnThreshold, license.LicTypeEnterprise, t0, t0, t45d, t30d, ""},
+		// Telemetry isn't needed for evaluation license
+		{OverTxnThreshold, license.LicTypeEvaluation, t0, t0, t45d, t30d, ""},
+		// Evaluation license doesn't throttle if expired but within grace period.
+		{AtTxnThreshold, license.LicTypeEvaluation, t0, t0, t15d, t30d, ""},
+		// Evaluation license does throttle if expired and beyond grace period.
+		{OverTxnThreshold, license.LicTypeEvaluation, t0, t0, t15d, t46d, "License expired"},
+	} {
+		t.Run(fmt.Sprintf("test %d", i), func(t *testing.T) {
+			e := license.Enforcer{
+				TestingKnobs: &license.TestingKnobs{
+					OverrideStartTime:         &tc.gracePeriodInit,
+					OverrideThrottleCheckTime: &tc.checkTs,
+				},
+			}
+			e.SetTelemetryStatusReporter(&mockTelemetryStatusReporter{
+				lastPingTime: tc.lastTelemetryPingTime,
+			})
+			e.RefreshForLicenseChange(tc.licType, tc.licExpiry)
+			err := e.MaybeFailIfThrottled(ctx, tc.openTxnsCount)
+			if tc.expectedErrRegex == "" {
+				require.NoError(t, err)
+			} else {
+				require.Error(t, err)
+				re := regexp.MustCompile(tc.expectedErrRegex)
+				match := re.MatchString(err.Error())
+				require.True(t, match, "Error text %q doesn't match the expected regexp of %q",
+					err.Error(), tc.expectedErrRegex)
+			}
+		})
+	}
+}
diff --git a/pkg/server/license/main_test.go b/pkg/server/license/main_test.go
new file mode 100644
index 000000000000..9f15942a491c
--- /dev/null
+++ b/pkg/server/license/main_test.go
@@ -0,0 +1,31 @@
+// Copyright 2024 The Cockroach Authors.
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package license_test
+
+import (
+	"os"
+	"testing"
+
+	"github.com/cockroachdb/cockroach/pkg/security/securityassets"
+	"github.com/cockroachdb/cockroach/pkg/security/securitytest"
+	"github.com/cockroachdb/cockroach/pkg/server"
+	"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
+	"github.com/cockroachdb/cockroach/pkg/testutils/testcluster"
+)
+
+func TestMain(m *testing.M) {
+	securityassets.SetLoader(securitytest.EmbeddedAssets)
+	serverutils.InitTestServerFactory(server.TestServerFactory)
+	serverutils.InitTestClusterFactory(testcluster.TestClusterFactory)
+	os.Exit(m.Run())
+}
+
+//go:generate ../../util/leaktest/add-leaktest.sh *_test.go
diff --git a/pkg/server/migration_test.go b/pkg/server/migration_test.go
index f001efa863b8..bca8f78a85b5 100644
--- a/pkg/server/migration_test.go
+++ b/pkg/server/migration_test.go
@@ -35,6 +35,7 @@ import (
 	"github.com/cockroachdb/cockroach/pkg/upgrade/upgradebase"
 	"github.com/cockroachdb/cockroach/pkg/util/leaktest"
 	"github.com/cockroachdb/cockroach/pkg/util/log"
+	"github.com/cockroachdb/pebble/vfs"
 	"github.com/stretchr/testify/require"
 )
@@ -169,9 +170,9 @@ func TestSyncAllEngines(t *testing.T) {
 	// will have been lost.
 	{
 		memFS := vfsRegistry.Get(storeSpec.StickyVFSID)
-		memFS.SetIgnoreSyncs(true)
+		crashFS := memFS.CrashClone(vfs.CrashCloneCfg{})
 		s.Stopper().Stop(ctx)
-		memFS.ResetToSyncedState()
+		vfsRegistry.Set(storeSpec.StickyVFSID, crashFS)
 	}
 
 	// Restart the server.
diff --git a/pkg/server/node.go b/pkg/server/node.go
index 9e0f8b53791e..f8ec3a3c4505 100644
--- a/pkg/server/node.go
+++ b/pkg/server/node.go
@@ -14,6 +14,7 @@ import (
 	"bytes"
 	"context"
 	"fmt"
+	"math"
 	"net"
 	"sort"
 	"strings"
@@ -241,6 +242,13 @@ var (
 		`duration spent in processing above any available stack history is appended to its trace, if automatic trace snapshots are enabled`,
 		time.Second*30,
 	)
+
+	livenessRangeCompactInterval = settings.RegisterDurationSetting(
+		settings.SystemOnly,
+		"kv.liveness_range_compact.interval",
+		`interval at which the liveness range is compacted. A value of 0 disables the periodic compaction`,
+		0,
+	)
 )
 
 // By default, stores will be started concurrently.
@@ -402,8 +410,6 @@ type Node struct {
 	// COCKROACH_DEBUG_TS_IMPORT_FILE env var.
 	suppressNodeStatus atomic.Bool
 
-	diskStatsMap diskStatsMap
-
 	testingErrorEvent func(context.Context, *kvpb.BatchRequest, error)
 
 	// Used to collect samples for the key visualizer.
@@ -812,6 +818,8 @@ func (n *Node) start(
 		log.Infof(ctx, "started with engine type %v", &t)
 	}
 	log.Infof(ctx, "started with attributes %v", attrs.Attrs)
+
+	n.startPeriodicLivenessCompaction(n.stopper, livenessRangeCompactInterval)
 	return nil
 }
 
@@ -1103,6 +1111,87 @@ func (n *Node) startComputePeriodicMetrics(stopper *stop.Stopper, interval time.
 	})
 }
 
+// startPeriodicLivenessCompaction starts a loop where it periodically compacts
+// the liveness range.
+func (n *Node) startPeriodicLivenessCompaction(
+	stopper *stop.Stopper, livenessRangeCompactInterval *settings.DurationSetting,
+) {
+	ctx := n.AnnotateCtx(context.Background())
+
+	// getCompactionInterval() returns the interval at which the liveness range is
+	// set to be compacted. If the interval is set to 0, the period is set to the
+	// max possible duration because a value of 0 causes the ticker to panic.
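+	// Note: time.NewTicker panics on a non-positive duration, so math.MaxInt64
+	// serves as an effectively infinite interval while compaction is disabled.
+	// The interval can be changed at runtime with, for example:
+	//   SET CLUSTER SETTING kv.liveness_range_compact.interval = '10m';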
+	getCompactionInterval := func() time.Duration {
+		interval := livenessRangeCompactInterval.Get(&n.storeCfg.Settings.SV)
+		if interval == 0 {
+			interval = math.MaxInt64
+		}
+		return interval
+	}
+
+	if err := stopper.RunAsyncTask(ctx, "liveness-compaction", func(ctx context.Context) {
+		interval := getCompactionInterval()
+		ticker := time.NewTicker(interval)
+
+		intervalChangeChan := make(chan time.Duration)
+
+		// Update the compaction interval when the setting changes.
+		livenessRangeCompactInterval.SetOnChange(&n.storeCfg.Settings.SV, func(ctx context.Context) {
+			// intervalChangeChan is used to signal the compaction loop that the
+			// interval has changed. Avoid blocking the main goroutine that is
+			// responsible for handling all settings updates.
+			select {
+			case intervalChangeChan <- getCompactionInterval():
+			default:
+			}
+		})
+
+		defer ticker.Stop()
+		for {
+			select {
+			case <-ticker.C:
+				// Find the liveness replica in order to compact it.
+				_ = n.stores.VisitStores(func(store *kvserver.Store) error {
+					store.VisitReplicas(func(repl *kvserver.Replica) bool {
+						span := repl.Desc().KeySpan().AsRawSpanWithNoLocals()
+						if keys.NodeLivenessSpan.Overlaps(span) {
+							// The CompactRange() method expects the start and end keys to be
+							// encoded.
+							startEngineKey :=
+								storage.EngineKey{
+									Key: span.Key,
+								}.Encode()
+
+							endEngineKey :=
+								storage.EngineKey{
+									Key: span.EndKey,
+								}.Encode()
+
+							timeBeforeCompaction := timeutil.Now()
+							if err := store.StateEngine().CompactRange(startEngineKey, endEngineKey); err != nil {
+								log.Errorf(ctx, "failed compacting liveness replica: %+v with error: %s", repl, err)
+							} else {
+								log.Infof(ctx, "finished compacting liveness replica: %+v and it took: %+v",
+									repl, timeutil.Since(timeBeforeCompaction))
+							}
+						}
+						return true
+					})
+					return nil
+				})
+			case newInterval := <-intervalChangeChan:
+				ticker.Reset(newInterval)
+			case <-stopper.ShouldQuiesce():
+				return
+			}
+		}
+	}); err != nil {
+		log.Errorf(ctx, "failed to start the async liveness compaction task: %v", err)
+	}
+}
+
 // updateNodeRangeCount updates the internal counter of the total ranges across
 // all stores. This value is used to make a decision on whether the node should
 // use expiration leases (see Replica.shouldUseExpirationLeaseRLocked).
@@ -1144,6 +1233,7 @@ func (n *Node) computeMetricsPeriodically(
 		return nil
 	})
 	n.updateNodeRangeCount()
+	n.storeCfg.KVFlowStreamTokenProvider.UpdateMetricGauges()
 	return err
 }
 
@@ -1240,27 +1330,33 @@ func (mm *diskMonitorManager) Monitor(path string) (kvserver.DiskStatsMonitor, e
 func (n *Node) registerEnginesForDiskStatsMap(
 	specs []base.StoreSpec, engines []storage.Engine, diskManager *diskMonitorManager,
-) error {
-	if err := n.diskStatsMap.initDiskStatsMap(specs, engines, diskManager); err != nil {
-		return err
+) (admission.PebbleMetricsProvider, error) {
+	pmp := &nodePebbleMetricsProvider{n: n}
+	if err := pmp.diskStatsMap.initDiskStatsMap(specs, engines, diskManager); err != nil {
+		return nil, err
 	}
-	if err := n.stores.RegisterDiskMonitors(n.diskStatsMap.diskMonitors); err != nil {
-		return err
+	if err := n.stores.RegisterDiskMonitors(pmp.diskStatsMap.diskMonitors); err != nil {
+		return nil, err
 	}
-	return nil
+	return pmp, nil
+}
+
+type nodePebbleMetricsProvider struct {
+	n            *Node
+	diskStatsMap diskStatsMap
 }
 
 // GetPebbleMetrics implements admission.PebbleMetricsProvider.
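+// It is implemented on nodePebbleMetricsProvider rather than on Node so that
+// the provider owns the diskStatsMap and its disk monitors, which are shut
+// down via the Close method below.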
-func (n *Node) GetPebbleMetrics() []admission.StoreMetrics {
+func (pmp *nodePebbleMetricsProvider) GetPebbleMetrics() []admission.StoreMetrics {
 	clusterProvisionedBandwidth := kvadmission.ProvisionedBandwidth.Get(
-		&n.storeCfg.Settings.SV)
-	storeIDToDiskStats, err := n.diskStatsMap.tryPopulateAdmissionDiskStats(clusterProvisionedBandwidth)
+		&pmp.n.storeCfg.Settings.SV)
+	storeIDToDiskStats, err := pmp.diskStatsMap.tryPopulateAdmissionDiskStats(clusterProvisionedBandwidth)
 	if err != nil {
 		log.Warningf(context.Background(), "%v", errors.Wrapf(err, "unable to populate disk stats"))
 	}
 	var metrics []admission.StoreMetrics
-	_ = n.stores.VisitStores(func(store *kvserver.Store) error {
+	_ = pmp.n.stores.VisitStores(func(store *kvserver.Store) error {
 		m := store.TODOEngine().GetMetrics()
 		diskStats := admission.DiskStats{ProvisionedBandwidth: clusterProvisionedBandwidth}
 		if s, ok := storeIDToDiskStats[store.StoreID()]; ok {
@@ -1276,6 +1372,11 @@ func (n *Node) GetPebbleMetrics() []admission.StoreMetrics {
 	return metrics
 }
 
+// Close implements admission.PebbleMetricsProvider.
+func (pmp *nodePebbleMetricsProvider) Close() {
+	pmp.diskStatsMap.closeDiskMonitors()
+}
+
 // GetTenantWeights implements kvserver.TenantWeightProvider.
 func (n *Node) GetTenantWeights() kvadmission.TenantWeights {
 	weights := kvadmission.TenantWeights{
@@ -1925,44 +2026,6 @@ func (n *Node) RangeLookup(
 	return resp, nil
 }
 
-// perRangeEventSink is an implementation of rangefeed.Stream which annotates
-// each response with rangeID and streamID. It is used by MuxRangeFeed.
-type perRangeEventSink struct {
-	ctx      context.Context
-	rangeID  roachpb.RangeID
-	streamID int64
-	wrapped  *rangefeed.StreamMuxer
-}
-
-var _ kvpb.RangeFeedEventSink = (*perRangeEventSink)(nil)
-var _ rangefeed.Stream = (*perRangeEventSink)(nil)
-
-func (s *perRangeEventSink) Context() context.Context {
-	return s.ctx
-}
-
-// SendIsThreadSafe is a no-op declaration method. It is a contract that the
-// Send method is thread-safe. Note that Send wraps rangefeed.StreamMuxer which
-// declares its Send method to be thread-safe.
-func (s *perRangeEventSink) SendIsThreadSafe() {}
-
-func (s *perRangeEventSink) Send(event *kvpb.RangeFeedEvent) error {
-	response := &kvpb.MuxRangeFeedEvent{
-		RangeFeedEvent: *event,
-		RangeID:        s.rangeID,
-		StreamID:       s.streamID,
-	}
-	return s.wrapped.Send(response)
-}
-
-// Disconnect implements the rangefeed.Stream interface. It requests the
-// StreamMuxer to detach the stream. The StreamMuxer is then responsible for
-// handling the actual disconnection and additional cleanup. Note that Caller
-// should not rely on immediate disconnection as cleanup takes place async.
-func (s *perRangeEventSink) Disconnect(err *kvpb.Error) {
-	s.wrapped.DisconnectStreamWithError(s.streamID, s.rangeID, err)
-}
-
 // lockedMuxStream provides support for concurrent calls to Send. The underlying
 // MuxRangeFeedServer (default grpc.Stream) is not safe for concurrent calls to
 // Send.
@@ -1979,55 +2042,100 @@ func (s *lockedMuxStream) Send(e *kvpb.MuxRangeFeedEvent) error {
 	return s.wrapped.Send(e)
 }
 
+// streamManager is an interface that defines the methods required to manage a
+// rangefeed.Stream at the node level. Implemented by rangefeed.BufferedSender
+// and rangefeed.UnbufferedSender.
+type streamManager interface {
+	// SendBufferedError disconnects the stream identified by ev.StreamID and
+	// sends the error back to the client. This call is non-blocking, and
+	// additional clean-up takes place async. The caller cannot expect an
+	// immediate disconnection.
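+	// The event is expected to carry a RangeFeedError payload along with the
+	// StreamID and RangeID of the stream being torn down (see
+	// makeMuxRangefeedErrorEvent below).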
+	SendBufferedError(ev *kvpb.MuxRangeFeedEvent)
+	// AddStream adds a new per-range stream for the streamManager to manage.
+	AddStream(streamID int64, cancel context.CancelFunc)
+	// Start starts the streamManager background job to manage all active streams.
+	// It continues until it errors or Stop is called. It is not valid to call
+	// Start multiple times or restart after Stop.
+	Start(ctx context.Context, stopper *stop.Stopper) error
+	// Stop stops the streamManager background job if it is still running.
+	Stop()
+	// Error returns a channel that will be non-empty if the streamManager
+	// encounters an error and a node-level shutdown is required.
+	Error() chan error
+}
+
 // MuxRangeFeed implements the roachpb.InternalServer interface.
-func (n *Node) MuxRangeFeed(stream kvpb.Internal_MuxRangeFeedServer) error {
-	muxStream := &lockedMuxStream{wrapped: stream}
+func (n *Node) MuxRangeFeed(muxStream kvpb.Internal_MuxRangeFeedServer) error {
+	lockedMuxStream := &lockedMuxStream{wrapped: muxStream}
 	// All context created below should derive from this context, which is
 	// cancelled once MuxRangeFeed exits.
-	ctx, cancel := context.WithCancel(n.AnnotateCtx(stream.Context()))
+	ctx, cancel := context.WithCancel(n.AnnotateCtx(muxStream.Context()))
 	defer cancel()
-	streamMuxer := rangefeed.NewStreamMuxer(muxStream, n.metrics)
-	if err := streamMuxer.Start(ctx, n.stopper); err != nil {
+
+	var sm streamManager
+	if kvserver.RangefeedUseBufferedSender.Get(&n.storeCfg.Settings.SV) {
+		sm = rangefeed.NewBufferedSender(lockedMuxStream, n.metrics)
+		log.Fatalf(ctx, "unimplemented: buffered sender for rangefeed #126560")
+	} else {
+		sm = rangefeed.NewUnbufferedSender(lockedMuxStream, n.metrics)
+	}
+
+	if err := sm.Start(ctx, n.stopper); err != nil {
 		return err
 	}
-	defer streamMuxer.Stop()
+	defer sm.Stop()
+
+	makeMuxRangefeedErrorEvent := func(
+		streamID int64, rangeID roachpb.RangeID, err *kvpb.Error,
+	) *kvpb.MuxRangeFeedEvent {
+		ev := &kvpb.MuxRangeFeedEvent{
+			StreamID: streamID,
+			RangeID:  rangeID,
+		}
+		ev.MustSetValue(&kvpb.RangeFeedError{
+			Error: *err,
+		})
+		return ev
+	}
 
 	for {
 		select {
-		case err := <-streamMuxer.Error():
+		case err := <-sm.Error():
 			return err
 		case <-ctx.Done():
 			return ctx.Err()
 		case <-n.stopper.ShouldQuiesce():
			return stop.ErrUnavailable
 		default:
-			req, err := stream.Recv()
+			req, err := muxStream.Recv()
 			if err != nil {
 				return err
 			}
 
 			if req.CloseStream {
+				// TODO: for client close stream requests, the shutdown now happens at
+				// a later time; confirm that this deferred teardown is acceptable.
 				// Note that we will call DisconnectStreamWithError again when
 				// registration.disconnect happens, but DisconnectStreamWithError will
 				// ignore subsequent errors.
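+				// Since SendBufferedError is non-blocking, handling the close here
+				// cannot stall this Recv loop on a slow client.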
-				streamMuxer.DisconnectStreamWithError(req.StreamID, req.RangeID,
-					kvpb.NewError(kvpb.NewRangeFeedRetryError(kvpb.RangeFeedRetryError_REASON_RANGEFEED_CLOSED)))
+				sm.SendBufferedError(makeMuxRangefeedErrorEvent(req.StreamID, req.RangeID,
+					kvpb.NewError(kvpb.NewRangeFeedRetryError(kvpb.RangeFeedRetryError_REASON_RANGEFEED_CLOSED))))
 				continue
 			}
 
 			streamCtx, cancel := context.WithCancel(ctx)
 			streamCtx = logtags.AddTag(streamCtx, "r", req.RangeID)
 			streamCtx = logtags.AddTag(streamCtx, "s", req.Replica.StoreID)
 			streamCtx = logtags.AddTag(streamCtx, "sid", req.StreamID)
 
-			streamSink := &perRangeEventSink{
-				ctx:      streamCtx,
-				rangeID:  req.RangeID,
-				streamID: req.StreamID,
-				wrapped:  streamMuxer,
+			var streamSink rangefeed.Stream
+			if ubs, ok := sm.(*rangefeed.UnbufferedSender); ok {
+				streamSink = rangefeed.NewPerRangeEventSink(streamCtx, req.RangeID, req.StreamID, ubs)
+			} else if bs, ok := sm.(*rangefeed.BufferedSender); ok {
+				streamSink = rangefeed.NewBufferedPerRangeEventSink(streamCtx, req.RangeID, req.StreamID, bs)
+			} else {
+				log.Fatalf(streamCtx, "unknown sender type %T", sm)
 			}
-			streamMuxer.AddStream(req.StreamID, req.RangeID, cancel)
+			sm.AddStream(req.StreamID, cancel)
 
 			// Rangefeed attempts to register a rangefeed request over the specified
 			// span. If registration fails, it returns an error. Otherwise, it returns
@@ -2035,7 +2143,8 @@ func (n *Node) MuxRangeFeed(stream kvpb.Internal_MuxRangeFeedServer) error {
 			// the provided streamSink. If the rangefeed disconnects after being
 			// successfully registered, it calls streamSink.Disconnect with the error.
 			if err := n.stores.RangeFeed(req, streamSink); err != nil {
-				streamMuxer.DisconnectStreamWithError(req.StreamID, req.RangeID, kvpb.NewError(err))
+				sm.SendBufferedError(
+					makeMuxRangefeedErrorEvent(req.StreamID, req.RangeID, kvpb.NewError(err)))
 			}
 		}
 	}
diff --git a/pkg/server/problem_ranges.go b/pkg/server/problem_ranges.go
index e0816963cf73..63d8839410a6 100644
--- a/pkg/server/problem_ranges.go
+++ b/pkg/server/problem_ranges.go
@@ -41,7 +41,7 @@ func (s *systemStatusServer) ProblemRanges(
 	if len(req.NodeID) > 0 {
 		requestedNodeID, _, err := s.parseNodeID(req.NodeID)
 		if err != nil {
-			return nil, status.Errorf(codes.InvalidArgument, err.Error())
+			return nil, status.Error(codes.InvalidArgument, err.Error())
 		}
 		isLiveMap = map[roachpb.NodeID]livenesspb.NodeVitality{requestedNodeID: s.nodeLiveness.GetNodeVitalityFromCache(requestedNodeID)}
 	} else {
@@ -80,7 +80,7 @@ func (s *systemStatusServer) ProblemRanges(
 			// Context completed, response no longer needed.
} }); err != nil { - return nil, status.Errorf(codes.Internal, err.Error()) + return nil, status.Error(codes.Internal, err.Error()) } } @@ -141,6 +141,10 @@ func (s *systemStatusServer) ProblemRanges( problems.PausedReplicaIDs = append(problems.PausedReplicaIDs, info.State.Desc.RangeID) } + if info.Problems.RangeTooLarge { + problems.TooLargeRangeIds = + append(problems.TooLargeRangeIds, info.State.Desc.RangeID) + } } slices.Sort(problems.UnavailableRangeIDs) slices.Sort(problems.RaftLeaderNotLeaseHolderRangeIDs) @@ -152,9 +156,10 @@ func (s *systemStatusServer) ProblemRanges( slices.Sort(problems.RaftLogTooLargeRangeIDs) slices.Sort(problems.CircuitBreakerErrorRangeIDs) slices.Sort(problems.PausedReplicaIDs) + slices.Sort(problems.TooLargeRangeIds) response.ProblemsByNodeID[resp.nodeID] = problems case <-ctx.Done(): - return nil, status.Errorf(codes.DeadlineExceeded, ctx.Err().Error()) + return nil, status.Error(codes.DeadlineExceeded, ctx.Err().Error()) } } diff --git a/pkg/server/server.go b/pkg/server/server.go index 96b78811e351..b3fdadc33fbf 100644 --- a/pkg/server/server.go +++ b/pkg/server/server.go @@ -51,6 +51,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvflowcontrol/kvflowdispatch" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvflowcontrol/kvflowhandle" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvflowcontrol/node_rac2" + "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvflowcontrol/rac2" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverbase" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvstorage" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness" @@ -63,6 +64,7 @@ import ( serverrangefeed "github.com/cockroachdb/cockroach/pkg/kv/kvserver/rangefeed" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/rangelog" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/reports" + "github.com/cockroachdb/cockroach/pkg/kv/kvserver/storeliveness" "github.com/cockroachdb/cockroach/pkg/multitenant/tenantcapabilities" "github.com/cockroachdb/cockroach/pkg/multitenant/tenantcapabilities/tenantcapabilitiesauthorizer" "github.com/cockroachdb/cockroach/pkg/multitenant/tenantcapabilities/tenantcapabilitieswatcher" @@ -420,11 +422,12 @@ func NewServer(cfg Config, stopper *stop.Stopper) (serverctl.ServerStartupInterf // started via the server controller. cfg.RuntimeStatSampler = runtimeSampler - appRegistry.AddMetric(base.LicenseTTL) - err = base.UpdateMetricOnLicenseChange(ctx, cfg.Settings, base.LicenseTTL, timeutil.DefaultTimeSource{}, stopper) - if err != nil { - log.Errorf(ctx, "unable to initialize periodic license metric update: %v", err) - } + appRegistry.AddMetric(metric.NewFunctionalGauge(base.LicenseTTLMetadata, func() int64 { + return base.GetLicenseTTL(ctx, cfg.Settings, timeutil.DefaultTimeSource{}) + })) + appRegistry.AddMetric(metric.NewFunctionalGauge(base.AdditionalLicenseTTLMetadata, func() int64 { + return base.GetLicenseTTL(ctx, cfg.Settings, timeutil.DefaultTimeSource{}) + })) // Create and add KV metric rules. 
kvserver.CreateAndAddRules(ctx, ruleRegistry) @@ -577,8 +580,9 @@ func NewServer(cfg Config, stopper *stop.Stopper) (serverctl.ServerStartupInterf ) storesForFlowControl := kvserver.MakeStoresForFlowControl(stores) + storesForRACv2 := kvserver.MakeStoresForRACv2(stores) kvflowTokenDispatch := kvflowdispatch.New(nodeRegistry, storesForFlowControl, nodeIDContainer) - admittedEntryAdaptor := newAdmittedLogEntryAdaptor(kvflowTokenDispatch) + admittedEntryAdaptor := newAdmittedLogEntryAdaptor(kvflowTokenDispatch, storesForRACv2) admissionKnobs, ok := cfg.TestingKnobs.AdmissionControl.(*admission.TestingKnobs) if !ok { admissionKnobs = &admission.TestingKnobs{} @@ -625,6 +629,12 @@ func NewServer(cfg Config, stopper *stop.Stopper) (serverctl.ServerStartupInterf admissionControl.storesFlowControl.ResetStreams(ctx) }) + admittedPiggybacker := node_rac2.NewAdmittedPiggybacker() + streamTokenCounterProvider := rac2.NewStreamTokenCounterProvider(st, clock) + evalWaitMetrics := rac2.NewEvalWaitMetrics() + nodeRegistry.AddMetricStruct(evalWaitMetrics) + nodeRegistry.AddMetricStruct(streamTokenCounterProvider.Metrics()) + var raftTransportKnobs *kvserver.RaftTransportTestingKnobs if knobs := cfg.TestingKnobs.RaftTransport; knobs != nil { raftTransportKnobs = knobs.(*kvserver.RaftTransportTestingKnobs) @@ -639,11 +649,16 @@ func NewServer(cfg Config, stopper *stop.Stopper) (serverctl.ServerStartupInterf admissionControl.kvflowTokenDispatch, admissionControl.storesFlowControl, admissionControl.storesFlowControl, - (*node_rac2.AdmittedPiggybacker)(nil), + admittedPiggybacker, + storesForRACv2, raftTransportKnobs, ) nodeRegistry.AddMetricStruct(raftTransport.Metrics()) + storeLivenessTransport := storeliveness.NewTransport( + cfg.AmbientCtx, stopper, clock, kvNodeDialer, grpcServer.Server, + ) + ctSender := sidetransport.NewSender(stopper, st, clock, kvNodeDialer) ctReceiver := sidetransport.NewReceiver(nodeIDContainer, stopper, stores, nil /* testingKnobs */) @@ -850,6 +865,7 @@ func NewServer(cfg Config, stopper *stop.Stopper) (serverctl.ServerStartupInterf Gossip: g, NodeLiveness: nodeLiveness, Transport: raftTransport, + StoreLivenessTransport: storeLivenessTransport, NodeDialer: kvNodeDialer, RPCContext: rpcContext, ScanInterval: cfg.ScanInterval, @@ -875,6 +891,9 @@ func NewServer(cfg Config, stopper *stop.Stopper) (serverctl.ServerStartupInterf KVFlowController: admissionControl.kvflowController, KVFlowHandles: admissionControl.storesFlowControl, KVFlowHandleMetrics: admissionControl.kvFlowHandleMetrics, + KVFlowAdmittedPiggybacker: admittedPiggybacker, + KVFlowStreamTokenProvider: streamTokenCounterProvider, + KVFlowEvalWaitMetrics: evalWaitMetrics, SchedulerLatencyListener: admissionControl.schedulerLatencyListener, RangeCount: &atomic.Int64{}, } @@ -1971,10 +1990,11 @@ func (s *topLevelServer) PreStart(ctx context.Context) error { // wholly initialized stores (it reads the StoreIdentKeys). It also needs // to come before the call into SetPebbleMetricsProvider, which internally // uses the disk stats map we're initializing. 
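+	// The provider returned below owns the disk stats map; closing its disk
+	// monitors is now handled by the provider's Close method rather than by a
+	// stopper closer.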
- if err := s.node.registerEnginesForDiskStatsMap(s.cfg.Stores.Specs, s.engines, (*diskMonitorManager)(s.cfg.DiskMonitorManager)); err != nil { + var pmp admission.PebbleMetricsProvider + if pmp, err = s.node.registerEnginesForDiskStatsMap( + s.cfg.Stores.Specs, s.engines, (*diskMonitorManager)(s.cfg.DiskMonitorManager)); err != nil { return errors.Wrapf(err, "failed to register engines for the disk stats map") } - s.stopper.AddCloser(stop.CloserFn(func() { s.node.diskStatsMap.closeDiskMonitors() })) // Stores have been initialized, so Node can now provide Pebble metrics. // @@ -1984,7 +2004,7 @@ func (s *topLevelServer) PreStart(ctx context.Context) error { // existing stores shouldn’t be able to acquire leases yet. Although, below // Raft commands like log application and snapshot application may be able // to bypass admission control. - s.storeGrantCoords.SetPebbleMetricsProvider(ctx, s.node, s.node) + s.storeGrantCoords.SetPebbleMetricsProvider(ctx, pmp, s.node) // Once all stores are initialized, check if offline storage recovery // was done prior to start and record any actions appropriately. @@ -2072,6 +2092,7 @@ func (s *topLevelServer) PreStart(ctx context.Context) error { s.stopper, s.cfg.TestingKnobs, orphanedLeasesTimeThresholdNanos, + s.InitialStart(), ); err != nil { return err } diff --git a/pkg/server/server_sql.go b/pkg/server/server_sql.go index 3692334c2c20..9288d6dce815 100644 --- a/pkg/server/server_sql.go +++ b/pkg/server/server_sql.go @@ -57,6 +57,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/security/clientsecopts" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/server/diagnostics" + "github.com/cockroachdb/cockroach/pkg/server/license" "github.com/cockroachdb/cockroach/pkg/server/pgurl" "github.com/cockroachdb/cockroach/pkg/server/serverctl" "github.com/cockroachdb/cockroach/pkg/server/serverpb" @@ -108,6 +109,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sqlliveness/slinstance" "github.com/cockroachdb/cockroach/pkg/sql/sqlliveness/slprovider" "github.com/cockroachdb/cockroach/pkg/sql/sqlstats" + "github.com/cockroachdb/cockroach/pkg/sql/sqlstats/insights" "github.com/cockroachdb/cockroach/pkg/sql/stats" "github.com/cockroachdb/cockroach/pkg/sql/stmtdiagnostics" "github.com/cockroachdb/cockroach/pkg/sql/syntheticprivilegecache" @@ -1057,6 +1059,7 @@ func newSQLServer(ctx context.Context, cfg sqlServerArgs) (*SQLServer, error) { NodeDescs: cfg.nodeDescs, TenantCapabilitiesReader: cfg.tenantCapabilitiesReader, CidrLookup: cfg.BaseConfig.CidrLookup, + LicenseEnforcer: license.GetEnforcerInstance(), } if codec.ForSystemTenant() { @@ -1137,6 +1140,11 @@ func newSQLServer(ctx context.Context, cfg sqlServerArgs) (*SQLServer, error) { if externalConnKnobs := cfg.TestingKnobs.ExternalConnection; externalConnKnobs != nil { execCfg.ExternalConnectionTestingKnobs = externalConnKnobs.(*externalconn.TestingKnobs) } + + if insightsKnobs := cfg.TestingKnobs.Insights; insightsKnobs != nil { + execCfg.InsightsTestingKnobs = insightsKnobs.(*insights.TestingKnobs) + + } var tableStatsTestingKnobs *stats.TableStatsTestingKnobs if tableStatsKnobs := cfg.TestingKnobs.TableStatsKnobs; tableStatsKnobs != nil { tableStatsTestingKnobs = tableStatsKnobs.(*stats.TableStatsTestingKnobs) @@ -1232,10 +1240,6 @@ func newSQLServer(ctx context.Context, cfg sqlServerArgs) (*SQLServer, error) { // Job internal operations use the 
node principal. sd.UserProto = username.NodeUserName().EncodeProto() - - // The following should not apply to SQL operations performed by the jobs - // subsystem. - sd.StmtTimeout = 0 }) jobRegistry.SetInternalDB(jobsInternalDB) @@ -1440,6 +1444,7 @@ func (s *SQLServer) preStart( stopper *stop.Stopper, knobs base.TestingKnobs, orphanedLeasesTimeThresholdNanos int64, + initialStart bool, ) error { // If necessary, start the tenant proxy first, to ensure all other // components can properly route to KV nodes. The Start method will block @@ -1757,6 +1762,8 @@ func (s *SQLServer) preStart( ) s.execCfg.SyntheticPrivilegeCache.Start(ctx) + s.startLicenseEnforcer(ctx, knobs, initialStart) + // Report a warning if the server is being shut down via the stopper // before it was gracefully drained. This warning may be innocuous // in tests where there is no use of the test server/cluster after @@ -1906,6 +1913,34 @@ func (s *SQLServer) StartDiagnostics(ctx context.Context) { s.diagnosticsReporter.PeriodicallyReportDiagnostics(ctx, s.stopper) } +func (s *SQLServer) startLicenseEnforcer( + ctx context.Context, knobs base.TestingKnobs, initialStart bool, +) { + // Start the license enforcer. This is only started for the system tenant since + // it requires access to the system keyspace. For secondary tenants, this struct + // is shared to provide access to the values cached from the KV read. + if s.execCfg.Codec.ForSystemTenant() { + if knobs.Server != nil { + s.execCfg.LicenseEnforcer.TestingKnobs = &knobs.Server.(*TestingKnobs).LicenseTestingKnobs + } + // TODO(spilchen): we need to tell the license enforcer about the + // diagnostics reporter. This will be handled in CRDB-39991 + err := startup.RunIdempotentWithRetry(ctx, s.stopper.ShouldQuiesce(), "license enforcer start", + func(ctx context.Context) error { + return s.execCfg.LicenseEnforcer.Start(ctx, s.cfg.Settings, s.internalDB, initialStart) + }) + // This is not a critical component. If it fails to start, we log a warning + // rather than prevent the entire server from starting. + if err != nil { + log.Warningf(ctx, "failed to start the license enforcer: %v", err) + } + } +} + +func (s *SQLServer) disableLicenseEnforcement(ctx context.Context) { + s.execCfg.LicenseEnforcer.Disable(ctx) +} + // AnnotateCtx annotates the given context with the server tracer and tags. func (s *SQLServer) AnnotateCtx(ctx context.Context) context.Context { return s.ambientCtx.AnnotateCtx(ctx) diff --git a/pkg/server/serverpb/admin.proto b/pkg/server/serverpb/admin.proto index 51661b23bd2d..f3dbda2917f5 100644 --- a/pkg/server/serverpb/admin.proto +++ b/pkg/server/serverpb/admin.proto @@ -24,6 +24,7 @@ import "kv/kvpb/api.proto"; import "roachpb/metadata.proto"; import "roachpb/data.proto"; import "ts/catalog/chart_catalog.proto"; +import "util/hlc/timestamp.proto"; import "util/metric/metric.proto"; import "util/tracing/tracingpb/recorded_span.proto"; import "gogoproto/gogo.proto"; @@ -1307,6 +1308,10 @@ service Admin { get: "/_admin/v1/tenants" }; } + + // ReadFromTenantInfo returns the tenant from which the requesting tenant + // should read, if any. + rpc ReadFromTenantInfo(ReadFromTenantInfoRequest) returns (ReadFromTenantInfoResponse) {} } message ListTenantsRequest{} @@ -1322,6 +1327,24 @@ message Tenant { string rpc_addr = 4; } +// ReadFromTenantInfoRequest requests info, if any, on which tenant the caller +// should read from. +message ReadFromTenantInfoRequest { + // TenantID should always be the ID of the tenant making the request. 
This + // duplicates the ID in the auth context that is added implicitly, and must + // always match that ID when that ID is present, however that ID is absent in + // insecure test clusters which is why we also specify it explicitly here. + roachpb.TenantID tenant_id = 1 [(gogoproto.nullable)=false, (gogoproto.customname) = "TenantID"]; +} + +// ReadFromTenantInfoResponse instructs a tenant as to which tenant, if any, it +// should configure itself to read from and the timestamp at which it should do +// so. +message ReadFromTenantInfoResponse { + roachpb.TenantID read_from = 1 [(gogoproto.nullable)=false]; + util.hlc.Timestamp read_at = 2 [(gogoproto.nullable)=false]; +} + message ListTracingSnapshotsRequest {} message ListTracingSnapshotsResponse { diff --git a/pkg/server/serverpb/status.go b/pkg/server/serverpb/status.go index 1e68ece42efc..81c475b7751c 100644 --- a/pkg/server/serverpb/status.go +++ b/pkg/server/serverpb/status.go @@ -48,6 +48,8 @@ type SQLStatusServer interface { NodesUI(context.Context, *NodesRequest) (*NodesResponseExternal, error) RequestJobProfilerExecutionDetails(context.Context, *RequestJobProfilerExecutionDetailsRequest) (*RequestJobProfilerExecutionDetailsResponse, error) TenantServiceStatus(context.Context, *TenantServiceStatusRequest) (*TenantServiceStatusResponse, error) + UpdateTableMetadataCache(context.Context, *UpdateTableMetadataCacheRequest) (*UpdateTableMetadataCacheResponse, error) + GetUpdateTableMetadataCacheSignal() chan struct{} } // OptionalNodesStatusServer is a StatusServer that is only optionally present diff --git a/pkg/server/serverpb/status.proto b/pkg/server/serverpb/status.proto index 55aeede28e55..2cd5adfec3ac 100644 --- a/pkg/server/serverpb/status.proto +++ b/pkg/server/serverpb/status.proto @@ -409,6 +409,7 @@ message RangeProblems { bool raft_log_too_large = 7; bool circuit_breaker_error = 9; bool paused_followers = 10; + bool range_too_large = 11; } // RangeStatistics describes statistics reported by a range. For internal use @@ -1324,6 +1325,11 @@ message ProblemRangesResponse { (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.RangeID" ]; + repeated int64 too_large_range_ids = 12 [ + (gogoproto.customname) = "TooLargeRangeIds", + (gogoproto.casttype) = + "github.com/cockroachdb/cockroach/pkg/roachpb.RangeID" + ]; } reserved 1 to 7; // NodeID is the node that submitted all the requests. @@ -2179,6 +2185,16 @@ message RequestJobProfilerExecutionDetailsResponse {} repeated string files = 1; } + message UpdateTableMetadataCacheRequest { + // If true, the server will attempt to send a signal to the + // table metadata job by notifying the channel set on the + // status server. + bool local = 1; + } + + message UpdateTableMetadataCacheResponse { + } + service Status { // Certificates retrieves a copy of the TLS certificates. 
@@ -2667,4 +2683,7 @@ service Status { get: "/_status/list_job_profiler_execution_details/{job_id}" }; } + + rpc UpdateTableMetadataCache(UpdateTableMetadataCacheRequest) returns (UpdateTableMetadataCacheResponse) { + } } diff --git a/pkg/server/settingswatcher/settings_watcher_external_test.go b/pkg/server/settingswatcher/settings_watcher_external_test.go index 6a593b799c12..c0a8e0e37887 100644 --- a/pkg/server/settingswatcher/settings_watcher_external_test.go +++ b/pkg/server/settingswatcher/settings_watcher_external_test.go @@ -601,15 +601,15 @@ func TestStaleRowsDoNotCauseSettingsToRegress(t *testing.T) { tombstone := setting1KV tombstone.Value.RawBytes = nil - require.NoError(t, stream.Send(newRangeFeedEvent(setting1KV, ts1))) + require.NoError(t, stream.SendUnbuffered(newRangeFeedEvent(setting1KV, ts1))) settingIsSoon(t, newSettingValue) - require.NoError(t, stream.Send(newRangeFeedEvent(tombstone, ts0))) + require.NoError(t, stream.SendUnbuffered(newRangeFeedEvent(tombstone, ts0))) settingStillHasValueAfterAShortWhile(t, newSettingValue) - require.NoError(t, stream.Send(newRangeFeedEvent(tombstone, ts2))) + require.NoError(t, stream.SendUnbuffered(newRangeFeedEvent(tombstone, ts2))) settingIsSoon(t, defaultFakeSettingValue) - require.NoError(t, stream.Send(newRangeFeedEvent(setting1KV, ts1))) + require.NoError(t, stream.SendUnbuffered(newRangeFeedEvent(setting1KV, ts1))) settingStillHasValueAfterAShortWhile(t, defaultFakeSettingValue) } diff --git a/pkg/server/span_download.go b/pkg/server/span_download.go index 639d7fd1a3ca..57a9bc884a20 100644 --- a/pkg/server/span_download.go +++ b/pkg/server/span_download.go @@ -51,7 +51,7 @@ func (s *systemStatusServer) DownloadSpan( if len(req.NodeID) > 0 { _, local, err := s.parseNodeID(req.NodeID) if err != nil { - return nil, status.Errorf(codes.InvalidArgument, err.Error()) + return nil, status.Error(codes.InvalidArgument, err.Error()) } if local { diff --git a/pkg/server/span_stats_server.go b/pkg/server/span_stats_server.go index 07fb70be942d..393cc771ec59 100644 --- a/pkg/server/span_stats_server.go +++ b/pkg/server/span_stats_server.go @@ -13,6 +13,7 @@ package server import ( "context" "fmt" + "sort" "strconv" "time" @@ -25,6 +26,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/server/serverpb" "github.com/cockroachdb/cockroach/pkg/settings" "github.com/cockroachdb/cockroach/pkg/storage" + "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/rangedesc" "github.com/cockroachdb/cockroach/pkg/util/timeutil" @@ -151,6 +153,7 @@ func (s *systemStatusServer) spanStatsFanOut( res.SpanToStats[spanStr].ApproximateDiskBytes += spanStats.ApproximateDiskBytes res.SpanToStats[spanStr].RemoteFileBytes += spanStats.RemoteFileBytes res.SpanToStats[spanStr].ExternalFileBytes += spanStats.ExternalFileBytes + res.SpanToStats[spanStr].StoreIDs = util.CombineUnique(res.SpanToStats[spanStr].StoreIDs, spanStats.StoreIDs) // Logical values: take the values from the node that responded first. // TODO: This should really be read from the leaseholder. @@ -214,6 +217,7 @@ func (s *systemStatusServer) statsForSpan( // First, get the approximate disk bytes from each store. 
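+	// Each store visited below also records its StoreID; the fan-out merges
+	// per-node results with util.CombineUnique, and the slice is sorted after
+	// the visit so the response is deterministic.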
 	err = s.stores.VisitStores(func(store *kvserver.Store) error {
+		spanStats.StoreIDs = append(spanStats.StoreIDs, store.StoreID())
 		approxDiskBytes, remoteBytes, externalBytes, err := store.TODOEngine().ApproximateDiskBytes(rSpan.Key.AsRawKey(), rSpan.EndKey.AsRawKey())
 		if err != nil {
 			return err
@@ -224,6 +228,10 @@ func (s *systemStatusServer) statsForSpan(
 		return nil
 	})
 
+	sort.Slice(spanStats.StoreIDs, func(i, j int) bool {
+		return spanStats.StoreIDs[i] < spanStats.StoreIDs[j]
+	})
+
 	if err != nil {
 		return nil, err
 	}
@@ -287,6 +295,7 @@ func (s *systemStatusServer) statsForSpan(
 		log.VEventf(ctx, 1, "Range %v exceeds span %v, calculating stats for subspan %v",
 			descSpan, rSpan, roachpb.RSpan{Key: scanStart, EndKey: scanEnd},
 		)
+
 		err = s.stores.VisitStores(func(s *kvserver.Store) error {
 			stats, err := storage.ComputeStats(
 				ctx,
@@ -303,7 +312,6 @@ func (s *systemStatusServer) statsForSpan(
 			spanStats.TotalStats.Add(stats)
 			return nil
 		})
-
 		if err != nil {
 			return nil, err
 		}
diff --git a/pkg/server/span_stats_test.go b/pkg/server/span_stats_test.go
index 27f585427c8b..152fb4bdc952 100644
--- a/pkg/server/span_stats_test.go
+++ b/pkg/server/span_stats_test.go
@@ -162,6 +162,18 @@ func TestSpanStatsFanOut(t *testing.T) {
 	)
 	require.NoError(t, err)
 
+	equalStoreIDs := func(a, b []roachpb.StoreID) bool {
+		if len(a) != len(b) {
+			return false
+		}
+		for i := range a {
+			if a[i] != b[i] {
+				return false
+			}
+		}
+		return true
+	}
+
 	// Verify stats across different spans.
 	for _, tcase := range testCases {
 		rSpan, err := keys.SpanAddr(tcase.span)
@@ -169,6 +181,14 @@ func TestSpanStatsFanOut(t *testing.T) {
 
 		// Assert expected values from multi-span request
 		spanStats := multiResult.SpanToStats[tcase.span.String()]
+		if !equalStoreIDs([]roachpb.StoreID{1, 2, 3}, spanStats.StoreIDs) {
+			return errors.Newf("Multi-span: expected storeIDs %v in span [%s - %s], found %v",
+				[]roachpb.StoreID{1, 2, 3},
+				rSpan.Key.String(),
+				rSpan.EndKey.String(),
+				spanStats.StoreIDs,
+			)
+		}
 		if tcase.expectedRanges != spanStats.RangeCount {
 			return errors.Newf("Multi-span: expected %d ranges in span [%s - %s], found %d",
 				tcase.expectedRanges,
diff --git a/pkg/server/sql_stats.go b/pkg/server/sql_stats.go
index 6b90407a5fd7..ff190b532880 100644
--- a/pkg/server/sql_stats.go
+++ b/pkg/server/sql_stats.go
@@ -53,7 +53,7 @@ func (s *statusServer) ResetSQLStats(
 	if len(req.NodeID) > 0 {
 		requestedNodeID, local, err := s.parseNodeID(req.NodeID)
 		if err != nil {
-			return nil, status.Errorf(codes.InvalidArgument, err.Error())
+			return nil, status.Error(codes.InvalidArgument, err.Error())
 		}
 		if local {
 			controller.ResetLocalSQLStats(ctx)
diff --git a/pkg/server/srverrors/errors.go b/pkg/server/srverrors/errors.go
index df6a85d72fd1..b39f882f0f5b 100644
--- a/pkg/server/srverrors/errors.go
+++ b/pkg/server/srverrors/errors.go
@@ -12,7 +12,6 @@ package srverrors
 
 import (
 	"context"
-	"fmt"
 	"net/http"
 
 	"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode"
@@ -30,8 +29,7 @@ func ServerError(ctx context.Context, err error) error {
 	// Include the PGCode in the message for easier troubleshooting
 	errCode := pgerror.GetPGCode(err).String()
 	if errCode != pgcode.Uncategorized.String() {
-		errMessage := fmt.Sprintf("%s Error Code: %s", ErrAPIInternalErrorString, errCode)
-		return grpcstatus.Errorf(codes.Internal, errMessage)
+		return grpcstatus.Errorf(codes.Internal, "%s Error Code: %s", ErrAPIInternalErrorString, errCode)
 	}
 
 	// The error is already grpcstatus formatted error.
@@ -56,7 +54,7 @@ func ServerErrorf(ctx context.Context, format string, args ...interface{}) error var ErrAPIInternalErrorString = "An internal server error has occurred. Please check your CockroachDB logs for more details." // ErrAPIInternalError is the gRPC status error returned when an internal error was encountered. -var ErrAPIInternalError = grpcstatus.Errorf( +var ErrAPIInternalError = grpcstatus.Error( codes.Internal, ErrAPIInternalErrorString, ) diff --git a/pkg/server/statements.go b/pkg/server/statements.go index d60cae257e06..9533cd9ec6e0 100644 --- a/pkg/server/statements.go +++ b/pkg/server/statements.go @@ -59,7 +59,7 @@ func (s *statusServer) Statements( if len(req.NodeID) > 0 { requestedNodeID, local, err := s.parseNodeID(req.NodeID) if err != nil { - return nil, status.Errorf(codes.InvalidArgument, err.Error()) + return nil, status.Error(codes.InvalidArgument, err.Error()) } if local { return statementsLocal( diff --git a/pkg/server/status.go b/pkg/server/status.go index b47dcf1cc9d3..a19809c56388 100644 --- a/pkg/server/status.go +++ b/pkg/server/status.go @@ -74,6 +74,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/roleoption" "github.com/cockroachdb/cockroach/pkg/sql/sem/builtins" "github.com/cockroachdb/cockroach/pkg/sql/sem/catconstants" + "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlinstance" "github.com/cockroachdb/cockroach/pkg/sql/sqlstats/insights" "github.com/cockroachdb/cockroach/pkg/util/grpcutil" @@ -488,6 +489,10 @@ type statusServer struct { // take 2^16 seconds (18 hours) to hit any one of them. cancelSemaphore *quotapool.IntPool + // updateTableMetadataJobSignal is used to signal the updateTableMetadataCacheJob + // to execute. + updateTableMetadataJobSignal chan struct{} + knobs *TestingKnobs } @@ -604,8 +609,9 @@ func newStatusServer( internalExecutor: internalExecutor, // See the docstring on cancelSemaphore for details about this initialization. 
- cancelSemaphore: quotapool.NewIntPool("pgwire-cancel", 256), - knobs: knobs, + cancelSemaphore: quotapool.NewIntPool("pgwire-cancel", 256), + updateTableMetadataJobSignal: make(chan struct{}), + knobs: knobs, } return server @@ -733,7 +739,7 @@ func (s *systemStatusServer) Gossip( nodeID, local, err := s.parseNodeID(req.NodeId) if err != nil { - return nil, status.Errorf(codes.InvalidArgument, err.Error()) + return nil, status.Error(codes.InvalidArgument, err.Error()) } if local { @@ -779,7 +785,7 @@ func (s *systemStatusServer) EngineStats( nodeID, local, err := s.parseNodeID(req.NodeId) if err != nil { - return nil, status.Errorf(codes.InvalidArgument, err.Error()) + return nil, status.Error(codes.InvalidArgument, err.Error()) } if !local { @@ -815,7 +821,7 @@ func (s *systemStatusServer) Allocator( nodeID, local, err := s.parseNodeID(req.NodeId) if err != nil { - return nil, status.Errorf(codes.InvalidArgument, err.Error()) + return nil, status.Error(codes.InvalidArgument, err.Error()) } if !local { @@ -1044,7 +1050,7 @@ func (s *statusServer) Certificates( nodeID, local, err := s.parseNodeID(req.NodeId) if err != nil { - return nil, status.Errorf(codes.InvalidArgument, err.Error()) + return nil, status.Error(codes.InvalidArgument, err.Error()) } if s.cfg.Insecure { @@ -1165,7 +1171,7 @@ func (s *statusServer) Details( nodeID, local, err := s.parseNodeID(req.NodeId) if err != nil { - return nil, status.Errorf(codes.InvalidArgument, err.Error()) + return nil, status.Error(codes.InvalidArgument, err.Error()) } if !local { status, err := s.dialNode(ctx, nodeID) @@ -1229,7 +1235,7 @@ func (s *statusServer) GetFiles( nodeID, local, err := s.parseNodeID(req.NodeId) if err != nil { - return nil, status.Errorf(codes.InvalidArgument, err.Error()) + return nil, status.Error(codes.InvalidArgument, err.Error()) } if !local { status, err := s.dialNode(ctx, nodeID) @@ -1289,7 +1295,7 @@ func (s *statusServer) LogFilesList( nodeID, local, err := s.parseNodeID(req.NodeId) if err != nil { - return nil, status.Errorf(codes.InvalidArgument, err.Error()) + return nil, status.Error(codes.InvalidArgument, err.Error()) } if !local { status, err := s.dialNode(ctx, nodeID) @@ -1324,7 +1330,7 @@ func (s *statusServer) LogFile( nodeID, local, err := s.parseNodeID(req.NodeId) if err != nil { - return nil, status.Errorf(codes.InvalidArgument, err.Error()) + return nil, status.Error(codes.InvalidArgument, err.Error()) } if !local { status, err := s.dialNode(ctx, nodeID) @@ -1430,7 +1436,7 @@ func (s *statusServer) Logs( nodeID, local, err := s.parseNodeID(req.NodeId) if err != nil { - return nil, status.Errorf(codes.InvalidArgument, err.Error()) + return nil, status.Error(codes.InvalidArgument, err.Error()) } if !local { status, err := s.dialNode(ctx, nodeID) @@ -1518,7 +1524,7 @@ func (s *statusServer) Stacks( nodeID, local, err := s.parseNodeID(req.NodeId) if err != nil { - return nil, status.Errorf(codes.InvalidArgument, err.Error()) + return nil, status.Error(codes.InvalidArgument, err.Error()) } if !local { @@ -1594,7 +1600,7 @@ func (s *statusServer) processProfileProtoGoroutines( } if err := mergedProfiles.Write(res); err != nil { - return nil, status.Errorf(codes.Internal, err.Error()) + return nil, status.Error(codes.Internal, err.Error()) } return res.Bytes(), nil @@ -1650,7 +1656,7 @@ func (s *statusServer) processCPUProfilesFromAllNodes( var buf bytes.Buffer if err := mergedProfiles.Write(&buf); err != nil { - return nil, status.Errorf(codes.Internal, err.Error()) + return nil, 
status.Error(codes.Internal, err.Error()) } return buf.Bytes(), nil } @@ -1747,7 +1753,7 @@ func (s *statusServer) Profile( nodeID, local, err := s.parseNodeID(req.NodeId) if err != nil { - return nil, status.Errorf(codes.InvalidArgument, err.Error()) + return nil, status.Error(codes.InvalidArgument, err.Error()) } if !local { @@ -2040,7 +2046,7 @@ func (s *statusServer) nodeStatus( ) (*statuspb.NodeStatus, error) { nodeID, _, err := s.parseNodeID(req.NodeId) if err != nil { - return nil, status.Errorf(codes.InvalidArgument, err.Error()) + return nil, status.Error(codes.InvalidArgument, err.Error()) } key := keys.NodeStatusKey(nodeID) @@ -2209,7 +2215,7 @@ func (s *statusServer) Metrics( nodeID, local, err := s.parseNodeID(req.NodeId) if err != nil { - return nil, status.Errorf(codes.InvalidArgument, err.Error()) + return nil, status.Error(codes.InvalidArgument, err.Error()) } if !local { @@ -2385,7 +2391,7 @@ func (s *systemStatusServer) rangesHelper( nodeID, local, err := s.parseNodeID(req.NodeId) if err != nil { - return nil, 0, status.Errorf(codes.InvalidArgument, err.Error()) + return nil, 0, status.Error(codes.InvalidArgument, err.Error()) } if !local { @@ -2495,6 +2501,7 @@ func (s *systemStatusServer) rangesHelper( NoLease: metrics.Leader && !metrics.LeaseValid && !metrics.Quiescent, QuiescentEqualsTicking: raftStatus != nil && metrics.Quiescent == metrics.Ticking, RaftLogTooLarge: metrics.RaftLogTooLarge, + RangeTooLarge: metrics.RangeTooLarge, CircuitBreakerError: len(state.CircuitBreakerError) > 0, PausedFollowers: metrics.PausedFollowerCount > 0, }, @@ -2564,7 +2571,7 @@ func (s *systemStatusServer) rangesHelper( return nil }) if err != nil { - return nil, 0, status.Errorf(codes.Internal, err.Error()) + return nil, 0, status.Error(codes.Internal, err.Error()) } var next int if limit > 0 { @@ -2641,7 +2648,7 @@ func (s *systemStatusServer) TenantRanges( nodeIDString := nodeID.String() _, local, err := s.parseNodeID(nodeIDString) if err != nil { - return nil, status.Errorf(codes.Internal, err.Error()) + return nil, status.Error(codes.Internal, err.Error()) } nodeReq := &serverpb.RangesRequest{ @@ -2753,7 +2760,7 @@ func (s *systemStatusServer) HotRanges( if len(req.NodeID) > 0 { requestedNodeID, local, err := s.parseNodeID(req.NodeID) if err != nil { - return nil, status.Errorf(codes.InvalidArgument, err.Error()) + return nil, status.Error(codes.InvalidArgument, err.Error()) } // Only hot ranges from the local node. 
@@ -3247,7 +3254,7 @@ func iterateNodes[Client, Result any]( responseFn(res.nodeID, res.response) } case <-ctx.Done(): - resultErr = errors.Errorf("request of %s canceled before completion", errorCtx) + resultErr = errors.Wrapf(ctx.Err(), "request of %s canceled before completion", errorCtx) } numNodes-- } @@ -3438,7 +3445,7 @@ func (s *statusServer) CancelSession( reqUsername, err := username.MakeSQLUsernameFromPreNormalizedStringChecked(req.Username) if err != nil { - return nil, status.Errorf(codes.InvalidArgument, err.Error()) + return nil, status.Error(codes.InvalidArgument, err.Error()) } session, ok := s.sessionRegistry.GetSessionByID(sessionID) @@ -3491,7 +3498,7 @@ func (s *statusServer) CancelQuery( reqUsername, err := username.MakeSQLUsernameFromPreNormalizedStringChecked(req.Username) if err != nil { - return nil, status.Errorf(codes.InvalidArgument, err.Error()) + return nil, status.Error(codes.InvalidArgument, err.Error()) } session, ok := s.sessionRegistry.GetSessionByQueryID(queryID) @@ -3679,7 +3686,7 @@ func (s *statusServer) ListExecutionInsights( if len(req.NodeID) > 0 { requestedNodeID, local, err := s.parseNodeID(req.NodeID) if err != nil { - return nil, status.Errorf(codes.InvalidArgument, err.Error()) + return nil, status.Error(codes.InvalidArgument, err.Error()) } if local { return s.localExecutionInsights(ctx) @@ -3770,7 +3777,7 @@ func (s *systemStatusServer) TenantServiceStatus( if len(req.NodeID) > 0 { reqNodeID, local, err := s.parseNodeID(req.NodeID) if err != nil { - return nil, status.Errorf(codes.InvalidArgument, err.Error()) + return nil, status.Error(codes.InvalidArgument, err.Error()) } if local { @@ -3840,7 +3847,7 @@ func (s *statusServer) Diagnostics( ctx = s.AnnotateCtx(ctx) nodeID, local, err := s.parseNodeID(req.NodeId) if err != nil { - return nil, status.Errorf(codes.InvalidArgument, err.Error()) + return nil, status.Error(codes.InvalidArgument, err.Error()) } if !local { @@ -3869,7 +3876,7 @@ func (s *systemStatusServer) Stores( nodeID, local, err := s.parseNodeID(req.NodeId) if err != nil { - return nil, status.Errorf(codes.InvalidArgument, err.Error()) + return nil, status.Error(codes.InvalidArgument, err.Error()) } if !local { @@ -3998,7 +4005,7 @@ func (s *statusServer) JobRegistryStatus( nodeID, local, err := s.parseNodeID(req.NodeId) if err != nil { - return nil, status.Errorf(codes.InvalidArgument, err.Error()) + return nil, status.Error(codes.InvalidArgument, err.Error()) } if !local { status, err := s.dialNode(ctx, nodeID) @@ -4066,7 +4073,7 @@ func (s *statusServer) TxnIDResolution( requestedNodeID, local, err := s.parseNodeID(req.CoordinatorID) if err != nil { - return nil, status.Errorf(codes.InvalidArgument, err.Error()) + return nil, status.Error(codes.InvalidArgument, err.Error()) } if local { return s.localTxnIDResolution(req), nil @@ -4110,7 +4117,7 @@ func (s *statusServer) TransactionContentionEvents( if len(req.NodeID) > 0 { requestedNodeID, local, err := s.parseNodeID(req.NodeID) if err != nil { - return nil, status.Errorf(codes.InvalidArgument, err.Error()) + return nil, status.Error(codes.InvalidArgument, err.Error()) } if local { return s.localTransactionContentionEvents(shouldRedactContendingKey), nil @@ -4195,3 +4202,55 @@ func (s *statusServer) ListJobProfilerExecutionDetails( } return &serverpb.ListJobProfilerExecutionDetailsResponse{Files: files}, nil } + +func (s *statusServer) localUpdateTableMetadataCache() ( + *serverpb.UpdateTableMetadataCacheResponse, + error, +) { + select { + case 
s.updateTableMetadataJobSignal <- struct{}{}: + default: + return nil, status.Errorf(codes.Unavailable, "update table metadata cache job is not ready to start execution") + } + return &serverpb.UpdateTableMetadataCacheResponse{}, nil +} + +func (s *statusServer) UpdateTableMetadataCache( + ctx context.Context, req *serverpb.UpdateTableMetadataCacheRequest, +) (*serverpb.UpdateTableMetadataCacheResponse, error) { + if req.Local { + return s.localUpdateTableMetadataCache() + } + ctx = s.AnnotateCtx(ctx) + + // Get the node id for the job. + row, err := s.internalExecutor.QueryRow(ctx, "get-node-id", nil, ` +SELECT claim_instance_id +FROM system.jobs +WHERE id = $1 +`, jobs.UpdateTableMetadataCacheJobID) + if err != nil { + return nil, status.Errorf(codes.Internal, "%s", err.Error()) + } + if row == nil { + return nil, status.Error(codes.FailedPrecondition, "no job record found") + } + if row[0] == tree.DNull { + return nil, status.Error(codes.Unavailable, "update table metadata cache job is unclaimed") + } + + nodeID := roachpb.NodeID(*row[0].(*tree.DInt)) + statusClient, err := s.dialNode(ctx, nodeID) + if err != nil { + return nil, srverrors.ServerError(ctx, err) + } + return statusClient.UpdateTableMetadataCache(ctx, &serverpb.UpdateTableMetadataCacheRequest{ + Local: true, + }) +} + +// GetUpdateTableMetadataCacheSignal returns the signal channel used +// in the UpdateTableMetadataCache rpc. +func (s *statusServer) GetUpdateTableMetadataCacheSignal() chan struct{} { + return s.updateTableMetadataJobSignal +} diff --git a/pkg/server/status_local_file_retrieval.go b/pkg/server/status_local_file_retrieval.go index 6abbad3aa03a..6bd86b2c9fbc 100644 --- a/pkg/server/status_local_file_retrieval.go +++ b/pkg/server/status_local_file_retrieval.go @@ -72,7 +72,7 @@ func profileLocal( if req.Labels { buf.WriteString(fmt.Sprintf("Stacks for node: %d\n\n", nodeID)) if err := p.WriteTo(&buf, 1); err != nil { - return nil, status.Errorf(codes.Internal, err.Error()) + return nil, status.Error(codes.Internal, err.Error()) } buf.WriteString("\n\n") @@ -82,7 +82,7 @@ func profileLocal( } } else { if err := p.WriteTo(&buf, 0); err != nil { - return nil, status.Errorf(codes.Internal, err.Error()) + return nil, status.Error(codes.Internal, err.Error()) } } return &serverpb.JSONResponse{Data: buf.Bytes()}, nil @@ -98,7 +98,7 @@ func profileLocal( } var buf bytes.Buffer if err := p.WriteTo(&buf, 0); err != nil { - return nil, status.Errorf(codes.Internal, err.Error()) + return nil, status.Error(codes.Internal, err.Error()) } return &serverpb.JSONResponse{Data: buf.Bytes()}, nil } @@ -153,7 +153,7 @@ func getLocalFiles( var resp serverpb.GetFilesResponse for _, pattern := range req.Patterns { if err := checkFilePattern(pattern); err != nil { - return nil, status.Errorf(codes.InvalidArgument, err.Error()) + return nil, status.Error(codes.InvalidArgument, err.Error()) } filepaths, err := filepath.Glob(filepath.Join(dir, pattern)) if err != nil { @@ -163,13 +163,13 @@ func getLocalFiles( for _, path := range filepaths { fileinfo, err := statFileFn(path) if err != nil { - return nil, status.Errorf(codes.Internal, err.Error()) + return nil, status.Error(codes.Internal, err.Error()) } var contents []byte if !req.ListOnly { contents, err = readFileFn(path) if err != nil { - return nil, status.Errorf(codes.Internal, err.Error()) + return nil, status.Error(codes.Internal, err.Error()) } } resp.Files = append(resp.Files, diff --git a/pkg/server/tenant.go b/pkg/server/tenant.go index a06619e825e2..ee711e4bc4ad 100644 --- 
a/pkg/server/tenant.go +++ b/pkg/server/tenant.go @@ -835,6 +835,7 @@ func (s *SQLServerWrapper) PreStart(ctx context.Context) error { s.stopper, s.sqlServer.cfg.TestingKnobs, orphanedLeasesTimeThresholdNanos, + false, /* initialStart */ ); err != nil { return err } diff --git a/pkg/server/testing_knobs.go b/pkg/server/testing_knobs.go index 2305f2da0752..a21e6777037d 100644 --- a/pkg/server/testing_knobs.go +++ b/pkg/server/testing_knobs.go @@ -20,6 +20,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/rpc" "github.com/cockroachdb/cockroach/pkg/server/diagnostics" + "github.com/cockroachdb/cockroach/pkg/server/license" "github.com/cockroachdb/cockroach/pkg/storage/fs" "github.com/cockroachdb/cockroach/pkg/util/hlc" ) @@ -46,6 +47,8 @@ type TestingKnobs struct { ContextTestingKnobs rpc.ContextTestingKnobs // DiagnosticsTestingKnobs allows customization of diagnostics testing knobs. DiagnosticsTestingKnobs diagnostics.TestingKnobs + // LicenseTestingKnobs allows customization of license testing knobs. + LicenseTestingKnobs license.TestingKnobs // If set, use this listener for RPC (and possibly SQL, depending on // the SplitListenSQL setting), instead of binding a new listener. diff --git a/pkg/settings/options.go b/pkg/settings/options.go index b7bf946b150c..5c28eab00ec5 100644 --- a/pkg/settings/options.go +++ b/pkg/settings/options.go @@ -45,7 +45,7 @@ func WithName(name SettingName) SettingOption { } // WithRetiredName configures a previous user-visible name of the setting, -// when that name was diferent from the key and is not in use any more. +// when that name was different from the key and is not in use any more. func WithRetiredName(name SettingName) SettingOption { return SettingOption{commonOpt: func(c *common) { registerAlias(c.key, name, NameRetired) diff --git a/pkg/settings/registry.go b/pkg/settings/registry.go index 9e0d23dcac24..bc4c073e8f81 100644 --- a/pkg/settings/registry.go +++ b/pkg/settings/registry.go @@ -248,6 +248,7 @@ var retiredSettings = map[InternalKey]struct{}{ // removed as of 24.3 "bulkio.backup.split_keys_on_timestamps": {}, + "sql.create_tenant.default_template": {}, } // sqlDefaultSettings is the list of "grandfathered" existing sql.defaults diff --git a/pkg/sql/BUILD.bazel b/pkg/sql/BUILD.bazel index 02d01c5890bc..299fff20ffb6 100644 --- a/pkg/sql/BUILD.bazel +++ b/pkg/sql/BUILD.bazel @@ -234,7 +234,6 @@ go_library( "show_external_connection.go", "show_fingerprints.go", "show_histogram.go", - "show_logical_replication_jobs.go", "show_stats.go", "show_tenant.go", "show_trace.go", @@ -358,6 +357,7 @@ go_library( "//pkg/security/password", "//pkg/security/sessionrevival", "//pkg/security/username", + "//pkg/server/license", "//pkg/server/pgurl", "//pkg/server/serverpb", "//pkg/server/settingswatcher", diff --git a/pkg/sql/alter_table.go b/pkg/sql/alter_table.go index 29a5e169c06b..6fe29fc1ee27 100644 --- a/pkg/sql/alter_table.go +++ b/pkg/sql/alter_table.go @@ -462,6 +462,11 @@ func (n *alterTableNode) startExec(params runParams) error { } descriptorChanged = true for _, updated := range affected { + // Disallow schema change if the FK references a table whose schema is + // locked. 
+ if err := checkTableSchemaUnlocked(updated); err != nil { + return err + } if err := params.p.writeSchemaChange( params.ctx, updated, descpb.InvalidMutationID, tree.AsStringWithFQNames(n.n, params.Ann()), ); err != nil { diff --git a/pkg/sql/catalog/BUILD.bazel b/pkg/sql/catalog/BUILD.bazel index 105b137fcf20..e859418225e6 100644 --- a/pkg/sql/catalog/BUILD.bazel +++ b/pkg/sql/catalog/BUILD.bazel @@ -11,6 +11,7 @@ go_library( "metadata.go", "post_deserialization_changes.go", "schema.go", + "serial_helper.go", "system_table.go", "table_col_map.go", "table_col_set.go", diff --git a/pkg/sql/catalog/bootstrap/metadata.go b/pkg/sql/catalog/bootstrap/metadata.go index 5dd28f26e40d..fbd3e3ad59b4 100644 --- a/pkg/sql/catalog/bootstrap/metadata.go +++ b/pkg/sql/catalog/bootstrap/metadata.go @@ -455,6 +455,9 @@ func addSystemDescriptorsToSchema(target *MetadataSchema) { target.AddDescriptor(systemschema.TransactionExecInsightsTable) target.AddDescriptor(systemschema.StatementExecInsightsTable) + // Tables introduced in 24.3 + target.AddDescriptor(systemschema.TableMetadata) + // Adding a new system table? It should be added here to the metadata schema, // and also created as a migration for older clusters. // If adding a call to AddDescriptor or AddDescriptorForSystemTenant, please @@ -467,7 +470,7 @@ func addSystemDescriptorsToSchema(target *MetadataSchema) { // NumSystemTablesForSystemTenant is the number of system tables defined on // the system tenant. This constant is only defined to avoid having to manually // update auto stats tests every time a new system table is added. -const NumSystemTablesForSystemTenant = 56 +const NumSystemTablesForSystemTenant = 57 // addSplitIDs adds a split point for each of the PseudoTableIDs to the supplied // MetadataSchema. 
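The UpdateTableMetadataCache RPC added above follows the status server's usual fan-out shape: a non-local request first resolves which node currently claims the update job (via claim_instance_id in system.jobs), dials that node, and re-issues the request with Local set so the forwarded call runs in place instead of hopping again. The sketch below shows that routing pattern in isolation; metadataUpdater, jobClaimLookup, and nodeDialer are hypothetical stand-ins for the serverpb status client, the internal-executor query, and statusServer.dialNode, not the real interfaces.

package metadatacachesketch

import (
	"context"
	"errors"
)

// metadataUpdater is a hypothetical stand-in for the serverpb status
// client used by the real RPC.
type metadataUpdater interface {
	// local=true means "run the update here, do not forward again".
	UpdateTableMetadataCache(ctx context.Context, local bool) error
}

// jobClaimLookup is a hypothetical stand-in for the internal-executor
// query that reads claim_instance_id from system.jobs.
type jobClaimLookup func(ctx context.Context, jobID int64) (nodeID int32, claimed bool, err error)

// nodeDialer is a hypothetical stand-in for statusServer.dialNode.
type nodeDialer func(ctx context.Context, nodeID int32) (metadataUpdater, error)

// forwardToClaimant mirrors the non-local branch of the RPC: find the
// claiming node and re-issue the request there with local=true.
func forwardToClaimant(
	ctx context.Context, jobID int64, lookup jobClaimLookup, dial nodeDialer,
) error {
	nodeID, claimed, err := lookup(ctx, jobID)
	if err != nil {
		return err
	}
	if !claimed {
		// The real RPC returns codes.Unavailable here.
		return errors.New("update table metadata cache job is unclaimed")
	}
	remote, err := dial(ctx, nodeID)
	if err != nil {
		return err
	}
	// local=true terminates the hop and avoids a forwarding loop.
	return remote.UpdateTableMetadataCache(ctx, true)
}

On the claiming node itself, the local branch nudges the job through a non-blocking channel send (a select with a default case), so a request that arrives while the job is not ready fails fast with codes.Unavailable instead of blocking the RPC.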
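The alter_table.go hunk extends the existing schema_locked protection to back-referenced tables: when an ALTER TABLE cascades to other descriptors through a foreign key, each affected table is checked before its schema change is written. A minimal sketch of that guard, where tableDesc and its fields are hypothetical stand-ins for the real descriptor:

package schemalocksketch

import "fmt"

// tableDesc is a hypothetical stand-in for a table descriptor; only the
// fields the guard needs are modeled.
type tableDesc struct {
	name         string
	schemaLocked bool // mirrors the schema_locked storage parameter
}

// checkTableSchemaUnlocked sketches the helper the hunk calls on every
// affected descriptor: refuse the change while the table is locked.
func checkTableSchemaUnlocked(desc *tableDesc) error {
	if desc.schemaLocked {
		return fmt.Errorf(
			"schema changes are disallowed on table %q because it is locked; "+
				"unset schema_locked on it first", desc.name)
	}
	return nil
}

Because the guard runs before params.p.writeSchemaChange for each affected descriptor, a single locked back-reference fails the whole statement rather than leaving some cascaded writes applied.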
diff --git a/pkg/sql/catalog/bootstrap/testdata/testdata b/pkg/sql/catalog/bootstrap/testdata/testdata index 6843820f90c9..d18d872766e8 100644 --- a/pkg/sql/catalog/bootstrap/testdata/testdata +++ b/pkg/sql/catalog/bootstrap/testdata/testdata @@ -1,7 +1,7 @@ -system hash=2ae2763375bdffaed3ccc39f6c4f9cabd4aa28567c542cc8e6e150c3497e3b4b +system hash=0d290f74075392a317f7207085afc79d8dbd2b1f73149d741dcb6fb374fc2e35 ---- [{"key":"8b"} -,{"key":"8b89898a89","value":"0312450a0673797374656d10011a250a0d0a0561646d696e1080101880100a0c0a04726f6f7410801018801012046e6f646518032200280140004a006a0a08d8843d100218002006"} +,{"key":"8b89898a89","value":"0312450a0673797374656d10011a250a0d0a0561646d696e1080101880100a0c0a04726f6f7410801018801012046e6f646518032200280140004a006a0a08d8843d10021800200e"} ,{"key":"8b898b8a89","value":"030a8e030a0a64657363726970746f721803200128013a0042270a02696410011a0c08011040180030005014600020003000680070007800800100880100980100422f0a0a64657363726970746f7210021a0c08081000180030005011600020013000680070007800800100880100980100480352710a077072696d61727910011801220269642a0a64657363726970746f72300140004a10080010001a00200028003000380040005a0070027a0408002000800100880100900104980101a20106080012001800a80100b20100ba0100c00100c80100d00101e00100e901000000000000000060026a210a0b0a0561646d696e102018200a0a0a04726f6f741020182012046e6f64651803800101880103980100b201130a077072696d61727910001a02696420012800b201240a1066616d5f325f64657363726970746f7210021a0a64657363726970746f7220022802b80103c20100e80100f2010408001200f801008002009202009a0200b20200b80200c0021dc80200e00200800300880302a80300b00300d00300d80300e00300"} ,{"key":"8b898c8a89","value":"030ac7050a0575736572731804200128013a00422d0a08757365726e616d6510011a0c0807100018003000501960002000300068007000780080010088010098010042330a0e68617368656450617373776f726410021a0c0808100018003000501160002001300068007000780080010088010098010042320a066973526f6c6510031a0c08001000180030005010600020002a0566616c73653000680070007800800100880100980100422c0a07757365725f696410041a0c080c100018003000501a60002000300068007000780080010088010098010048055290010a077072696d617279100118012208757365726e616d652a0e68617368656450617373776f72642a066973526f6c652a07757365725f6964300140004a10080010001a00200028003000380040005a007002700370047a0408002000800100880100900104980101a20106080012001800a80100b20100ba0100c00100c80100d00102e00100e90100000000000000005a740a1175736572735f757365725f69645f696478100218012207757365725f69643004380140004a10080010001a00200028003000380040005a007a0408002000800100880100900103980100a20106080012001800a80100b20100ba0100c00100c80100d00101e00100e901000000000000000060036a250a0d0a0561646d696e10e00318e0030a0c0a04726f6f7410e00318e00312046e6f64651803800101880103980100b201240a077072696d61727910001a08757365726e616d651a07757365725f6964200120042804b2012c0a1466616d5f325f68617368656450617373776f726410021a0e68617368656450617373776f726420022802b2011c0a0c66616d5f335f6973526f6c6510031a066973526f6c6520032803b80104c20100e80100f2010408001200f801008002009202009a0200b20200b80200c0021dc80200e00200800300880303a80300b00300d00300d80300e00300"} 
,{"key":"8b898d8a89","value":"030afd020a057a6f6e65731805200128013a0042270a02696410011a0c08011040180030005014600020003000680070007800800100880100980100422b0a06636f6e66696710021a0c080810001800300050116000200130006800700078008001008801009801004803526d0a077072696d61727910011801220269642a06636f6e666967300140004a10080010001a00200028003000380040005a0070027a0408002000800100880100900104980101a20106080012001800a80100b20100ba0100c00100c80100d00101e00100e901000000000000000060026a250a0d0a0561646d696e10e00318e0030a0c0a04726f6f7410e00318e00312046e6f64651803800101880103980100b201130a077072696d61727910001a02696420012800b2011c0a0c66616d5f325f636f6e66696710021a06636f6e66696720022802b80103c20100e80100f2010408001200f801008002009202009a0200b20200b80200c0021dc80200e00200800300880302a80300b00300d00300d80300e00300"} @@ -38,7 +38,7 @@ system hash=2ae2763375bdffaed3ccc39f6c4f9cabd4aa28567c542cc8e6e150c3497e3b4b ,{"key":"8b89b38a89","value":"030a8f200a167472616e73616374696f6e5f73746174697374696373182b200128013a0042330a0d616767726567617465645f747310011a0d080910001800300050a00960002000300068007000780080010088010098010042330a0e66696e6765727072696e745f696410021a0c08081000180030005011600020003000680070007800800100880100980100422d0a086170705f6e616d6510031a0c08071000180030005019600020003000680070007800800100880100980100422c0a076e6f64655f696410041a0c0801104018003000501460002000300068007000780080010088010098010042380a0c6167675f696e74657276616c10051a13080610001800300050a20960006a040800100020003000680070007800800100880100980100422e0a086d6574616461746110061a0d081210001800300050da1d60002000300068007000780080010088010098010042300a0a7374617469737469637310071a0d081210001800300050da1d60002000300068007000780080010088010098010042cf010a43637264625f696e7465726e616c5f616767726567617465645f74735f6170705f6e616d655f66696e6765727072696e745f69645f6e6f64655f69645f73686172645f3810081a0c080110201800300050176000200030015a656d6f6428666e76333228637264625f696e7465726e616c2e646174756d735f746f5f627974657328616767726567617465645f74732c206170705f6e616d652c2066696e6765727072696e745f69642c206e6f64655f696429292c20383a3a3a494e54382968007000780080010088010098010042710a0f657865637574696f6e5f636f756e7410091a0c080110401800300050146000200130005a3b2828737461746973746963732d3e2773746174697374696373273a3a3a535452494e47292d3e27636e74273a3a3a535452494e47293a3a494e5438680070007800800100880100980100428a010a0f736572766963655f6c6174656e6379100a1a0d080210401800300050bd056000200130005a53282828737461746973746963732d3e2773746174697374696373273a3a3a535452494e47292d3e277376634c6174273a3a3a535452494e47292d3e276d65616e273a3a3a535452494e47293a3a464c4f4154386800700078008001008801009801004297010a0d6370755f73716c5f6e616e6f73100b1a0d080210401800300050bd056000200130005a62282828737461746973746963732d3e27657865637574696f6e5f73746174697374696373273a3a3a535452494e47292d3e2763707553514c4e616e6f73273a3a3a535452494e47292d3e276d65616e273a3a3a535452494e47293a3a464c4f415438680070007800800100880100980100429c010a0f636f6e74656e74696f6e5f74696d65100c1a0d080210401800300050bd056000200130005a65282828737461746973746963732d3e27657865637574696f6e5f73746174697374696373273a3a3a535452494e47292d3e27636f6e74656e74696f6e54696d65273a3a3a535452494e47292d3e276d65616e273a3a3a535452494e47293a3a464c4f41543868007000780080010088010098010042dc010a1e746f74616c5f657374696d617465645f657865637574696f6e5f74696d65100d1a0d080210401800300050bd056000200130005a95012828737461746973746963732d3e2773746174697374696373273a3a3a535452494e47292d3e3e27636e74273a3a3a535452494e47293a3a464c4f415438202a20282828737461746973746963732d3e277374617
4697374696373273a3a3a535452494e47292d3e277376634c6174273a3a3a535452494e47292d3e3e276d65616e273a3a3a535452494e47293a3a464c4f415438680070007800800100880100980100428a010a0b7039395f6c6174656e6379100e1a0d080210401800300050bd056000200130005a57282828737461746973746963732d3e2773746174697374696373273a3a3a535452494e47292d3e276c6174656e6379496e666f273a3a3a535452494e47292d3e27703939273a3a3a535452494e47293a3a464c4f415438680070007800800100880100980100480f5280040a077072696d617279100118012243637264625f696e7465726e616c5f616767726567617465645f74735f6170705f6e616d655f66696e6765727072696e745f69645f6e6f64655f69645f73686172645f38220d616767726567617465645f7473220e66696e6765727072696e745f696422086170705f6e616d6522076e6f64655f69642a0c6167675f696e74657276616c2a086d657461646174612a0a737461746973746963732a0f657865637574696f6e5f636f756e742a0f736572766963655f6c6174656e63792a0d6370755f73716c5f6e616e6f732a0f636f6e74656e74696f6e5f74696d652a1e746f74616c5f657374696d617465645f657865637574696f6e5f74696d652a0b7039395f6c6174656e637930083001300230033004400040004000400040004a10080010001a00200028003000380040005a007005700670077009700a700b700c700d700e7a0408002000800100880100900104980101a2017b08011243637264625f696e7465726e616c5f616767726567617465645f74735f6170705f6e616d655f66696e6765727072696e745f69645f6e6f64655f69645f73686172645f381808220d616767726567617465645f747322086170705f6e616d65220e66696e6765727072696e745f696422076e6f64655f6964a80100b20100ba0100c00100c80100d00101e00100e90100000000000000005a85010a1566696e6765727072696e745f73746174735f69647810021800220e66696e6765727072696e745f69643002380838013803380440004a10080010001a00200028003000380040005a007a0408002000800100880100900103980100a20106080012001800a80100b20100ba0100c00100c80100d00100e00100e90100000000000000005acb010a13657865637574696f6e5f636f756e745f69647810031800220d616767726567617465645f747322086170705f6e616d65220f657865637574696f6e5f636f756e743001300330093808380238044000400040014a10080010001a00200028003000380040005a007a0408002000800100880100900103980100a20106080012001800a80100b20100ba01286170705f6e616d65204e4f54204c494b4520272420696e7465726e616c25273a3a3a535452494e47c00100c80100d00100e00100e90100000000000000005acd010a13736572766963655f6c6174656e63795f69647810041800220d616767726567617465645f747322086170705f6e616d65220f736572766963655f6c6174656e637930013003300a3808380238044000400040014a10080010001a00200028003000380040005a00680a7a0408002000800100880100900103980100a20106080012001800a80100b20100ba01286170705f6e616d65204e4f54204c494b4520272420696e7465726e616c25273a3a3a535452494e47c00100c80100d00100e00100e90100000000000000005ac9010a116370755f73716c5f6e616e6f735f69647810051800220d616767726567617465645f747322086170705f6e616d65220d6370755f73716c5f6e616e6f7330013003300b3808380238044000400040014a10080010001a00200028003000380040005a00680b7a0408002000800100880100900103980100a20106080012001800a80100b20100ba01286170705f6e616d65204e4f54204c494b4520272420696e7465726e616c25273a3a3a535452494e47c00100c80100d00100e00100e90100000000000000005acd010a13636f6e74656e74696f6e5f74696d655f69647810061800220d616767726567617465645f747322086170705f6e616d65220f636f6e74656e74696f6e5f74696d6530013003300c3808380238044000400040014a10080010001a00200028003000380040005a00680c7a0408002000800100880100900103980100a20106080012001800a80100b20100ba01286170705f6e616d65204e4f54204c494b4520272420696e7465726e616c25273a3a3a535452494e47c00100c80100d00100e00100e90100000000000000005aeb010a22746f74616c5f657374696d617465645f657865637574696f6e5f74696d655f69647810071800220d616767726567617465645f747322086170705f6e616d65221e746f74616c5f6573
74696d617465645f657865637574696f6e5f74696d6530013003300d3808380238044000400040014a10080010001a00200028003000380040005a00680d7a0408002000800100880100900103980100a20106080012001800a80100b20100ba01286170705f6e616d65204e4f54204c494b4520272420696e7465726e616c25273a3a3a535452494e47c00100c80100d00100e00100e90100000000000000005ac5010a0f7039395f6c6174656e63795f69647810081800220d616767726567617465645f747322086170705f6e616d65220b7039395f6c6174656e637930013003300e3808380238044000400040014a10080010001a00200028003000380040005a00680e7a0408002000800100880100900103980100a20106080012001800a80100b20100ba01286170705f6e616d65204e4f54204c494b4520272420696e7465726e616c25273a3a3a535452494e47c00100c80100d00100e00100e901000000000000000060096a210a0b0a0561646d696e102018200a0a0a04726f6f741020182012046e6f64651803800101880103980100a201ef010a9701637264625f696e7465726e616c5f616767726567617465645f74735f6170705f6e616d655f66696e6765727072696e745f69645f6e6f64655f69645f73686172645f3820494e2028303a3a3a494e54382c20313a3a3a494e54382c20323a3a3a494e54382c20333a3a3a494e54382c20343a3a3a494e54382c20353a3a3a494e54382c20363a3a3a494e54382c20373a3a3a494e5438291249636865636b5f637264625f696e7465726e616c5f616767726567617465645f74735f6170705f6e616d655f66696e6765727072696e745f69645f6e6f64655f69645f73686172645f3818002808300038014002b201b3020a077072696d61727910001a43637264625f696e7465726e616c5f616767726567617465645f74735f6170705f6e616d655f66696e6765727072696e745f69645f6e6f64655f69645f73686172645f381a0d616767726567617465645f74731a0e66696e6765727072696e745f69641a086170705f6e616d651a076e6f64655f69641a0c6167675f696e74657276616c1a086d657461646174611a0a737461746973746963731a0f657865637574696f6e5f636f756e741a0f736572766963655f6c6174656e63791a0d6370755f73716c5f6e616e6f731a0f636f6e74656e74696f6e5f74696d651a1e746f74616c5f657374696d617465645f657865637574696f6e5f74696d651a0b7039395f6c6174656e6379200820012002200320042005200620072009200a200b200c200d200e2800b80101c20100e80100f2010408001200f801008002009202009a0200b20200b80200c0021dc80200e00200800300880303a80300b00300d00300d80300e00300"} 
,{"key":"8b89b48a89","value":"030aed050a1664617461626173655f726f6c655f73657474696e6773182c200128013a0042300a0b64617461626173655f696410011a0c080c100018003000501a600020003000680070007800800100880100980100422e0a09726f6c655f6e616d6510021a0c08071000180030005019600020003000680070007800800100880100980100423e0a0873657474696e677310031a1d080f100018003000380750f1075a0c080710001800300050196000600020003000680070007800800100880100980100422c0a07726f6c655f696410041a0c080c100018003000501a60002000300068007000780080010088010098010048055292010a077072696d61727910011801220b64617461626173655f69642209726f6c655f6e616d652a0873657474696e67732a07726f6c655f696430013002400040004a10080010001a00200028003000380040005a00700370047a0408002000800100880100900104980101a20106080012001800a80100b20100ba0100c00100c80100d00102e00100e90100000000000000005aae010a2e64617461626173655f726f6c655f73657474696e67735f64617461626173655f69645f726f6c655f69645f6b657910021801220b64617461626173655f69642207726f6c655f69642a0873657474696e6773300130043802400040004a10080010001a00200028003000380040005a0070037a0408002000800100880100900103980100a20106080012001800a80100b20100ba0100c00100c80100d00101e00100e901000000000000000060036a250a0d0a0561646d696e10e00318e0030a0c0a04726f6f7410e00318e00312046e6f64651803800101880103980100b201400a077072696d61727910001a0b64617461626173655f69641a09726f6c655f6e616d651a0873657474696e67731a07726f6c655f696420012002200320042800b80101c20100e80100f2010408001200f801008002009202009a0200b20200b80200c0021dc80200e00200800300880303a80300b00300d00300d80300e00300"} ,{"key":"8b89b58a89","value":"030aba0b0a0c74656e616e745f7573616765182d200128013a00422e0a0974656e616e745f696410011a0c0801104018003000501460002000300068007000780080010088010098010042300a0b696e7374616e63655f696410021a0c0801104018003000501460002000300068007000780080010088010098010042350a106e6578745f696e7374616e63655f696410031a0c0801104018003000501460002000300068007000780080010088010098010042310a0b6c6173745f75706461746510041a0d080510001800300050da0860002000300068007000780080010088010098010042340a0e72755f62757273745f6c696d697410051a0d080210401800300050bd0560002001300068007000780080010088010098010042340a0e72755f726566696c6c5f7261746510061a0d080210401800300050bd0560002001300068007000780080010088010098010042300a0a72755f63757272656e7410071a0d080210401800300050bd0560002001300068007000780080010088010098010042370a1163757272656e745f73686172655f73756d10081a0d080210401800300050bd0560002001300068007000780080010088010098010042360a11746f74616c5f636f6e73756d7074696f6e10091a0c0808100018003000501160002001300068007000780080010088010098010042330a0e696e7374616e63655f6c65617365100a1a0c0808100018003000501160002001300068007000780080010088010098010042310a0c696e7374616e63655f736571100b1a0c0801104018003000501460002001300068007000780080010088010098010042350a0f696e7374616e63655f736861726573100c1a0d080210401800300050bd0560002001300068007000780080010088010098010042320a0d63757272656e745f7261746573100d1a0c08081000180030005011600020013000680070007800800100880100980100422f0a0a6e6578745f7261746573100e1a0c08081000180030005011600020013000680070007800800100880100980100480f52ce020a077072696d61727910011801220974656e616e745f6964220b696e7374616e63655f69642a106e6578745f696e7374616e63655f69642a0b6c6173745f7570646174652a0e72755f62757273745f6c696d69742a0e72755f726566696c6c5f726174652a0a72755f63757272656e742a1163757272656e745f73686172655f73756d2a11746f74616c5f636f6e73756d7074696f6e2a0e696e7374616e63655f6c656173652a0c696e7374616e63655f7365712a0f696e7374616e63655f7368617265732a0d63757272656e745f72617465732a0a6e6578745f7261746573300130
02400040004a10080010001a00200028003000380040005a007003700470057006700770087009700a700b700c700d700e7a0408002000800100880100900104980101a20106080012001800a80100b20100ba0100c00100c80100d00101e00100e901000000000000000060026a250a0d0a0561646d696e10e00318e0030a0c0a04726f6f7410e00318e00312046e6f64651803800101880103980100b201fc010a077072696d61727910001a0974656e616e745f69641a0b696e7374616e63655f69641a106e6578745f696e7374616e63655f69641a0b6c6173745f7570646174651a0e72755f62757273745f6c696d69741a0e72755f726566696c6c5f726174651a0a72755f63757272656e741a1163757272656e745f73686172655f73756d1a11746f74616c5f636f6e73756d7074696f6e1a0e696e7374616e63655f6c656173651a0c696e7374616e63655f7365711a0f696e7374616e63655f7368617265731a0d63757272656e745f72617465731a0a6e6578745f7261746573200120022003200420052006200720082009200a200b200c200d200e2800b80101c20100e80100f2010408001200f801008002009202009a0200b20200b80200c0021dc80200e00200800301880302a80300b00300d00300d80300e00300"} -,{"key":"8b89b68a89","value":"030af2050a0d73716c5f696e7374616e636573182e200128013a0042270a02696410011a0c0801104018003000501460002000300068007000780080010088010098010042290a046164647210021a0c08071000180030005019600020013000680070007800800100880100980100422f0a0a73657373696f6e5f696410031a0c08081000180030005011600020013000680070007800800100880100980100422e0a086c6f63616c69747910041a0d081210001800300050da1d600020013000680070007800800100880100980100422d0a0873716c5f6164647210051a0c0807100018003000501960002001300068007000780080010088010098010042300a0b637264625f726567696f6e10061a0c0808100018003000501160002000300068007000780080010088010098010042330a0e62696e6172795f76657273696f6e10071a0c08071000180030005019600020013000680070007800800100880100980100480852b4010a077072696d61727910021801220b637264625f726567696f6e220269642a04616464722a0a73657373696f6e5f69642a086c6f63616c6974792a0873716c5f616464722a0e62696e6172795f76657273696f6e30063001400040004a10080010001a00200028003000380040005a00700270037004700570077a0408002000800100880100900104980101a20106080012001800a80100b20100ba0100c00100c80100d00101e00100e901000000000000000060036a250a0d0a0561646d696e10e00318e0030a0c0a04726f6f7410e00318e00312046e6f64651803800101880103980100b201620a077072696d61727910001a0269641a04616464721a0a73657373696f6e5f69641a086c6f63616c6974791a0873716c5f616464721a0b637264625f726567696f6e1a0e62696e6172795f76657273696f6e20012002200320042005200620072800b80101c20100e80100f2010408001200f801008002009202009a0200b20200b80200c0021dc80200e00200800300880302a80300b00300d00300d80300e00300"} 
+,{"key":"8b89b68a89","value":"030ac2060a0d73716c5f696e7374616e636573182e200128013a0042270a02696410011a0c0801104018003000501460002000300068007000780080010088010098010042290a046164647210021a0c08071000180030005019600020013000680070007800800100880100980100422f0a0a73657373696f6e5f696410031a0c08081000180030005011600020013000680070007800800100880100980100422e0a086c6f63616c69747910041a0d081210001800300050da1d600020013000680070007800800100880100980100422d0a0873716c5f6164647210051a0c0807100018003000501960002001300068007000780080010088010098010042300a0b637264625f726567696f6e10061a0c0808100018003000501160002000300068007000780080010088010098010042330a0e62696e6172795f76657273696f6e10071a0c0807100018003000501960002001300068007000780080010088010098010042300a0b69735f647261696e696e6710081a0c08001000180030005010600020013000680070007800800100880100980100480952c3010a077072696d61727910021801220b637264625f726567696f6e220269642a04616464722a0a73657373696f6e5f69642a086c6f63616c6974792a0873716c5f616464722a0e62696e6172795f76657273696f6e2a0b69735f647261696e696e6730063001400040004a10080010001a00200028003000380040005a007002700370047005700770087a0408002000800100880100900104980101a20106080012001800a80100b20100ba0100c00100c80100d00101e00100e901000000000000000060036a250a0d0a0561646d696e10e00318e0030a0c0a04726f6f7410e00318e00312046e6f64651803800101880103980100b201710a077072696d61727910001a0269641a04616464721a0a73657373696f6e5f69641a086c6f63616c6974791a0873716c5f616464721a0b637264625f726567696f6e1a0e62696e6172795f76657273696f6e1a0b69735f647261696e696e67200120022003200420052006200720082800b80101c20100e80100f2010408001200f801008002009202009a0200b20200b80200c0021dc80200e00200800300880302a80300b00300d00300d80300e00300"} ,{"key":"8b89b78a89","value":"030a81040a137370616e5f636f6e66696775726174696f6e73182f200128013a00422e0a0973746172745f6b657910011a0c08081000180030005011600020003000680070007800800100880100980100422c0a07656e645f6b657910021a0c08081000180030005011600020003000680070007800800100880100980100422b0a06636f6e66696710031a0c080810001800300050116000200030006800700078008001008801009801004804527f0a077072696d61727910011801220973746172745f6b65792a07656e645f6b65792a06636f6e666967300140004a10080010001a00200028003000380040005a00700270037a0408002000800100880100900104980101a20106080012001800a80100b20100ba0100c00100c80100d00101e00100e901000000000000000060026a250a0d0a0561646d696e10e00318e0030a0c0a04726f6f7410e00318e00312046e6f64651803800101880103980100a2012f0a1373746172745f6b6579203c20656e645f6b6579120c636865636b5f626f756e6473180028012802300038004002b2012f0a077072696d61727910001a0973746172745f6b65791a07656e645f6b65791a06636f6e6669672001200220032800b80101c20100e80100f2010408001200f801008002009202009a0200b20200b80200c0021dc80200e00200800301880303a80300b00300d00300d80300e00300"} ,{"key":"8b89b88a89","value":"030ad3020a0b726f6c655f69645f7365711830200128013a00422a0a0576616c756510011a0c08011040180030005014600020003000680070007800800100880100980100480052660a077072696d61727910011800220576616c7565300140004a10080010001a00200028003000380040005a007a0408002000800100880100900104980101a20106080012001800a80100b20100ba0100c00100c80100d00100e00100e901000000000000000060006a250a0d0a0561646d696e10a00618a0060a0c0a04726f6f7410a00618a00612046e6f64651803800100880103980100b201160a077072696d61727910001a0576616c756520012801b80100c20100e2011a0801106418ffffffff0720642800320408001000380142004800e80100f2010408001200f801008002009202009a0200b20200b80200c0021dc80200e00200800300880300a80300b00300d00300d80300e00300"} 
,{"key":"8b89ba8a89","value":"030aea050a0f74656e616e745f73657474696e67731832200128013a00422e0a0974656e616e745f696410011a0c0801104018003000501460002000300068007000780080010088010098010042290a046e616d6510021a0c08071000180030005019600020003000680070007800800100880100980100422a0a0576616c756510031a0c0807100018003000501960002000300068007000780080010088010098010042450a0c6c6173745f7570646174656410041a0d080510001800300050da08600020002a116e6f7728293a3a3a54494d455354414d503000680070007800800100880100980100422f0a0a76616c75655f7479706510051a0c08071000180030005019600020003000680070007800800100880100980100422b0a06726561736f6e10061a0c08071000180030005019600020013000680070007800800100880100980100480752a5010a077072696d61727910011801220974656e616e745f696422046e616d652a0576616c75652a0c6c6173745f757064617465642a0a76616c75655f747970652a06726561736f6e30013002400040004a10080010001a00200028003000380040005a0070037004700570067a0408002000800100880100900104980101a20106080012001800a80100b20100ba0100c00100c80100d00101e00100e901000000000000000060026a250a0d0a0561646d696e10e00318e0030a0c0a04726f6f7410e00318e00312046e6f64651803800101880103980100b20185010a3966616d5f305f74656e616e745f69645f6e616d655f76616c75655f6c6173745f757064617465645f76616c75655f747970655f726561736f6e10001a0974656e616e745f69641a046e616d651a0576616c75651a0c6c6173745f757064617465641a0a76616c75655f747970651a06726561736f6e2001200220032004200520062800b80101c20100e80100f2010408001200f801008002009202009a0200b20200b80200c0021dc80200e00200800300880302a80300b00300d00300d80300e00300"} @@ -58,6 +58,7 @@ system hash=2ae2763375bdffaed3ccc39f6c4f9cabd4aa28567c542cc8e6e150c3497e3b4b ,{"key":"8b89c88a89","value":"030abf0a0a0f6d7663635f737461746973746963731840200128013a0042450a0a637265617465645f617410011a0d080910001800300050a009600020002a136e6f7728293a3a3a54494d455354414d50545a300068007000780080010088010098010042300a0b64617461626173655f696410021a0c08011040180030005014600020003000680070007800800100880100980100422d0a087461626c655f696410031a0c08011040180030005014600020003000680070007800800100880100980100422d0a08696e6465785f696410041a0c0801104018003000501460002000300068007000780080010088010098010042300a0a7374617469737469637310051a0d081210001800300050da1d60002000300068007000780080010088010098010042ab010a3f637264625f696e7465726e616c5f637265617465645f61745f64617461626173655f69645f696e6465785f69645f7461626c655f69645f73686172645f313610061a0c080110201800300050176000200030015a456d6f6428666e763332286d643528637264625f696e7465726e616c2e646174756d735f746f5f627974657328637265617465645f61742929292c2031363a3a3a494e543829680070007800800101880100980100480752e4020a146d7663635f737461746973746963735f706b657910011801223f637264625f696e7465726e616c5f637265617465645f61745f64617461626173655f69645f696e6465785f69645f7461626c655f69645f73686172645f3136220a637265617465645f6174220b64617461626173655f696422087461626c655f69642208696e6465785f69642a0a7374617469737469637330063001300230033004400040004000400040004a10080010001a00200028003000380040005a0070057a0408002000800100880100900104980101a201720801123f637264625f696e7465726e616c5f637265617465645f61745f64617461626173655f69645f696e6465785f69645f7461626c655f69645f73686172645f31361810220a637265617465645f6174220b64617461626173655f69642208696e6465785f696422087461626c655f6964a80100b20100ba0100c00100c80100d00101e00100e901000000000000000060026a250a0d0a0561646d696e10e00318e0030a0c0a04726f6f7410e00318e00312046e6f64651803800101880103980100a201bd020ae901637264625f696e7465726e616c5f637265617465645f61745f64617461626173655f69645f696e6465785f69645f7461626c655f69645f73686172645f313
620494e2028303a3a3a494e54382c20313a3a3a494e54382c20323a3a3a494e54382c20333a3a3a494e54382c20343a3a3a494e54382c20353a3a3a494e54382c20363a3a3a494e54382c20373a3a3a494e54382c20383a3a3a494e54382c20393a3a3a494e54382c2031303a3a3a494e54382c2031313a3a3a494e54382c2031323a3a3a494e54382c2031333a3a3a494e54382c2031343a3a3a494e54382c2031353a3a3a494e5438291245636865636b5f637264625f696e7465726e616c5f637265617465645f61745f64617461626173655f69645f696e6465785f69645f7461626c655f69645f73686172645f313618002806300038014002b201500a077072696d61727910001a0a637265617465645f61741a0b64617461626173655f69641a087461626c655f69641a08696e6465785f69641a0a73746174697374696373200120022003200420052805b80101c20100e80100f2010408001200f801008002009202009a0200b20200b80200c0021dc80200e00200800300880303a80300b00300d00300d80300e00300"} ,{"key":"8b89c98a89","value":"030ab5170a1e7472616e73616374696f6e5f657865637574696f6e5f696e7369676874731841200128013a0042340a0e7472616e73616374696f6e5f696410011a0d080e100018003000508617600020003000680070007800800100880100980100423f0a1a7472616e73616374696f6e5f66696e6765727072696e745f696410021a0c0808100018003000501160002000300068007000780080010088010098010042320a0d71756572795f73756d6d61727910031a0c0807100018003000501960002001300068007000780080010088010098010042310a0c696d706c696369745f74786e10041a0c08001000180030005010600020013000680070007800800100880100980100422f0a0a73657373696f6e5f696410051a0c0807100018003000501960002000300068007000780080010088010098010042300a0a73746172745f74696d6510061a0d080910001800300050a009600020013000680070007800800100880100980100422e0a08656e645f74696d6510071a0d080910001800300050a009600020013000680070007800800100880100980100422e0a09757365725f6e616d6510081a0c08071000180030005019600020013000680070007800800100880100980100422d0a086170705f6e616d6510091a0c0807100018003000501960002001300068007000780080010088010098010042320a0d757365725f7072696f72697479100a1a0c08071000180030005019600020013000680070007800800100880100980100422c0a0772657472696573100b1a0c0801104018003000501460002001300068007000780080010088010098010042360a116c6173745f72657472795f726561736f6e100c1a0c08071000180030005019600020013000680070007800800100880100980100423e0a0870726f626c656d73100d1a1d080f104018003000380150f8075a0c080110401800300050146000600020013000680070007800800100880100980100423c0a06636175736573100e1a1d080f104018003000380150f8075a0c08011040180030005014600060002001300068007000780080010088010098010042480a1273746d745f657865637574696f6e5f696473100f1a1d080f100018003000380750f1075a0c08071000180030005019600060002001300068007000780080010088010098010042320a0d6370755f73716c5f6e616e6f7310101a0c0801104018003000501460002001300068007000780080010088010098010042340a0f6c6173745f6572726f725f636f646510111a0c08071000180030005019600020013000680070007800800100880100980100422b0a0673746174757310121a0c08011040180030005014600020013000680070007800800100880100980100423b0a0f636f6e74656e74696f6e5f74696d6510131a13080610001800300050a20960006a04080010002001300068007000780080010088010098010042350a0f636f6e74656e74696f6e5f696e666f10141a0d081210001800300050da1d600020013000680070007800800100880100980100422d0a0764657461696c7310151a0d081210001800300050da1d60002001300068007000780080010088010098010042420a076372656174656410161a0d080910001800300050a009600020002a136e6f7728293a3a3a54494d455354414d50545a300068007000780080010088010098010042a0010a2a637264625f696e7465726e616c5f656e645f74696d655f73746172745f74696d655f73686172645f313610171a0c080110201800300050176000200030015a4f6d6f6428666e763332286d643528637264625f696e7465726e616c2e646174756d735f746f5f627974657328656e645f746
96d652c2073746172745f74696d652929292c2031363a3a3a494e543829680070007800800101880100980100481852b6030a077072696d61727910011801220e7472616e73616374696f6e5f69642a1a7472616e73616374696f6e5f66696e6765727072696e745f69642a0d71756572795f73756d6d6172792a0c696d706c696369745f74786e2a0a73657373696f6e5f69642a0a73746172745f74696d652a08656e645f74696d652a09757365725f6e616d652a086170705f6e616d652a0d757365725f7072696f726974792a07726574726965732a116c6173745f72657472795f726561736f6e2a0870726f626c656d732a066361757365732a1273746d745f657865637574696f6e5f6964732a0d6370755f73716c5f6e616e6f732a0f6c6173745f6572726f725f636f64652a067374617475732a0f636f6e74656e74696f6e5f74696d652a0f636f6e74656e74696f6e5f696e666f2a0764657461696c732a0763726561746564300140004a10080010001a00200028003000380040005a0070027003700470057006700770087009700a700b700c700d700e700f70107011701270137014701570167a0408002000800100880100900104980101a20106080012001800a80100b20100ba0100c00100c80100d00101e00100e90100000000000000005a94010a1e7472616e73616374696f6e5f66696e6765727072696e745f69645f69647810021800221a7472616e73616374696f6e5f66696e6765727072696e745f69643002380140004a10080010001a00200028003000380040005a007a0408002000800100880100900103980100a20106080012001800a80100b20100ba0100c00100c80100d00100e00100e90100000000000000005af2010a0e74696d655f72616e67655f69647810031800222a637264625f696e7465726e616c5f656e645f74696d655f73746172745f74696d655f73686172645f3136220a73746172745f74696d652208656e645f74696d6530173006300738014000400140014a10080010001a00200028003000380040005a007a0408002000800100880100900103980100a201460801122a637264625f696e7465726e616c5f656e645f74696d655f73746172745f74696d655f73686172645f313618102208656e645f74696d65220a73746172745f74696d65a80100b20100ba0100c00100c80100d00100e00100e901000000000000000060046a250a0d0a0561646d696e10e00318e0030a0c0a04726f6f7410e00318e00312046e6f64651803800101880103980100a20193020ad401637264625f696e7465726e616c5f656e645f74696d655f73746172745f74696d655f73686172645f313620494e2028303a3a3a494e54382c20313a3a3a494e54382c20323a3a3a494e54382c20333a3a3a494e54382c20343a3a3a494e54382c20353a3a3a494e54382c20363a3a3a494e54382c20373a3a3a494e54382c20383a3a3a494e54382c20393a3a3a494e54382c2031303a3a3a494e54382c2031313a3a3a494e54382c2031323a3a3a494e54382c2031333a3a3a494e54382c2031343a3a3a494e54382c2031353a3a3a494e5438291230636865636b5f637264625f696e7465726e616c5f656e645f74696d655f73746172745f74696d655f73686172645f313618002817300038014002b201e6020a077072696d61727910001a0e7472616e73616374696f6e5f69641a1a7472616e73616374696f6e5f66696e6765727072696e745f69641a0d71756572795f73756d6d6172791a0c696d706c696369745f74786e1a0a73657373696f6e5f69641a0a73746172745f74696d651a08656e645f74696d651a09757365725f6e616d651a086170705f6e616d651a0d757365725f7072696f726974791a07726574726965731a116c6173745f72657472795f726561736f6e1a0870726f626c656d731a066361757365731a1273746d745f657865637574696f6e5f6964731a0d6370755f73716c5f6e616e6f731a0f6c6173745f6572726f725f636f64651a067374617475731a0f636f6e74656e74696f6e5f74696d651a0f636f6e74656e74696f6e5f696e666f1a0764657461696c731a0763726561746564200120022003200420052006200720082009200a200b200c200d200e200f20102011201220132014201520162800b80101c20100e80100f2010408001200f801008002009202009a0200b20200b80200c0021dc80200e00200800300880303a80300b00300d00300d80300e00300"} 
,{"key":"8b89ca8a89","value":"030a801e0a1c73746174656d656e745f657865637574696f6e5f696e7369676874731842200128013a00422f0a0a73657373696f6e5f696410011a0c0807100018003000501960002000300068007000780080010088010098010042340a0e7472616e73616374696f6e5f696410021a0d080e100018003000508617600020003000680070007800800100880100980100423f0a1a7472616e73616374696f6e5f66696e6765727072696e745f696410031a0c0808100018003000501160002000300068007000780080010088010098010042310a0c73746174656d656e745f696410041a0c08071000180030005019600020003000680070007800800100880100980100423d0a1873746174656d656e745f66696e6765727072696e745f696410051a0c08081000180030005011600020003000680070007800800100880100980100422c0a0770726f626c656d10061a0c08011040180030005014600020013000680070007800800100880100980100423c0a0663617573657310071a1d080f104018003000380150f8075a0c080110401800300050146000600020013000680070007800800100880100980100422a0a05717565727910081a0c08071000180030005019600020013000680070007800800100880100980100422b0a0673746174757310091a0c0801104018003000501460002001300068007000780080010088010098010042300a0a73746172745f74696d65100a1a0d080910001800300050a009600020013000680070007800800100880100980100422e0a08656e645f74696d65100b1a0d080910001800300050a009600020013000680070007800800100880100980100422e0a0966756c6c5f7363616e100c1a0c08001000180030005010600020013000680070007800800100880100980100422e0a09757365725f6e616d65100d1a0c08071000180030005019600020013000680070007800800100880100980100422d0a086170705f6e616d65100e1a0c0807100018003000501960002001300068007000780080010088010098010042320a0d757365725f7072696f72697479100f1a0c0807100018003000501960002001300068007000780080010088010098010042320a0d64617461626173655f6e616d6510101a0c08071000180030005019600020013000680070007800800100880100980100422e0a09706c616e5f6769737410111a0c08071000180030005019600020013000680070007800800100880100980100422c0a077265747269657310121a0c0801104018003000501460002001300068007000780080010088010098010042360a116c6173745f72657472795f726561736f6e10131a0c0807100018003000501960002001300068007000780080010088010098010042480a12657865637574696f6e5f6e6f64655f69647310141a1d080f104018003000380150f8075a0c080110401800300050146000600020013000680070007800800100880100980100424b0a15696e6465785f7265636f6d6d656e646174696f6e7310151a1d080f100018003000380750f1075a0c08071000180030005019600060002001300068007000780080010088010098010042310a0c696d706c696369745f74786e10161a0c0800100018003000501060002001300068007000780080010088010098010042320a0d6370755f73716c5f6e616e6f7310171a0c08011040180030005014600020013000680070007800800100880100980100422f0a0a6572726f725f636f646510181a0c08071000180030005019600020013000680070007800800100880100980100423b0a0f636f6e74656e74696f6e5f74696d6510191a13080610001800300050a20960006a04080010002001300068007000780080010088010098010042350a0f636f6e74656e74696f6e5f696e666f101a1a0d081210001800300050da1d600020013000680070007800800100880100980100422d0a0764657461696c73101b1a0d081210001800300050da1d60002001300068007000780080010088010098010042420a0763726561746564101c1a0d080910001800300050a009600020002a136e6f7728293a3a3a54494d455354414d50545a300068007000780080010088010098010042a0010a2a637264625f696e7465726e616c5f656e645f74696d655f73746172745f74696d655f73686172645f3136101d1a0c080110201800300050176000200030015a4f6d6f6428666e763332286d643528637264625f696e7465726e616c2e646174756d735f746f5f627974657328656e645f74696d652c2073746172745f74696d652929292c2031363a3a3a494e543829680070007800800101880100980100481e529a040a077072696d61727910011801220c73746174656d656e745f6964220e7472616e73616374696f6e5f69642a0
a73657373696f6e5f69642a1a7472616e73616374696f6e5f66696e6765727072696e745f69642a1873746174656d656e745f66696e6765727072696e745f69642a0770726f626c656d2a066361757365732a0571756572792a067374617475732a0a73746172745f74696d652a08656e645f74696d652a0966756c6c5f7363616e2a09757365725f6e616d652a086170705f6e616d652a0d757365725f7072696f726974792a0d64617461626173655f6e616d652a09706c616e5f676973742a07726574726965732a116c6173745f72657472795f726561736f6e2a12657865637574696f6e5f6e6f64655f6964732a15696e6465785f7265636f6d6d656e646174696f6e732a0c696d706c696369745f74786e2a0d6370755f73716c5f6e616e6f732a0a6572726f725f636f64652a0f636f6e74656e74696f6e5f74696d652a0f636f6e74656e74696f6e5f696e666f2a0764657461696c732a076372656174656430043002400040004a10080010001a00200028003000380040005a007001700370057006700770087009700a700b700c700d700e700f7010701170127013701470157016701770187019701a701b701c7a0408002000800100880100900104980101a20106080012001800a80100b20100ba0100c00100c80100d00101e00100e90100000000000000005a7c0a127472616e73616374696f6e5f69645f69647810021800220e7472616e73616374696f6e5f69643002380440004a10080010001a00200028003000380040005a007a0408002000800100880100900103980100a20106080012001800a80100b20100ba0100c00100c80100d00100e00100e90100000000000000005ab4010a1e7472616e73616374696f6e5f66696e6765727072696e745f69645f69647810031800221a7472616e73616374696f6e5f66696e6765727072696e745f6964220a73746172745f74696d652208656e645f74696d653003300a300b380438024000400140014a10080010001a00200028003000380040005a007a0408002000800100880100900103980100a20106080012001800a80100b20100ba0100c00100c80100d00100e00100e90100000000000000005ab0010a1c73746174656d656e745f66696e6765727072696e745f69645f69647810041800221873746174656d656e745f66696e6765727072696e745f6964220a73746172745f74696d652208656e645f74696d653005300a300b380438024000400140014a10080010001a00200028003000380040005a007a0408002000800100880100900103980100a20106080012001800a80100b20100ba0100c00100c80100d00100e00100e90100000000000000005af4010a0e74696d655f72616e67655f69647810051800222a637264625f696e7465726e616c5f656e645f74696d655f73746172745f74696d655f73686172645f3136220a73746172745f74696d652208656e645f74696d65301d300a300b380438024000400140014a10080010001a00200028003000380040005a007a0408002000800100880100900103980100a201460801122a637264625f696e7465726e616c5f656e645f74696d655f73746172745f74696d655f73686172645f313618102208656e645f74696d65220a73746172745f74696d65a80100b20100ba0100c00100c80100d00100e00100e901000000000000000060066a250a0d0a0561646d696e10e00318e0030a0c0a04726f6f7410e00318e00312046e6f64651803800101880103980100a20193020ad401637264625f696e7465726e616c5f656e645f74696d655f73746172745f74696d655f73686172645f313620494e2028303a3a3a494e54382c20313a3a3a494e54382c20323a3a3a494e54382c20333a3a3a494e54382c20343a3a3a494e54382c20353a3a3a494e54382c20363a3a3a494e54382c20373a3a3a494e54382c20383a3a3a494e54382c20393a3a3a494e54382c2031303a3a3a494e54382c2031313a3a3a494e54382c2031323a3a3a494e54382c2031333a3a3a494e54382c2031343a3a3a494e54382c2031353a3a3a494e5438291230636865636b5f637264625f696e7465726e616c5f656e645f74696d655f73746172745f74696d655f73686172645f31361800281d300038014002b201c8030a077072696d61727910001a0a73657373696f6e5f69641a0e7472616e73616374696f6e5f69641a1a7472616e73616374696f6e5f66696e6765727072696e745f69641a0c73746174656d656e745f69641a1873746174656d656e745f66696e6765727072696e745f69641a0770726f626c656d1a066361757365731a0571756572791a067374617475731a0a73746172745f74696d651a08656e645f74696d651a0966756c6c5f7363616e1a09757365725f6e616d651a086170705f6e616d651a0d757365725f7072696f726974791a0d646174616261
73655f6e616d651a09706c616e5f676973741a07726574726965731a116c6173745f72657472795f726561736f6e1a12657865637574696f6e5f6e6f64655f6964731a15696e6465785f7265636f6d6d656e646174696f6e731a0c696d706c696369745f74786e1a0d6370755f73716c5f6e616e6f731a0a6572726f725f636f64651a0f636f6e74656e74696f6e5f74696d651a0f636f6e74656e74696f6e5f696e666f1a0764657461696c731a0763726561746564200120022003200420052006200720082009200a200b200c200d200e200f2010201120122013201420152016201720182019201a201b201c2800b80101c20100e80100f2010408001200f801008002009202009a0200b20200b80200c0021dc80200e00200800300880303a80300b00300d00300d80300e00300"} +,{"key":"8b89cb8a89","value":"030abb1b0a0e7461626c655f6d657461646174611843200128013a00422a0a0564625f696410011a0c08011040180030005014600020003000680070007800800100880100980100422d0a087461626c655f696410021a0c08011040180030005014600020003000680070007800800100880100980100422c0a0764625f6e616d6510031a0c0807100018003000501960002000300068007000780080010088010098010042300a0b736368656d615f6e616d6510041a0c08071000180030005019600020003000680070007800800100880100980100422f0a0a7461626c655f6e616d6510051a0c0807100018003000501960002000300068007000780080010088010098010042320a0d746f74616c5f636f6c756d6e7310061a0c0801104018003000501460002000300068007000780080010088010098010042320a0d746f74616c5f696e646578657310071a0c08011040180030005014600020003000680070007800800100880100980100423f0a0973746f72655f69647310081a1d080f104018003000380150f8075a0c080110401800300050146000600020003000680070007800800100880100980100423b0a167265706c69636174696f6e5f73697a655f627974657310091a0c0801104018003000501460002000300068007000780080010088010098010042310a0c746f74616c5f72616e676573100a1a0c08011040180030005014600020003000680070007800800100880100980100423a0a15746f74616c5f6c6976655f646174615f6279746573100b1a0c0801104018003000501460002000300068007000780080010088010098010042350a10746f74616c5f646174615f6279746573100c1a0c0801104018003000501460002000300068007000780080010088010098010042340a0e706572635f6c6976655f64617461100d1a0d080210401800300050bd0560002000300068007000780080010088010098010042360a116c6173745f7570646174655f6572726f72100e1a0c0807100018003000501960002001300068007000780080010088010098010042470a0c6c6173745f75706461746564100f1a0d080910001800300050a009600020002a136e6f7728293a3a3a54494d455354414d50545a300068007000780080010088010098010042a4010a2c637264625f696e7465726e616c5f6c6173745f757064617465645f7461626c655f69645f73686172645f313610101a0c080110201800300050176000200030015a516d6f6428666e763332286d643528637264625f696e7465726e616c2e646174756d735f746f5f6279746573287461626c655f69642c206c6173745f757064617465642929292c2031363a3a3a494e543829680070007800800101880100980100481152d9020a077072696d61727910011801220564625f696422087461626c655f69642a0764625f6e616d652a0b736368656d615f6e616d652a0a7461626c655f6e616d652a0d746f74616c5f636f6c756d6e732a0d746f74616c5f696e64657865732a0973746f72655f6964732a167265706c69636174696f6e5f73697a655f62797465732a0c746f74616c5f72616e6765732a15746f74616c5f6c6976655f646174615f62797465732a10746f74616c5f646174615f62797465732a0e706572635f6c6976655f646174612a116c6173745f7570646174655f6572726f722a0c6c6173745f7570646174656430013002400040004a10080010001a00200028003000380040005a007003700470057006700770087009700a700b700c700d700e700f7a0408002000800100880100900104980101a20106080012001800a80100b20100ba0100c00100c80100d00101e00100e90100000000000000005aa3010a237265706c69636174696f6e5f73697a655f62797465735f7461626c655f69645f6964781002180022167265706c69636174696f6e5f73697a655f627974657322087461626c655f6964300930023801400140004a10080010001a002
00028003000380040005a007a0408002000800100880100900103980100a20106080012001800a80100b20100ba0100c00100c80100d00100e00100e90100000000000000005a8f010a19746f74616c5f72616e6765735f7461626c655f69645f69647810031800220c746f74616c5f72616e67657322087461626c655f6964300a30023801400140004a10080010001a00200028003000380040005a007a0408002000800100880100900103980100a20106080012001800a80100b20100ba0100c00100c80100d00100e00100e90100000000000000005a91010a1a746f74616c5f636f6c756d6e735f7461626c655f69645f69647810041800220d746f74616c5f636f6c756d6e7322087461626c655f6964300630023801400140004a10080010001a00200028003000380040005a007a0408002000800100880100900103980100a20106080012001800a80100b20100ba0100c00100c80100d00100e00100e90100000000000000005a91010a1a746f74616c5f696e64657865735f7461626c655f69645f69647810051800220d746f74616c5f696e646578657322087461626c655f6964300730023801400140004a10080010001a00200028003000380040005a007a0408002000800100880100900103980100a20106080012001800a80100b20100ba0100c00100c80100d00100e00100e90100000000000000005a8f010a15706572635f6c6976655f646174615f69645f69647810061800220e706572635f6c6976655f6461746122087461626c655f6964300d30023801400140004a10080010001a00200028003000380040005a00680d7a0408002000800100880100900103980100a20106080012001800a80100b20100ba0100c00100c80100d00100e00100e90100000000000000005afc010a106c6173745f757064617465645f69647810071800222c637264625f696e7465726e616c5f6c6173745f757064617465645f7461626c655f69645f73686172645f3136220c6c6173745f7570646174656422087461626c655f69643010300f300238014000400140004a10080010001a00200028003000380040005a007a0408002000800100880100900103980100a2014a0801122c637264625f696e7465726e616c5f6c6173745f757064617465645f7461626c655f69645f73686172645f31361810220c6c6173745f7570646174656422087461626c655f6964a80100b20100ba0100c00100c80100d00100e00100e90100000000000000005a730a0b64625f6e616d655f67696e10081800220764625f6e616d6530033801380240004a10080010001a00200028003000380040005a007a0408002000800101880100900103980100a20106080012001800a80100b20100ba0100c00100c80100d00100d80101e00100e90100000000000000005a790a0e7461626c655f6e616d655f67696e10091800220a7461626c655f6e616d6530053801380240004a10080010001a00200028003000380040005a007a0408002000800101880100900103980100a20106080012001800a80100b20100ba0100c00100c80100d00100d80101e00100e90100000000000000005a7b0a0f736368656d615f6e616d655f67696e100a1800220b736368656d615f6e616d6530043801380240004a10080010001a00200028003000380040005a007a0408002000800101880100900103980100a20106080012001800a80100b20100ba0100c00100c80100d00100d80101e00100e90100000000000000005a770a0d73746f72655f6964735f67696e100b1800220973746f72655f69647330083801380240004a10080010001a00200028003000380040005a007a0408002000800101880100900103980100a20106080012001800a80100b20100ba0100c00100c80100d00100d80100e00100e9010000000000000000600c6a250a0d0a0561646d696e10e00318e0030a0c0a04726f6f7410e00318e00312046e6f64651803800101880103980100a20197020ad601637264625f696e7465726e616c5f6c6173745f757064617465645f7461626c655f69645f73686172645f313620494e2028303a3a3a494e54382c20313a3a3a494e54382c20323a3a3a494e54382c20333a3a3a494e54382c20343a3a3a494e54382c20353a3a3a494e54382c20363a3a3a494e54382c20373a3a3a494e54382c20383a3a3a494e54382c20393a3a3a494e54382c2031303a3a3a494e54382c2031313a3a3a494e54382c2031323a3a3a494e54382c2031333a3a3a494e54382c2031343a3a3a494e54382c2031353a3a3a494e5438291232636865636b5f637264625f696e7465726e616c5f6c6173745f757064617465645f7461626c655f69645f73686172645f313618002810300038014002b20187020a077072696d61727910001a0564625f69641a087461626c655f69641a0764625f6e616d651a0b736368
656d615f6e616d651a0a7461626c655f6e616d651a0d746f74616c5f636f6c756d6e731a0d746f74616c5f696e64657865731a0973746f72655f6964731a167265706c69636174696f6e5f73697a655f62797465731a0c746f74616c5f72616e6765731a15746f74616c5f6c6976655f646174615f62797465731a10746f74616c5f646174615f62797465731a0e706572635f6c6976655f646174611a116c6173745f7570646174655f6572726f721a0c6c6173745f75706461746564200120022003200420052006200720082009200a200b200c200d200e200f2800b80101c20100e80100f2010408001200f801008002009202009a0200b20200b80200c0021dc80200e00200800300880303a80300b00300d00300d80300e00300"} ,{"key":"8c"} ,{"key":"8d"} ,{"key":"8d89888a89","value":"031080808040188080808002220308c0702803500058007801"} @@ -142,6 +143,7 @@ system hash=2ae2763375bdffaed3ccc39f6c4f9cabd4aa28567c542cc8e6e150c3497e3b4b ,{"key":"a68989a51273746174656d656e745f646961676e6f73746963735f726571756573747300018c89","value":"0146"} ,{"key":"a68989a51273746174656d656e745f657865637574696f6e5f696e73696768747300018c89","value":"018401"} ,{"key":"a68989a51273746174656d656e745f7374617469737469637300018c89","value":"0154"} +,{"key":"a68989a5127461626c655f6d6574616461746100018c89","value":"018601"} ,{"key":"a68989a5127461626c655f7374617469737469637300018c89","value":"0128"} ,{"key":"a68989a5127461736b5f7061796c6f61647300018c89","value":"0176"} ,{"key":"a68989a51274656e616e745f69645f73657100018c89","value":"017e"} @@ -193,12 +195,13 @@ system hash=2ae2763375bdffaed3ccc39f6c4f9cabd4aa28567c542cc8e6e150c3497e3b4b ,{"key":"c8"} ,{"key":"c9"} ,{"key":"ca"} +,{"key":"cb"} ] -tenant hash=17c52d00b75a99743d728d962c813d59843133b3181664f566183fc1aeece00f +tenant hash=d3028d00546e5e09272a67ccfe6fd31548fb436f2797a74e001dc7482a91bef4 ---- [{"key":""} -,{"key":"8b89898a89","value":"0312450a0673797374656d10011a250a0d0a0561646d696e1080101880100a0c0a04726f6f7410801018801012046e6f646518032200280140004a006a0a08d8843d100218002006"} +,{"key":"8b89898a89","value":"0312450a0673797374656d10011a250a0d0a0561646d696e1080101880100a0c0a04726f6f7410801018801012046e6f646518032200280140004a006a0a08d8843d10021800200e"} ,{"key":"8b898b8a89","value":"030a8e030a0a64657363726970746f721803200128013a0042270a02696410011a0c08011040180030005014600020003000680070007800800100880100980100422f0a0a64657363726970746f7210021a0c08081000180030005011600020013000680070007800800100880100980100480352710a077072696d61727910011801220269642a0a64657363726970746f72300140004a10080010001a00200028003000380040005a0070027a0408002000800100880100900104980101a20106080012001800a80100b20100ba0100c00100c80100d00101e00100e901000000000000000060026a210a0b0a0561646d696e102018200a0a0a04726f6f741020182012046e6f64651803800101880103980100b201130a077072696d61727910001a02696420012800b201240a1066616d5f325f64657363726970746f7210021a0a64657363726970746f7220022802b80103c20100e80100f2010408001200f801008002009202009a0200b20200b80200c0021dc80200e00200800300880302a80300b00300d00300d80300e00300"} 
,{"key":"8b898c8a89","value":"030ac7050a0575736572731804200128013a00422d0a08757365726e616d6510011a0c0807100018003000501960002000300068007000780080010088010098010042330a0e68617368656450617373776f726410021a0c0808100018003000501160002001300068007000780080010088010098010042320a066973526f6c6510031a0c08001000180030005010600020002a0566616c73653000680070007800800100880100980100422c0a07757365725f696410041a0c080c100018003000501a60002000300068007000780080010088010098010048055290010a077072696d617279100118012208757365726e616d652a0e68617368656450617373776f72642a066973526f6c652a07757365725f6964300140004a10080010001a00200028003000380040005a007002700370047a0408002000800100880100900104980101a20106080012001800a80100b20100ba0100c00100c80100d00102e00100e90100000000000000005a740a1175736572735f757365725f69645f696478100218012207757365725f69643004380140004a10080010001a00200028003000380040005a007a0408002000800100880100900103980100a20106080012001800a80100b20100ba0100c00100c80100d00101e00100e901000000000000000060036a250a0d0a0561646d696e10e00318e0030a0c0a04726f6f7410e00318e00312046e6f64651803800101880103980100b201240a077072696d61727910001a08757365726e616d651a07757365725f6964200120042804b2012c0a1466616d5f325f68617368656450617373776f726410021a0e68617368656450617373776f726420022802b2011c0a0c66616d5f335f6973526f6c6510031a066973526f6c6520032803b80104c20100e80100f2010408001200f801008002009202009a0200b20200b80200c0021dc80200e00200800300880303a80300b00300d00300d80300e00300"} ,{"key":"8b898d8a89","value":"030afd020a057a6f6e65731805200128013a0042270a02696410011a0c08011040180030005014600020003000680070007800800100880100980100422b0a06636f6e66696710021a0c080810001800300050116000200130006800700078008001008801009801004803526d0a077072696d61727910011801220269642a06636f6e666967300140004a10080010001a00200028003000380040005a0070027a0408002000800100880100900104980101a20106080012001800a80100b20100ba0100c00100c80100d00101e00100e901000000000000000060026a250a0d0a0561646d696e10e00318e0030a0c0a04726f6f7410e00318e00312046e6f64651803800101880103980100b201130a077072696d61727910001a02696420012800b2011c0a0c66616d5f325f636f6e66696710021a06636f6e66696720022802b80103c20100e80100f2010408001200f801008002009202009a0200b20200b80200c0021dc80200e00200800300880302a80300b00300d00300d80300e00300"} @@ -235,7 +238,7 @@ tenant hash=17c52d00b75a99743d728d962c813d59843133b3181664f566183fc1aeece00f 
,{"key":"8b89b38a89","value":"030a8f200a167472616e73616374696f6e5f73746174697374696373182b200128013a0042330a0d616767726567617465645f747310011a0d080910001800300050a00960002000300068007000780080010088010098010042330a0e66696e6765727072696e745f696410021a0c08081000180030005011600020003000680070007800800100880100980100422d0a086170705f6e616d6510031a0c08071000180030005019600020003000680070007800800100880100980100422c0a076e6f64655f696410041a0c0801104018003000501460002000300068007000780080010088010098010042380a0c6167675f696e74657276616c10051a13080610001800300050a20960006a040800100020003000680070007800800100880100980100422e0a086d6574616461746110061a0d081210001800300050da1d60002000300068007000780080010088010098010042300a0a7374617469737469637310071a0d081210001800300050da1d60002000300068007000780080010088010098010042cf010a43637264625f696e7465726e616c5f616767726567617465645f74735f6170705f6e616d655f66696e6765727072696e745f69645f6e6f64655f69645f73686172645f3810081a0c080110201800300050176000200030015a656d6f6428666e76333228637264625f696e7465726e616c2e646174756d735f746f5f627974657328616767726567617465645f74732c206170705f6e616d652c2066696e6765727072696e745f69642c206e6f64655f696429292c20383a3a3a494e54382968007000780080010088010098010042710a0f657865637574696f6e5f636f756e7410091a0c080110401800300050146000200130005a3b2828737461746973746963732d3e2773746174697374696373273a3a3a535452494e47292d3e27636e74273a3a3a535452494e47293a3a494e5438680070007800800100880100980100428a010a0f736572766963655f6c6174656e6379100a1a0d080210401800300050bd056000200130005a53282828737461746973746963732d3e2773746174697374696373273a3a3a535452494e47292d3e277376634c6174273a3a3a535452494e47292d3e276d65616e273a3a3a535452494e47293a3a464c4f4154386800700078008001008801009801004297010a0d6370755f73716c5f6e616e6f73100b1a0d080210401800300050bd056000200130005a62282828737461746973746963732d3e27657865637574696f6e5f73746174697374696373273a3a3a535452494e47292d3e2763707553514c4e616e6f73273a3a3a535452494e47292d3e276d65616e273a3a3a535452494e47293a3a464c4f415438680070007800800100880100980100429c010a0f636f6e74656e74696f6e5f74696d65100c1a0d080210401800300050bd056000200130005a65282828737461746973746963732d3e27657865637574696f6e5f73746174697374696373273a3a3a535452494e47292d3e27636f6e74656e74696f6e54696d65273a3a3a535452494e47292d3e276d65616e273a3a3a535452494e47293a3a464c4f41543868007000780080010088010098010042dc010a1e746f74616c5f657374696d617465645f657865637574696f6e5f74696d65100d1a0d080210401800300050bd056000200130005a95012828737461746973746963732d3e2773746174697374696373273a3a3a535452494e47292d3e3e27636e74273a3a3a535452494e47293a3a464c4f415438202a20282828737461746973746963732d3e2773746174697374696373273a3a3a535452494e47292d3e277376634c6174273a3a3a535452494e47292d3e3e276d65616e273a3a3a535452494e47293a3a464c4f415438680070007800800100880100980100428a010a0b7039395f6c6174656e6379100e1a0d080210401800300050bd056000200130005a57282828737461746973746963732d3e2773746174697374696373273a3a3a535452494e47292d3e276c6174656e6379496e666f273a3a3a535452494e47292d3e27703939273a3a3a535452494e47293a3a464c4f415438680070007800800100880100980100480f5280040a077072696d617279100118012243637264625f696e7465726e616c5f616767726567617465645f74735f6170705f6e616d655f66696e6765727072696e745f69645f6e6f64655f69645f73686172645f38220d616767726567617465645f7473220e66696e6765727072696e745f696422086170705f6e616d6522076e6f64655f69642a0c6167675f696e74657276616c2a086d657461646174612a0a737461746973746963732a0f657865637574696f6e5f636f756e742a0f736572766963655f6c6174656e63792a0d6370755f73716c5f6e616e6f732a0f636f6e746
56e74696f6e5f74696d652a1e746f74616c5f657374696d617465645f657865637574696f6e5f74696d652a0b7039395f6c6174656e637930083001300230033004400040004000400040004a10080010001a00200028003000380040005a007005700670077009700a700b700c700d700e7a0408002000800100880100900104980101a2017b08011243637264625f696e7465726e616c5f616767726567617465645f74735f6170705f6e616d655f66696e6765727072696e745f69645f6e6f64655f69645f73686172645f381808220d616767726567617465645f747322086170705f6e616d65220e66696e6765727072696e745f696422076e6f64655f6964a80100b20100ba0100c00100c80100d00101e00100e90100000000000000005a85010a1566696e6765727072696e745f73746174735f69647810021800220e66696e6765727072696e745f69643002380838013803380440004a10080010001a00200028003000380040005a007a0408002000800100880100900103980100a20106080012001800a80100b20100ba0100c00100c80100d00100e00100e90100000000000000005acb010a13657865637574696f6e5f636f756e745f69647810031800220d616767726567617465645f747322086170705f6e616d65220f657865637574696f6e5f636f756e743001300330093808380238044000400040014a10080010001a00200028003000380040005a007a0408002000800100880100900103980100a20106080012001800a80100b20100ba01286170705f6e616d65204e4f54204c494b4520272420696e7465726e616c25273a3a3a535452494e47c00100c80100d00100e00100e90100000000000000005acd010a13736572766963655f6c6174656e63795f69647810041800220d616767726567617465645f747322086170705f6e616d65220f736572766963655f6c6174656e637930013003300a3808380238044000400040014a10080010001a00200028003000380040005a00680a7a0408002000800100880100900103980100a20106080012001800a80100b20100ba01286170705f6e616d65204e4f54204c494b4520272420696e7465726e616c25273a3a3a535452494e47c00100c80100d00100e00100e90100000000000000005ac9010a116370755f73716c5f6e616e6f735f69647810051800220d616767726567617465645f747322086170705f6e616d65220d6370755f73716c5f6e616e6f7330013003300b3808380238044000400040014a10080010001a00200028003000380040005a00680b7a0408002000800100880100900103980100a20106080012001800a80100b20100ba01286170705f6e616d65204e4f54204c494b4520272420696e7465726e616c25273a3a3a535452494e47c00100c80100d00100e00100e90100000000000000005acd010a13636f6e74656e74696f6e5f74696d655f69647810061800220d616767726567617465645f747322086170705f6e616d65220f636f6e74656e74696f6e5f74696d6530013003300c3808380238044000400040014a10080010001a00200028003000380040005a00680c7a0408002000800100880100900103980100a20106080012001800a80100b20100ba01286170705f6e616d65204e4f54204c494b4520272420696e7465726e616c25273a3a3a535452494e47c00100c80100d00100e00100e90100000000000000005aeb010a22746f74616c5f657374696d617465645f657865637574696f6e5f74696d655f69647810071800220d616767726567617465645f747322086170705f6e616d65221e746f74616c5f657374696d617465645f657865637574696f6e5f74696d6530013003300d3808380238044000400040014a10080010001a00200028003000380040005a00680d7a0408002000800100880100900103980100a20106080012001800a80100b20100ba01286170705f6e616d65204e4f54204c494b4520272420696e7465726e616c25273a3a3a535452494e47c00100c80100d00100e00100e90100000000000000005ac5010a0f7039395f6c6174656e63795f69647810081800220d616767726567617465645f747322086170705f6e616d65220b7039395f6c6174656e637930013003300e3808380238044000400040014a10080010001a00200028003000380040005a00680e7a0408002000800100880100900103980100a20106080012001800a80100b20100ba01286170705f6e616d65204e4f54204c494b4520272420696e7465726e616c25273a3a3a535452494e47c00100c80100d00100e00100e901000000000000000060096a210a0b0a0561646d696e102018200a0a0a04726f6f741020182012046e6f64651803800101880103980100a201ef010a9701637264625f696e7465726e616c5f616767726567617465645f74735f6170705f6e616d65
5f66696e6765727072696e745f69645f6e6f64655f69645f73686172645f3820494e2028303a3a3a494e54382c20313a3a3a494e54382c20323a3a3a494e54382c20333a3a3a494e54382c20343a3a3a494e54382c20353a3a3a494e54382c20363a3a3a494e54382c20373a3a3a494e5438291249636865636b5f637264625f696e7465726e616c5f616767726567617465645f74735f6170705f6e616d655f66696e6765727072696e745f69645f6e6f64655f69645f73686172645f3818002808300038014002b201b3020a077072696d61727910001a43637264625f696e7465726e616c5f616767726567617465645f74735f6170705f6e616d655f66696e6765727072696e745f69645f6e6f64655f69645f73686172645f381a0d616767726567617465645f74731a0e66696e6765727072696e745f69641a086170705f6e616d651a076e6f64655f69641a0c6167675f696e74657276616c1a086d657461646174611a0a737461746973746963731a0f657865637574696f6e5f636f756e741a0f736572766963655f6c6174656e63791a0d6370755f73716c5f6e616e6f731a0f636f6e74656e74696f6e5f74696d651a1e746f74616c5f657374696d617465645f657865637574696f6e5f74696d651a0b7039395f6c6174656e6379200820012002200320042005200620072009200a200b200c200d200e2800b80101c20100e80100f2010408001200f801008002009202009a0200b20200b80200c0021dc80200e00200800300880303a80300b00300d00300d80300e00300"} ,{"key":"8b89b48a89","value":"030aed050a1664617461626173655f726f6c655f73657474696e6773182c200128013a0042300a0b64617461626173655f696410011a0c080c100018003000501a600020003000680070007800800100880100980100422e0a09726f6c655f6e616d6510021a0c08071000180030005019600020003000680070007800800100880100980100423e0a0873657474696e677310031a1d080f100018003000380750f1075a0c080710001800300050196000600020003000680070007800800100880100980100422c0a07726f6c655f696410041a0c080c100018003000501a60002000300068007000780080010088010098010048055292010a077072696d61727910011801220b64617461626173655f69642209726f6c655f6e616d652a0873657474696e67732a07726f6c655f696430013002400040004a10080010001a00200028003000380040005a00700370047a0408002000800100880100900104980101a20106080012001800a80100b20100ba0100c00100c80100d00102e00100e90100000000000000005aae010a2e64617461626173655f726f6c655f73657474696e67735f64617461626173655f69645f726f6c655f69645f6b657910021801220b64617461626173655f69642207726f6c655f69642a0873657474696e6773300130043802400040004a10080010001a00200028003000380040005a0070037a0408002000800100880100900103980100a20106080012001800a80100b20100ba0100c00100c80100d00101e00100e901000000000000000060036a250a0d0a0561646d696e10e00318e0030a0c0a04726f6f7410e00318e00312046e6f64651803800101880103980100b201400a077072696d61727910001a0b64617461626173655f69641a09726f6c655f6e616d651a0873657474696e67731a07726f6c655f696420012002200320042800b80101c20100e80100f2010408001200f801008002009202009a0200b20200b80200c0021dc80200e00200800300880303a80300b00300d00300d80300e00300"} 
,{"key":"8b89b58a89","value":"030aba0b0a0c74656e616e745f7573616765182d200128013a00422e0a0974656e616e745f696410011a0c0801104018003000501460002000300068007000780080010088010098010042300a0b696e7374616e63655f696410021a0c0801104018003000501460002000300068007000780080010088010098010042350a106e6578745f696e7374616e63655f696410031a0c0801104018003000501460002000300068007000780080010088010098010042310a0b6c6173745f75706461746510041a0d080510001800300050da0860002000300068007000780080010088010098010042340a0e72755f62757273745f6c696d697410051a0d080210401800300050bd0560002001300068007000780080010088010098010042340a0e72755f726566696c6c5f7261746510061a0d080210401800300050bd0560002001300068007000780080010088010098010042300a0a72755f63757272656e7410071a0d080210401800300050bd0560002001300068007000780080010088010098010042370a1163757272656e745f73686172655f73756d10081a0d080210401800300050bd0560002001300068007000780080010088010098010042360a11746f74616c5f636f6e73756d7074696f6e10091a0c0808100018003000501160002001300068007000780080010088010098010042330a0e696e7374616e63655f6c65617365100a1a0c0808100018003000501160002001300068007000780080010088010098010042310a0c696e7374616e63655f736571100b1a0c0801104018003000501460002001300068007000780080010088010098010042350a0f696e7374616e63655f736861726573100c1a0d080210401800300050bd0560002001300068007000780080010088010098010042320a0d63757272656e745f7261746573100d1a0c08081000180030005011600020013000680070007800800100880100980100422f0a0a6e6578745f7261746573100e1a0c08081000180030005011600020013000680070007800800100880100980100480f52ce020a077072696d61727910011801220974656e616e745f6964220b696e7374616e63655f69642a106e6578745f696e7374616e63655f69642a0b6c6173745f7570646174652a0e72755f62757273745f6c696d69742a0e72755f726566696c6c5f726174652a0a72755f63757272656e742a1163757272656e745f73686172655f73756d2a11746f74616c5f636f6e73756d7074696f6e2a0e696e7374616e63655f6c656173652a0c696e7374616e63655f7365712a0f696e7374616e63655f7368617265732a0d63757272656e745f72617465732a0a6e6578745f726174657330013002400040004a10080010001a00200028003000380040005a007003700470057006700770087009700a700b700c700d700e7a0408002000800100880100900104980101a20106080012001800a80100b20100ba0100c00100c80100d00101e00100e901000000000000000060026a250a0d0a0561646d696e10e00318e0030a0c0a04726f6f7410e00318e00312046e6f64651803800101880103980100b201fc010a077072696d61727910001a0974656e616e745f69641a0b696e7374616e63655f69641a106e6578745f696e7374616e63655f69641a0b6c6173745f7570646174651a0e72755f62757273745f6c696d69741a0e72755f726566696c6c5f726174651a0a72755f63757272656e741a1163757272656e745f73686172655f73756d1a11746f74616c5f636f6e73756d7074696f6e1a0e696e7374616e63655f6c656173651a0c696e7374616e63655f7365711a0f696e7374616e63655f7368617265731a0d63757272656e745f72617465731a0a6e6578745f7261746573200120022003200420052006200720082009200a200b200c200d200e2800b80101c20100e80100f2010408001200f801008002009202009a0200b20200b80200c0021dc80200e00200800301880302a80300b00300d00300d80300e00300"} 
-,{"key":"8b89b68a89","value":"030af2050a0d73716c5f696e7374616e636573182e200128013a0042270a02696410011a0c0801104018003000501460002000300068007000780080010088010098010042290a046164647210021a0c08071000180030005019600020013000680070007800800100880100980100422f0a0a73657373696f6e5f696410031a0c08081000180030005011600020013000680070007800800100880100980100422e0a086c6f63616c69747910041a0d081210001800300050da1d600020013000680070007800800100880100980100422d0a0873716c5f6164647210051a0c0807100018003000501960002001300068007000780080010088010098010042300a0b637264625f726567696f6e10061a0c0808100018003000501160002000300068007000780080010088010098010042330a0e62696e6172795f76657273696f6e10071a0c08071000180030005019600020013000680070007800800100880100980100480852b4010a077072696d61727910021801220b637264625f726567696f6e220269642a04616464722a0a73657373696f6e5f69642a086c6f63616c6974792a0873716c5f616464722a0e62696e6172795f76657273696f6e30063001400040004a10080010001a00200028003000380040005a00700270037004700570077a0408002000800100880100900104980101a20106080012001800a80100b20100ba0100c00100c80100d00101e00100e901000000000000000060036a250a0d0a0561646d696e10e00318e0030a0c0a04726f6f7410e00318e00312046e6f64651803800101880103980100b201620a077072696d61727910001a0269641a04616464721a0a73657373696f6e5f69641a086c6f63616c6974791a0873716c5f616464721a0b637264625f726567696f6e1a0e62696e6172795f76657273696f6e20012002200320042005200620072800b80101c20100e80100f2010408001200f801008002009202009a0200b20200b80200c0021dc80200e00200800300880302a80300b00300d00300d80300e00300"} +,{"key":"8b89b68a89","value":"030ac2060a0d73716c5f696e7374616e636573182e200128013a0042270a02696410011a0c0801104018003000501460002000300068007000780080010088010098010042290a046164647210021a0c08071000180030005019600020013000680070007800800100880100980100422f0a0a73657373696f6e5f696410031a0c08081000180030005011600020013000680070007800800100880100980100422e0a086c6f63616c69747910041a0d081210001800300050da1d600020013000680070007800800100880100980100422d0a0873716c5f6164647210051a0c0807100018003000501960002001300068007000780080010088010098010042300a0b637264625f726567696f6e10061a0c0808100018003000501160002000300068007000780080010088010098010042330a0e62696e6172795f76657273696f6e10071a0c0807100018003000501960002001300068007000780080010088010098010042300a0b69735f647261696e696e6710081a0c08001000180030005010600020013000680070007800800100880100980100480952c3010a077072696d61727910021801220b637264625f726567696f6e220269642a04616464722a0a73657373696f6e5f69642a086c6f63616c6974792a0873716c5f616464722a0e62696e6172795f76657273696f6e2a0b69735f647261696e696e6730063001400040004a10080010001a00200028003000380040005a007002700370047005700770087a0408002000800100880100900104980101a20106080012001800a80100b20100ba0100c00100c80100d00101e00100e901000000000000000060036a250a0d0a0561646d696e10e00318e0030a0c0a04726f6f7410e00318e00312046e6f64651803800101880103980100b201710a077072696d61727910001a0269641a04616464721a0a73657373696f6e5f69641a086c6f63616c6974791a0873716c5f616464721a0b637264625f726567696f6e1a0e62696e6172795f76657273696f6e1a0b69735f647261696e696e67200120022003200420052006200720082800b80101c20100e80100f2010408001200f801008002009202009a0200b20200b80200c0021dc80200e00200800300880302a80300b00300d00300d80300e00300"} 
,{"key":"8b89b78a89","value":"030a81040a137370616e5f636f6e66696775726174696f6e73182f200128013a00422e0a0973746172745f6b657910011a0c08081000180030005011600020003000680070007800800100880100980100422c0a07656e645f6b657910021a0c08081000180030005011600020003000680070007800800100880100980100422b0a06636f6e66696710031a0c080810001800300050116000200030006800700078008001008801009801004804527f0a077072696d61727910011801220973746172745f6b65792a07656e645f6b65792a06636f6e666967300140004a10080010001a00200028003000380040005a00700270037a0408002000800100880100900104980101a20106080012001800a80100b20100ba0100c00100c80100d00101e00100e901000000000000000060026a250a0d0a0561646d696e10e00318e0030a0c0a04726f6f7410e00318e00312046e6f64651803800101880103980100a2012f0a1373746172745f6b6579203c20656e645f6b6579120c636865636b5f626f756e6473180028012802300038004002b2012f0a077072696d61727910001a0973746172745f6b65791a07656e645f6b65791a06636f6e6669672001200220032800b80101c20100e80100f2010408001200f801008002009202009a0200b20200b80200c0021dc80200e00200800301880303a80300b00300d00300d80300e00300"} ,{"key":"8b89b88a89","value":"030ad3020a0b726f6c655f69645f7365711830200128013a00422a0a0576616c756510011a0c08011040180030005014600020003000680070007800800100880100980100480052660a077072696d61727910011800220576616c7565300140004a10080010001a00200028003000380040005a007a0408002000800100880100900104980101a20106080012001800a80100b20100ba0100c00100c80100d00100e00100e901000000000000000060006a250a0d0a0561646d696e10a00618a0060a0c0a04726f6f7410a00618a00612046e6f64651803800100880103980100b201160a077072696d61727910001a0576616c756520012801b80100c20100e2011a0801106418ffffffff0720642800320408001000380142004800e80100f2010408001200f801008002009202009a0200b20200b80200c0021dc80200e00200800300880300a80300b00300d00300d80300e00300"} ,{"key":"8b89ba8a89","value":"030aea050a0f74656e616e745f73657474696e67731832200128013a00422e0a0974656e616e745f696410011a0c0801104018003000501460002000300068007000780080010088010098010042290a046e616d6510021a0c08071000180030005019600020003000680070007800800100880100980100422a0a0576616c756510031a0c0807100018003000501960002000300068007000780080010088010098010042450a0c6c6173745f7570646174656410041a0d080510001800300050da08600020002a116e6f7728293a3a3a54494d455354414d503000680070007800800100880100980100422f0a0a76616c75655f7479706510051a0c08071000180030005019600020003000680070007800800100880100980100422b0a06726561736f6e10061a0c08071000180030005019600020013000680070007800800100880100980100480752a5010a077072696d61727910011801220974656e616e745f696422046e616d652a0576616c75652a0c6c6173745f757064617465642a0a76616c75655f747970652a06726561736f6e30013002400040004a10080010001a00200028003000380040005a0070037004700570067a0408002000800100880100900104980101a20106080012001800a80100b20100ba0100c00100c80100d00101e00100e901000000000000000060026a250a0d0a0561646d696e10e00318e0030a0c0a04726f6f7410e00318e00312046e6f64651803800101880103980100b20185010a3966616d5f305f74656e616e745f69645f6e616d655f76616c75655f6c6173745f757064617465645f76616c75655f747970655f726561736f6e10001a0974656e616e745f69641a046e616d651a0576616c75651a0c6c6173745f757064617465641a0a76616c75655f747970651a06726561736f6e2001200220032004200520062800b80101c20100e80100f2010408001200f801008002009202009a0200b20200b80200c0021dc80200e00200800300880302a80300b00300d00300d80300e00300"} @@ -255,6 +258,7 @@ tenant hash=17c52d00b75a99743d728d962c813d59843133b3181664f566183fc1aeece00f 
,{"key":"8b89c88a89","value":"030abf0a0a0f6d7663635f737461746973746963731840200128013a0042450a0a637265617465645f617410011a0d080910001800300050a009600020002a136e6f7728293a3a3a54494d455354414d50545a300068007000780080010088010098010042300a0b64617461626173655f696410021a0c08011040180030005014600020003000680070007800800100880100980100422d0a087461626c655f696410031a0c08011040180030005014600020003000680070007800800100880100980100422d0a08696e6465785f696410041a0c0801104018003000501460002000300068007000780080010088010098010042300a0a7374617469737469637310051a0d081210001800300050da1d60002000300068007000780080010088010098010042ab010a3f637264625f696e7465726e616c5f637265617465645f61745f64617461626173655f69645f696e6465785f69645f7461626c655f69645f73686172645f313610061a0c080110201800300050176000200030015a456d6f6428666e763332286d643528637264625f696e7465726e616c2e646174756d735f746f5f627974657328637265617465645f61742929292c2031363a3a3a494e543829680070007800800101880100980100480752e4020a146d7663635f737461746973746963735f706b657910011801223f637264625f696e7465726e616c5f637265617465645f61745f64617461626173655f69645f696e6465785f69645f7461626c655f69645f73686172645f3136220a637265617465645f6174220b64617461626173655f696422087461626c655f69642208696e6465785f69642a0a7374617469737469637330063001300230033004400040004000400040004a10080010001a00200028003000380040005a0070057a0408002000800100880100900104980101a201720801123f637264625f696e7465726e616c5f637265617465645f61745f64617461626173655f69645f696e6465785f69645f7461626c655f69645f73686172645f31361810220a637265617465645f6174220b64617461626173655f69642208696e6465785f696422087461626c655f6964a80100b20100ba0100c00100c80100d00101e00100e901000000000000000060026a250a0d0a0561646d696e10e00318e0030a0c0a04726f6f7410e00318e00312046e6f64651803800101880103980100a201bd020ae901637264625f696e7465726e616c5f637265617465645f61745f64617461626173655f69645f696e6465785f69645f7461626c655f69645f73686172645f313620494e2028303a3a3a494e54382c20313a3a3a494e54382c20323a3a3a494e54382c20333a3a3a494e54382c20343a3a3a494e54382c20353a3a3a494e54382c20363a3a3a494e54382c20373a3a3a494e54382c20383a3a3a494e54382c20393a3a3a494e54382c2031303a3a3a494e54382c2031313a3a3a494e54382c2031323a3a3a494e54382c2031333a3a3a494e54382c2031343a3a3a494e54382c2031353a3a3a494e5438291245636865636b5f637264625f696e7465726e616c5f637265617465645f61745f64617461626173655f69645f696e6465785f69645f7461626c655f69645f73686172645f313618002806300038014002b201500a077072696d61727910001a0a637265617465645f61741a0b64617461626173655f69641a087461626c655f69641a08696e6465785f69641a0a73746174697374696373200120022003200420052805b80101c20100e80100f2010408001200f801008002009202009a0200b20200b80200c0021dc80200e00200800300880303a80300b00300d00300d80300e00300"} 
,{"key":"8b89c98a89","value":"030ab5170a1e7472616e73616374696f6e5f657865637574696f6e5f696e7369676874731841200128013a0042340a0e7472616e73616374696f6e5f696410011a0d080e100018003000508617600020003000680070007800800100880100980100423f0a1a7472616e73616374696f6e5f66696e6765727072696e745f696410021a0c0808100018003000501160002000300068007000780080010088010098010042320a0d71756572795f73756d6d61727910031a0c0807100018003000501960002001300068007000780080010088010098010042310a0c696d706c696369745f74786e10041a0c08001000180030005010600020013000680070007800800100880100980100422f0a0a73657373696f6e5f696410051a0c0807100018003000501960002000300068007000780080010088010098010042300a0a73746172745f74696d6510061a0d080910001800300050a009600020013000680070007800800100880100980100422e0a08656e645f74696d6510071a0d080910001800300050a009600020013000680070007800800100880100980100422e0a09757365725f6e616d6510081a0c08071000180030005019600020013000680070007800800100880100980100422d0a086170705f6e616d6510091a0c0807100018003000501960002001300068007000780080010088010098010042320a0d757365725f7072696f72697479100a1a0c08071000180030005019600020013000680070007800800100880100980100422c0a0772657472696573100b1a0c0801104018003000501460002001300068007000780080010088010098010042360a116c6173745f72657472795f726561736f6e100c1a0c08071000180030005019600020013000680070007800800100880100980100423e0a0870726f626c656d73100d1a1d080f104018003000380150f8075a0c080110401800300050146000600020013000680070007800800100880100980100423c0a06636175736573100e1a1d080f104018003000380150f8075a0c08011040180030005014600060002001300068007000780080010088010098010042480a1273746d745f657865637574696f6e5f696473100f1a1d080f100018003000380750f1075a0c08071000180030005019600060002001300068007000780080010088010098010042320a0d6370755f73716c5f6e616e6f7310101a0c0801104018003000501460002001300068007000780080010088010098010042340a0f6c6173745f6572726f725f636f646510111a0c08071000180030005019600020013000680070007800800100880100980100422b0a0673746174757310121a0c08011040180030005014600020013000680070007800800100880100980100423b0a0f636f6e74656e74696f6e5f74696d6510131a13080610001800300050a20960006a04080010002001300068007000780080010088010098010042350a0f636f6e74656e74696f6e5f696e666f10141a0d081210001800300050da1d600020013000680070007800800100880100980100422d0a0764657461696c7310151a0d081210001800300050da1d60002001300068007000780080010088010098010042420a076372656174656410161a0d080910001800300050a009600020002a136e6f7728293a3a3a54494d455354414d50545a300068007000780080010088010098010042a0010a2a637264625f696e7465726e616c5f656e645f74696d655f73746172745f74696d655f73686172645f313610171a0c080110201800300050176000200030015a4f6d6f6428666e763332286d643528637264625f696e7465726e616c2e646174756d735f746f5f627974657328656e645f74696d652c2073746172745f74696d652929292c2031363a3a3a494e543829680070007800800101880100980100481852b6030a077072696d61727910011801220e7472616e73616374696f6e5f69642a1a7472616e73616374696f6e5f66696e6765727072696e745f69642a0d71756572795f73756d6d6172792a0c696d706c696369745f74786e2a0a73657373696f6e5f69642a0a73746172745f74696d652a08656e645f74696d652a09757365725f6e616d652a086170705f6e616d652a0d757365725f7072696f726974792a07726574726965732a116c6173745f72657472795f726561736f6e2a0870726f626c656d732a066361757365732a1273746d745f657865637574696f6e5f6964732a0d6370755f73716c5f6e616e6f732a0f6c6173745f6572726f725f636f64652a067374617475732a0f636f6e74656e74696f6e5f74696d652a0f636f6e74656e74696f6e5f696e666f2a0764657461696c732a0763726561746564300140004a10080010001a00200028003000380040005a007002700370047005700670077
0087009700a700b700c700d700e700f70107011701270137014701570167a0408002000800100880100900104980101a20106080012001800a80100b20100ba0100c00100c80100d00101e00100e90100000000000000005a94010a1e7472616e73616374696f6e5f66696e6765727072696e745f69645f69647810021800221a7472616e73616374696f6e5f66696e6765727072696e745f69643002380140004a10080010001a00200028003000380040005a007a0408002000800100880100900103980100a20106080012001800a80100b20100ba0100c00100c80100d00100e00100e90100000000000000005af2010a0e74696d655f72616e67655f69647810031800222a637264625f696e7465726e616c5f656e645f74696d655f73746172745f74696d655f73686172645f3136220a73746172745f74696d652208656e645f74696d6530173006300738014000400140014a10080010001a00200028003000380040005a007a0408002000800100880100900103980100a201460801122a637264625f696e7465726e616c5f656e645f74696d655f73746172745f74696d655f73686172645f313618102208656e645f74696d65220a73746172745f74696d65a80100b20100ba0100c00100c80100d00100e00100e901000000000000000060046a250a0d0a0561646d696e10e00318e0030a0c0a04726f6f7410e00318e00312046e6f64651803800101880103980100a20193020ad401637264625f696e7465726e616c5f656e645f74696d655f73746172745f74696d655f73686172645f313620494e2028303a3a3a494e54382c20313a3a3a494e54382c20323a3a3a494e54382c20333a3a3a494e54382c20343a3a3a494e54382c20353a3a3a494e54382c20363a3a3a494e54382c20373a3a3a494e54382c20383a3a3a494e54382c20393a3a3a494e54382c2031303a3a3a494e54382c2031313a3a3a494e54382c2031323a3a3a494e54382c2031333a3a3a494e54382c2031343a3a3a494e54382c2031353a3a3a494e5438291230636865636b5f637264625f696e7465726e616c5f656e645f74696d655f73746172745f74696d655f73686172645f313618002817300038014002b201e6020a077072696d61727910001a0e7472616e73616374696f6e5f69641a1a7472616e73616374696f6e5f66696e6765727072696e745f69641a0d71756572795f73756d6d6172791a0c696d706c696369745f74786e1a0a73657373696f6e5f69641a0a73746172745f74696d651a08656e645f74696d651a09757365725f6e616d651a086170705f6e616d651a0d757365725f7072696f726974791a07726574726965731a116c6173745f72657472795f726561736f6e1a0870726f626c656d731a066361757365731a1273746d745f657865637574696f6e5f6964731a0d6370755f73716c5f6e616e6f731a0f6c6173745f6572726f725f636f64651a067374617475731a0f636f6e74656e74696f6e5f74696d651a0f636f6e74656e74696f6e5f696e666f1a0764657461696c731a0763726561746564200120022003200420052006200720082009200a200b200c200d200e200f20102011201220132014201520162800b80101c20100e80100f2010408001200f801008002009202009a0200b20200b80200c0021dc80200e00200800300880303a80300b00300d00300d80300e00300"} 
,{"key":"8b89ca8a89","value":"030a801e0a1c73746174656d656e745f657865637574696f6e5f696e7369676874731842200128013a00422f0a0a73657373696f6e5f696410011a0c0807100018003000501960002000300068007000780080010088010098010042340a0e7472616e73616374696f6e5f696410021a0d080e100018003000508617600020003000680070007800800100880100980100423f0a1a7472616e73616374696f6e5f66696e6765727072696e745f696410031a0c0808100018003000501160002000300068007000780080010088010098010042310a0c73746174656d656e745f696410041a0c08071000180030005019600020003000680070007800800100880100980100423d0a1873746174656d656e745f66696e6765727072696e745f696410051a0c08081000180030005011600020003000680070007800800100880100980100422c0a0770726f626c656d10061a0c08011040180030005014600020013000680070007800800100880100980100423c0a0663617573657310071a1d080f104018003000380150f8075a0c080110401800300050146000600020013000680070007800800100880100980100422a0a05717565727910081a0c08071000180030005019600020013000680070007800800100880100980100422b0a0673746174757310091a0c0801104018003000501460002001300068007000780080010088010098010042300a0a73746172745f74696d65100a1a0d080910001800300050a009600020013000680070007800800100880100980100422e0a08656e645f74696d65100b1a0d080910001800300050a009600020013000680070007800800100880100980100422e0a0966756c6c5f7363616e100c1a0c08001000180030005010600020013000680070007800800100880100980100422e0a09757365725f6e616d65100d1a0c08071000180030005019600020013000680070007800800100880100980100422d0a086170705f6e616d65100e1a0c0807100018003000501960002001300068007000780080010088010098010042320a0d757365725f7072696f72697479100f1a0c0807100018003000501960002001300068007000780080010088010098010042320a0d64617461626173655f6e616d6510101a0c08071000180030005019600020013000680070007800800100880100980100422e0a09706c616e5f6769737410111a0c08071000180030005019600020013000680070007800800100880100980100422c0a077265747269657310121a0c0801104018003000501460002001300068007000780080010088010098010042360a116c6173745f72657472795f726561736f6e10131a0c0807100018003000501960002001300068007000780080010088010098010042480a12657865637574696f6e5f6e6f64655f69647310141a1d080f104018003000380150f8075a0c080110401800300050146000600020013000680070007800800100880100980100424b0a15696e6465785f7265636f6d6d656e646174696f6e7310151a1d080f100018003000380750f1075a0c08071000180030005019600060002001300068007000780080010088010098010042310a0c696d706c696369745f74786e10161a0c0800100018003000501060002001300068007000780080010088010098010042320a0d6370755f73716c5f6e616e6f7310171a0c08011040180030005014600020013000680070007800800100880100980100422f0a0a6572726f725f636f646510181a0c08071000180030005019600020013000680070007800800100880100980100423b0a0f636f6e74656e74696f6e5f74696d6510191a13080610001800300050a20960006a04080010002001300068007000780080010088010098010042350a0f636f6e74656e74696f6e5f696e666f101a1a0d081210001800300050da1d600020013000680070007800800100880100980100422d0a0764657461696c73101b1a0d081210001800300050da1d60002001300068007000780080010088010098010042420a0763726561746564101c1a0d080910001800300050a009600020002a136e6f7728293a3a3a54494d455354414d50545a300068007000780080010088010098010042a0010a2a637264625f696e7465726e616c5f656e645f74696d655f73746172745f74696d655f73686172645f3136101d1a0c080110201800300050176000200030015a4f6d6f6428666e763332286d643528637264625f696e7465726e616c2e646174756d735f746f5f627974657328656e645f74696d652c2073746172745f74696d652929292c2031363a3a3a494e543829680070007800800101880100980100481e529a040a077072696d61727910011801220c73746174656d656e745f6964220e7472616e73616374696f6e5f69642a0
a73657373696f6e5f69642a1a7472616e73616374696f6e5f66696e6765727072696e745f69642a1873746174656d656e745f66696e6765727072696e745f69642a0770726f626c656d2a066361757365732a0571756572792a067374617475732a0a73746172745f74696d652a08656e645f74696d652a0966756c6c5f7363616e2a09757365725f6e616d652a086170705f6e616d652a0d757365725f7072696f726974792a0d64617461626173655f6e616d652a09706c616e5f676973742a07726574726965732a116c6173745f72657472795f726561736f6e2a12657865637574696f6e5f6e6f64655f6964732a15696e6465785f7265636f6d6d656e646174696f6e732a0c696d706c696369745f74786e2a0d6370755f73716c5f6e616e6f732a0a6572726f725f636f64652a0f636f6e74656e74696f6e5f74696d652a0f636f6e74656e74696f6e5f696e666f2a0764657461696c732a076372656174656430043002400040004a10080010001a00200028003000380040005a007001700370057006700770087009700a700b700c700d700e700f7010701170127013701470157016701770187019701a701b701c7a0408002000800100880100900104980101a20106080012001800a80100b20100ba0100c00100c80100d00101e00100e90100000000000000005a7c0a127472616e73616374696f6e5f69645f69647810021800220e7472616e73616374696f6e5f69643002380440004a10080010001a00200028003000380040005a007a0408002000800100880100900103980100a20106080012001800a80100b20100ba0100c00100c80100d00100e00100e90100000000000000005ab4010a1e7472616e73616374696f6e5f66696e6765727072696e745f69645f69647810031800221a7472616e73616374696f6e5f66696e6765727072696e745f6964220a73746172745f74696d652208656e645f74696d653003300a300b380438024000400140014a10080010001a00200028003000380040005a007a0408002000800100880100900103980100a20106080012001800a80100b20100ba0100c00100c80100d00100e00100e90100000000000000005ab0010a1c73746174656d656e745f66696e6765727072696e745f69645f69647810041800221873746174656d656e745f66696e6765727072696e745f6964220a73746172745f74696d652208656e645f74696d653005300a300b380438024000400140014a10080010001a00200028003000380040005a007a0408002000800100880100900103980100a20106080012001800a80100b20100ba0100c00100c80100d00100e00100e90100000000000000005af4010a0e74696d655f72616e67655f69647810051800222a637264625f696e7465726e616c5f656e645f74696d655f73746172745f74696d655f73686172645f3136220a73746172745f74696d652208656e645f74696d65301d300a300b380438024000400140014a10080010001a00200028003000380040005a007a0408002000800100880100900103980100a201460801122a637264625f696e7465726e616c5f656e645f74696d655f73746172745f74696d655f73686172645f313618102208656e645f74696d65220a73746172745f74696d65a80100b20100ba0100c00100c80100d00100e00100e901000000000000000060066a250a0d0a0561646d696e10e00318e0030a0c0a04726f6f7410e00318e00312046e6f64651803800101880103980100a20193020ad401637264625f696e7465726e616c5f656e645f74696d655f73746172745f74696d655f73686172645f313620494e2028303a3a3a494e54382c20313a3a3a494e54382c20323a3a3a494e54382c20333a3a3a494e54382c20343a3a3a494e54382c20353a3a3a494e54382c20363a3a3a494e54382c20373a3a3a494e54382c20383a3a3a494e54382c20393a3a3a494e54382c2031303a3a3a494e54382c2031313a3a3a494e54382c2031323a3a3a494e54382c2031333a3a3a494e54382c2031343a3a3a494e54382c2031353a3a3a494e5438291230636865636b5f637264625f696e7465726e616c5f656e645f74696d655f73746172745f74696d655f73686172645f31361800281d300038014002b201c8030a077072696d61727910001a0a73657373696f6e5f69641a0e7472616e73616374696f6e5f69641a1a7472616e73616374696f6e5f66696e6765727072696e745f69641a0c73746174656d656e745f69641a1873746174656d656e745f66696e6765727072696e745f69641a0770726f626c656d1a066361757365731a0571756572791a067374617475731a0a73746172745f74696d651a08656e645f74696d651a0966756c6c5f7363616e1a09757365725f6e616d651a086170705f6e616d651a0d757365725f7072696f726974791a0d646174616261
73655f6e616d651a09706c616e5f676973741a07726574726965731a116c6173745f72657472795f726561736f6e1a12657865637574696f6e5f6e6f64655f6964731a15696e6465785f7265636f6d6d656e646174696f6e731a0c696d706c696369745f74786e1a0d6370755f73716c5f6e616e6f731a0a6572726f725f636f64651a0f636f6e74656e74696f6e5f74696d651a0f636f6e74656e74696f6e5f696e666f1a0764657461696c731a0763726561746564200120022003200420052006200720082009200a200b200c200d200e200f2010201120122013201420152016201720182019201a201b201c2800b80101c20100e80100f2010408001200f801008002009202009a0200b20200b80200c0021dc80200e00200800300880303a80300b00300d00300d80300e00300"} +,{"key":"8b89cb8a89","value":"030abb1b0a0e7461626c655f6d657461646174611843200128013a00422a0a0564625f696410011a0c08011040180030005014600020003000680070007800800100880100980100422d0a087461626c655f696410021a0c08011040180030005014600020003000680070007800800100880100980100422c0a0764625f6e616d6510031a0c0807100018003000501960002000300068007000780080010088010098010042300a0b736368656d615f6e616d6510041a0c08071000180030005019600020003000680070007800800100880100980100422f0a0a7461626c655f6e616d6510051a0c0807100018003000501960002000300068007000780080010088010098010042320a0d746f74616c5f636f6c756d6e7310061a0c0801104018003000501460002000300068007000780080010088010098010042320a0d746f74616c5f696e646578657310071a0c08011040180030005014600020003000680070007800800100880100980100423f0a0973746f72655f69647310081a1d080f104018003000380150f8075a0c080110401800300050146000600020003000680070007800800100880100980100423b0a167265706c69636174696f6e5f73697a655f627974657310091a0c0801104018003000501460002000300068007000780080010088010098010042310a0c746f74616c5f72616e676573100a1a0c08011040180030005014600020003000680070007800800100880100980100423a0a15746f74616c5f6c6976655f646174615f6279746573100b1a0c0801104018003000501460002000300068007000780080010088010098010042350a10746f74616c5f646174615f6279746573100c1a0c0801104018003000501460002000300068007000780080010088010098010042340a0e706572635f6c6976655f64617461100d1a0d080210401800300050bd0560002000300068007000780080010088010098010042360a116c6173745f7570646174655f6572726f72100e1a0c0807100018003000501960002001300068007000780080010088010098010042470a0c6c6173745f75706461746564100f1a0d080910001800300050a009600020002a136e6f7728293a3a3a54494d455354414d50545a300068007000780080010088010098010042a4010a2c637264625f696e7465726e616c5f6c6173745f757064617465645f7461626c655f69645f73686172645f313610101a0c080110201800300050176000200030015a516d6f6428666e763332286d643528637264625f696e7465726e616c2e646174756d735f746f5f6279746573287461626c655f69642c206c6173745f757064617465642929292c2031363a3a3a494e543829680070007800800101880100980100481152d9020a077072696d61727910011801220564625f696422087461626c655f69642a0764625f6e616d652a0b736368656d615f6e616d652a0a7461626c655f6e616d652a0d746f74616c5f636f6c756d6e732a0d746f74616c5f696e64657865732a0973746f72655f6964732a167265706c69636174696f6e5f73697a655f62797465732a0c746f74616c5f72616e6765732a15746f74616c5f6c6976655f646174615f62797465732a10746f74616c5f646174615f62797465732a0e706572635f6c6976655f646174612a116c6173745f7570646174655f6572726f722a0c6c6173745f7570646174656430013002400040004a10080010001a00200028003000380040005a007003700470057006700770087009700a700b700c700d700e700f7a0408002000800100880100900104980101a20106080012001800a80100b20100ba0100c00100c80100d00101e00100e90100000000000000005aa3010a237265706c69636174696f6e5f73697a655f62797465735f7461626c655f69645f6964781002180022167265706c69636174696f6e5f73697a655f627974657322087461626c655f6964300930023801400140004a10080010001a002
00028003000380040005a007a0408002000800100880100900103980100a20106080012001800a80100b20100ba0100c00100c80100d00100e00100e90100000000000000005a8f010a19746f74616c5f72616e6765735f7461626c655f69645f69647810031800220c746f74616c5f72616e67657322087461626c655f6964300a30023801400140004a10080010001a00200028003000380040005a007a0408002000800100880100900103980100a20106080012001800a80100b20100ba0100c00100c80100d00100e00100e90100000000000000005a91010a1a746f74616c5f636f6c756d6e735f7461626c655f69645f69647810041800220d746f74616c5f636f6c756d6e7322087461626c655f6964300630023801400140004a10080010001a00200028003000380040005a007a0408002000800100880100900103980100a20106080012001800a80100b20100ba0100c00100c80100d00100e00100e90100000000000000005a91010a1a746f74616c5f696e64657865735f7461626c655f69645f69647810051800220d746f74616c5f696e646578657322087461626c655f6964300730023801400140004a10080010001a00200028003000380040005a007a0408002000800100880100900103980100a20106080012001800a80100b20100ba0100c00100c80100d00100e00100e90100000000000000005a8f010a15706572635f6c6976655f646174615f69645f69647810061800220e706572635f6c6976655f6461746122087461626c655f6964300d30023801400140004a10080010001a00200028003000380040005a00680d7a0408002000800100880100900103980100a20106080012001800a80100b20100ba0100c00100c80100d00100e00100e90100000000000000005afc010a106c6173745f757064617465645f69647810071800222c637264625f696e7465726e616c5f6c6173745f757064617465645f7461626c655f69645f73686172645f3136220c6c6173745f7570646174656422087461626c655f69643010300f300238014000400140004a10080010001a00200028003000380040005a007a0408002000800100880100900103980100a2014a0801122c637264625f696e7465726e616c5f6c6173745f757064617465645f7461626c655f69645f73686172645f31361810220c6c6173745f7570646174656422087461626c655f6964a80100b20100ba0100c00100c80100d00100e00100e90100000000000000005a730a0b64625f6e616d655f67696e10081800220764625f6e616d6530033801380240004a10080010001a00200028003000380040005a007a0408002000800101880100900103980100a20106080012001800a80100b20100ba0100c00100c80100d00100d80101e00100e90100000000000000005a790a0e7461626c655f6e616d655f67696e10091800220a7461626c655f6e616d6530053801380240004a10080010001a00200028003000380040005a007a0408002000800101880100900103980100a20106080012001800a80100b20100ba0100c00100c80100d00100d80101e00100e90100000000000000005a7b0a0f736368656d615f6e616d655f67696e100a1800220b736368656d615f6e616d6530043801380240004a10080010001a00200028003000380040005a007a0408002000800101880100900103980100a20106080012001800a80100b20100ba0100c00100c80100d00100d80101e00100e90100000000000000005a770a0d73746f72655f6964735f67696e100b1800220973746f72655f69647330083801380240004a10080010001a00200028003000380040005a007a0408002000800101880100900103980100a20106080012001800a80100b20100ba0100c00100c80100d00100d80100e00100e9010000000000000000600c6a250a0d0a0561646d696e10e00318e0030a0c0a04726f6f7410e00318e00312046e6f64651803800101880103980100a20197020ad601637264625f696e7465726e616c5f6c6173745f757064617465645f7461626c655f69645f73686172645f313620494e2028303a3a3a494e54382c20313a3a3a494e54382c20323a3a3a494e54382c20333a3a3a494e54382c20343a3a3a494e54382c20353a3a3a494e54382c20363a3a3a494e54382c20373a3a3a494e54382c20383a3a3a494e54382c20393a3a3a494e54382c2031303a3a3a494e54382c2031313a3a3a494e54382c2031323a3a3a494e54382c2031333a3a3a494e54382c2031343a3a3a494e54382c2031353a3a3a494e5438291232636865636b5f637264625f696e7465726e616c5f6c6173745f757064617465645f7461626c655f69645f73686172645f313618002810300038014002b20187020a077072696d61727910001a0564625f69641a087461626c655f69641a0764625f6e616d651a0b736368
656d615f6e616d651a0a7461626c655f6e616d651a0d746f74616c5f636f6c756d6e731a0d746f74616c5f696e64657865731a0973746f72655f6964731a167265706c69636174696f6e5f73697a655f62797465731a0c746f74616c5f72616e6765731a15746f74616c5f6c6976655f646174615f62797465731a10746f74616c5f646174615f62797465731a0e706572635f6c6976655f646174611a116c6173745f7570646174655f6572726f721a0c6c6173745f75706461746564200120022003200420052006200720082009200a200b200c200d200e200f2800b80101c20100e80100f2010408001200f801008002009202009a0200b20200b80200c0021dc80200e00200800300880303a80300b00300d00300d80300e00300"} ,{"key":"8d89888a89","value":"031080808040188080808002220308c0702803500058007801"} ,{"key":"8f898888","value":"01c801"} ,{"key":"90898988","value":"0a2a160c080110001a0020002a004200160673797374656d13021304"} @@ -304,6 +308,7 @@ tenant hash=17c52d00b75a99743d728d962c813d59843133b3181664f566183fc1aeece00f ,{"key":"a68989a51273746174656d656e745f646961676e6f73746963735f726571756573747300018c89","value":"0146"} ,{"key":"a68989a51273746174656d656e745f657865637574696f6e5f696e73696768747300018c89","value":"018401"} ,{"key":"a68989a51273746174656d656e745f7374617469737469637300018c89","value":"0154"} +,{"key":"a68989a5127461626c655f6d6574616461746100018c89","value":"018601"} ,{"key":"a68989a5127461626c655f7374617469737469637300018c89","value":"0128"} ,{"key":"a68989a5127461736b5f7061796c6f61647300018c89","value":"0176"} ,{"key":"a68989a51274656e616e745f69645f73657100018c89","value":"017e"} diff --git a/pkg/sql/catalog/catpb/catalog.go b/pkg/sql/catalog/catpb/catalog.go index c27238876757..95ccedfbfda7 100644 --- a/pkg/sql/catalog/catpb/catalog.go +++ b/pkg/sql/catalog/catpb/catalog.go @@ -14,6 +14,10 @@ package catpb // enabled table setting is enabled, disabled, or not set. type AutoStatsCollectionStatus int +// AutoPartialStatsCollectionStatus represents whether the auto stats +// collections enabled table setting is enabled, disabled or not set. +type AutoPartialStatsCollectionStatus int + // The values for AutoStatsCollectionStatus. const ( AutoStatsCollectionNotSet AutoStatsCollectionStatus = iota @@ -21,6 +25,12 @@ const ( AutoStatsCollectionDisabled ) +const ( + AutoPartialStatsCollectionNotSet AutoPartialStatsCollectionStatus = iota + AutoPartialStatsCollectionEnabled + AutoPartialStatsCollectionDisabled +) + const ( // AutoStatsEnabledSettingName is the name of the automatic stats collection // enabled cluster setting. @@ -53,6 +63,30 @@ const ( // AutoStatsFractionStaleTableSettingName is the name of the automatic stats // collection fraction stale rows table setting. AutoStatsFractionStaleTableSettingName = "sql_stats_automatic_collection_fraction_stale_rows" + + // AutoPartialStatsEnabledSettingName is the name of the automatic partial + // stats collection enabled cluster setting + AutoPartialStatsEnabledSettingName = "sql.stats.automatic_partial_collection.enabled" + + // AutoPartialStatsEnabledTableSettingName is the name of the automatic + // partial stats collection enabled table setting. + AutoPartialStatsEnabledTableSettingName = "sql_stats_automatic_partial_collection_enabled" + + // AutoPartialStatsMinStaleSettingName is the name of the automatic partial + // stats collection min stale rows cluster setting + AutoPartialStatsMinStaleSettingName = "sql.stats.automatic_partial_collection.min_stale_rows" + + // AutoPartialStatsMinStaleTableSettingName is the name of the automatic + // partial stats collection min stale rows table setting. 
+ AutoPartialStatsMinStaleTableSettingName = "sql_stats_automatic_partial_collection_min_stale_rows" + + // AutoPartialStatsFractionStaleSettingName is the name of the automatic + // partial stats collection fraction stale rows cluster setting. + AutoPartialStatsFractionStaleSettingName = "sql.stats.automatic_partial_collection.fraction_stale_rows" + + // AutoPartialStatsFractionStaleTableSettingName is the name of the automatic + // partial stats collection fraction stale rows table setting. + AutoPartialStatsFractionStaleTableSettingName = "sql_stats_automatic_partial_collection_fraction_stale_rows" ) // AutoStatsCollectionEnabled indicates if automatic statistics collection is @@ -93,7 +127,10 @@ func (as *AutoStatsSettings) AutoStatsFractionStaleRows() (fractionStaleRows flo func (as *AutoStatsSettings) NoAutoStatsSettingsOverrides() bool { if as.Enabled != nil || as.MinStaleRows != nil || - as.FractionStaleRows != nil { + as.FractionStaleRows != nil || + as.PartialEnabled != nil || + as.PartialMinStaleRows != nil || + as.PartialFractionStaleRows != nil { return false } return true @@ -107,6 +144,43 @@ const TTLDefaultExpirationColumnName = "crdb_internal_expiration" // ttl_expiration_expression is not specified var DefaultTTLExpirationExpr = Expression(TTLDefaultExpirationColumnName) +// AutoPartialStatsCollectionEnabled indicates if automatic partial statistics +// collection is explicitly enabled or disabled. +func (as *AutoStatsSettings) AutoPartialStatsCollectionEnabled() AutoPartialStatsCollectionStatus { + if as.PartialEnabled == nil { + return AutoPartialStatsCollectionNotSet + } + if *as.PartialEnabled { + return AutoPartialStatsCollectionEnabled + } + return AutoPartialStatsCollectionDisabled +} + +// AutoPartialStatsMinStaleRows indicates the setting of +// sql_stats_automatic_partial_collection_min_stale_rows in +// AutoStatsSettings. If ok is true, then the minStaleRows value is +// valid, otherwise this has not been set. +func (as *AutoStatsSettings) AutoPartialStatsMinStaleRows() (minStaleRows int64, ok bool) { + if as.PartialMinStaleRows == nil { + return 0, false + } + return *as.PartialMinStaleRows, true +} + +// AutoPartialStatsFractionStaleRows indicates the setting of +// sql_stats_automatic_partial_collection_fraction_stale_rows in +// AutoStatsSettings. If ok is true, then the fractionStaleRows value is valid, +// otherwise this has not been set. +func (as *AutoStatsSettings) AutoPartialStatsFractionStaleRows() ( + fractionStaleRows float64, + ok bool, +) { + if as.PartialFractionStaleRows == nil { + return 0, false + } + return *as.PartialFractionStaleRows, true +} + // HasDurationExpr is a utility method to determine if ttl_expires_after was set func (rowLevelTTL *RowLevelTTL) HasDurationExpr() bool { return rowLevelTTL.DurationExpr != "" diff --git a/pkg/sql/catalog/catpb/catalog.proto b/pkg/sql/catalog/catpb/catalog.proto index 86d497cc8f07..7f8a2b27a170 100644 --- a/pkg/sql/catalog/catpb/catalog.proto +++ b/pkg/sql/catalog/catpb/catalog.proto @@ -215,4 +215,10 @@ message AutoStatsSettings { optional int64 min_stale_rows = 2; // FractionStaleRows is table setting sql_stats_automatic_collection_fraction_stale_rows. optional double fraction_stale_rows = 3; + // PartialEnabled is table setting sql_stats_automatic_partial_collection_enabled. + optional bool partial_enabled = 4; + // PartialMinStaleRows is table setting sql_stats_automatic_partial_collection_min_stale_rows. 
+ optional int64 partial_min_stale_rows = 5; + // PartialFractionStaleRows is table setting sql_stats_automatic_partial_collection_fraction_stale_rows. + optional double partial_fraction_stale_rows = 6; } diff --git a/pkg/sql/catalog/catpb/function.proto b/pkg/sql/catalog/catpb/function.proto index ae6b0681d0f6..2c0da590be3a 100644 --- a/pkg/sql/catalog/catpb/function.proto +++ b/pkg/sql/catalog/catpb/function.proto @@ -46,6 +46,11 @@ message Function { VARIADIC = 4; } } + + enum Security { + INVOKER = 0; + DEFINER = 1; + } } // These wrappers are for the convenience of referencing the enum types from a @@ -69,3 +74,9 @@ message FunctionParamClass { option (gogoproto.equal) = true; optional Function.Param.Class class = 1 [(gogoproto.nullable) = false]; } + +message FunctionSecurity { + option (gogoproto.equal) = true; + // If security is not explicitly set, the default security mode is INVOKER. + optional Function.Security security = 1 [(gogoproto.nullable) = false]; +} diff --git a/pkg/sql/catalog/catprivilege/system.go b/pkg/sql/catalog/catprivilege/system.go index 46ecfcc0848a..12a1f603051d 100644 --- a/pkg/sql/catalog/catprivilege/system.go +++ b/pkg/sql/catalog/catprivilege/system.go @@ -78,6 +78,7 @@ var ( catconstants.MVCCStatistics, catconstants.TxnExecInsightsTableName, catconstants.StmtExecInsightsTableName, + catconstants.TableMetadata, } readWriteSystemSequences = []catconstants.SystemTableName{ diff --git a/pkg/sql/catalog/descpb/structured.proto b/pkg/sql/catalog/descpb/structured.proto index 7121af7d2253..c7321b0d7a4e 100644 --- a/pkg/sql/catalog/descpb/structured.proto +++ b/pkg/sql/catalog/descpb/structured.proto @@ -1732,7 +1732,11 @@ message FunctionDescriptor { // depends on. repeated uint32 depends_on_functions = 22 [(gogoproto.casttype) = "ID"]; - // Next field id is 23 + // Security contains the security mode that this function gets executed with. + // The default mode is INVOKER. + optional cockroach.sql.catalog.catpb.Function.Security security = 23 [(gogoproto.nullable) = false]; + + // Next field id is 24 } // Descriptor is a union type for descriptors for tables, schemas, databases, diff --git a/pkg/sql/catalog/descriptor.go b/pkg/sql/catalog/descriptor.go index 86b80ceda0d1..20dab3c12669 100644 --- a/pkg/sql/catalog/descriptor.go +++ b/pkg/sql/catalog/descriptor.go @@ -729,6 +729,9 @@ type TableDescriptor interface { // AutoStatsCollectionEnabled indicates if automatic statistics collection is // explicitly enabled or disabled for this table. AutoStatsCollectionEnabled() catpb.AutoStatsCollectionStatus + // AutoPartialStatsCollectionEnabled indicates if automatic partial statistics + // collection is explicitly enabled or disabled for this table. + AutoPartialStatsCollectionEnabled() catpb.AutoPartialStatsCollectionStatus // AutoStatsMinStaleRows indicates the setting of // sql_stats_automatic_collection_min_stale_rows for this table. // If ok is true, then the minStaleRows value is valid, otherwise this has not @@ -999,6 +1002,9 @@ type FunctionDescriptor interface { // IsProcedure returns true if the descriptor represents a procedure. It // returns false if the descriptor represents a user-defined function. IsProcedure() bool + + // GetSecurity returns the security specification of this function. + GetSecurity() catpb.Function_Security } // FilterDroppedDescriptor returns an error if the descriptor state is DROP. 
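To illustrate the new tri-state partial-stats accessors added to catpb above, here is a minimal sketch (not part of the patch; the AutoStatsSettings literal and the printed fallback behavior are made up for illustration):

    package main

    import (
        "fmt"

        "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb"
    )

    func main() {
        enabled := true
        minStale := int64(100)
        // Hypothetical table-level overrides, as a table descriptor would
        // carry them after ALTER TABLE ... SET (...).
        as := &catpb.AutoStatsSettings{
            PartialEnabled:      &enabled,
            PartialMinStaleRows: &minStale,
        }

        // The accessor is deliberately tri-state: NotSet means "defer to the
        // sql.stats.automatic_partial_collection.enabled cluster setting".
        switch as.AutoPartialStatsCollectionEnabled() {
        case catpb.AutoPartialStatsCollectionEnabled:
            fmt.Println("partial collection explicitly enabled")
        case catpb.AutoPartialStatsCollectionDisabled:
            fmt.Println("partial collection explicitly disabled")
        case catpb.AutoPartialStatsCollectionNotSet:
            fmt.Println("no table override; use the cluster setting")
        }

        // The (value, ok) accessors distinguish "unset" from a zero value.
        if v, ok := as.AutoPartialStatsMinStaleRows(); ok {
            fmt.Println("table override for min stale rows:", v)
        }
    }

Note that NoAutoStatsSettingsOverrides now also reports false when any of the three partial fields is set, so callers that short-circuit on "no overrides" pick up the new settings without further changes.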
diff --git a/pkg/sql/catalog/funcdesc/func_desc.go b/pkg/sql/catalog/funcdesc/func_desc.go index a27a91e4f802..0f915230c481 100644 --- a/pkg/sql/catalog/funcdesc/func_desc.go +++ b/pkg/sql/catalog/funcdesc/func_desc.go @@ -576,6 +576,11 @@ func (desc *Mutable) SetFuncBody(v string) { desc.FunctionBody = v } +// SetSecurity sets the Security attribute. +func (desc *Mutable) SetSecurity(v catpb.Function_Security) { + desc.Security = v +} + // SetName sets the function name. func (desc *Mutable) SetName(n string) { desc.Name = n @@ -775,6 +780,10 @@ func (desc *immutable) GetLanguage() catpb.Function_Language { return desc.Lang } +func (desc *immutable) GetSecurity() catpb.Function_Security { + return desc.Security +} + func (desc *immutable) ToOverload() (ret *tree.Overload, err error) { routineType := tree.UDFRoutine if desc.IsProcedure() { @@ -885,14 +894,15 @@ func (desc *immutable) ToCreateExpr() (ret *tree.CreateRoutine, err error) { } } } - // We only store 5 function attributes at the moment. We may extend the + // We only store 6 function attributes at the moment. We may extend the // pre-allocated capacity in the future. - ret.Options = make(tree.RoutineOptions, 0, 5) + ret.Options = make(tree.RoutineOptions, 0, 6) ret.Options = append(ret.Options, desc.getCreateExprVolatility()) ret.Options = append(ret.Options, tree.RoutineLeakproof(desc.LeakProof)) ret.Options = append(ret.Options, desc.getCreateExprNullInputBehavior()) ret.Options = append(ret.Options, tree.RoutineBodyStr(desc.FunctionBody)) ret.Options = append(ret.Options, desc.getCreateExprLang()) + ret.Options = append(ret.Options, desc.getCreateExprSecurity()) return ret, nil } @@ -935,7 +945,17 @@ func (desc *immutable) getCreateExprNullInputBehavior() tree.RoutineNullInputBeh return 0 } -// ToTreeRoutineParamClass converts the proto enum value to the correspoding +func (desc *immutable) getCreateExprSecurity() tree.RoutineSecurity { + switch desc.Security { + case catpb.Function_INVOKER: + return tree.RoutineInvoker + case catpb.Function_DEFINER: + return tree.RoutineDefiner + } + return 0 +} + +// ToTreeRoutineParamClass converts the proto enum value to the corresponding // tree.RoutineParamClass. func ToTreeRoutineParamClass(class catpb.Function_Param_Class) tree.RoutineParamClass { switch class { diff --git a/pkg/sql/catalog/funcinfo/properties.go b/pkg/sql/catalog/funcinfo/properties.go index 3e8bf510247a..910ba25b20bf 100644 --- a/pkg/sql/catalog/funcinfo/properties.go +++ b/pkg/sql/catalog/funcinfo/properties.go @@ -135,3 +135,14 @@ func ParamClassToProto(v tree.RoutineParamClass) (catpb.Function_Param_Class, er return -1, errors.AssertionFailedf("unknown function parameter class %q", v) } + +// SecurityToProto converts sql statement input security to protobuf type. 
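+//
+// Illustrative use (a sketch, not part of this patch; mut stands for a
+// hypothetical *funcdesc.Mutable being populated from CREATE FUNCTION
+// options):
+//
+//	sec, err := funcinfo.SecurityToProto(tree.RoutineDefiner)
+//	if err != nil {
+//		return err
+//	}
+//	mut.SetSecurity(sec)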
+func SecurityToProto(v tree.RoutineSecurity) (catpb.Function_Security, error) { + switch v { + case tree.RoutineInvoker: + return catpb.Function_INVOKER, nil + case tree.RoutineDefiner: + return catpb.Function_DEFINER, nil + } + return -1, errors.AssertionFailedf("unknown function security class %q", v) +} diff --git a/pkg/sql/catalog/internal/catkv/testdata/testdata_app b/pkg/sql/catalog/internal/catkv/testdata/testdata_app index 1d76a8b7d6f2..fa751bade8f8 100644 --- a/pkg/sql/catalog/internal/catkv/testdata/testdata_app +++ b/pkg/sql/catalog/internal/catkv/testdata/testdata_app @@ -375,6 +375,9 @@ catalog: "066": descriptor: relation namespace: (1, 29, "statement_execution_insights") + "067": + descriptor: relation + namespace: (1, 29, "table_metadata") "100": comments: database: this is the default database diff --git a/pkg/sql/catalog/internal/catkv/testdata/testdata_system b/pkg/sql/catalog/internal/catkv/testdata/testdata_system index 1ee37bf844bc..829b2dd83378 100644 --- a/pkg/sql/catalog/internal/catkv/testdata/testdata_system +++ b/pkg/sql/catalog/internal/catkv/testdata/testdata_system @@ -393,6 +393,9 @@ catalog: "066": descriptor: relation namespace: (1, 29, "statement_execution_insights") + "067": + descriptor: relation + namespace: (1, 29, "table_metadata") "100": comments: database: this is the default database diff --git a/pkg/sql/catalog/lease/lease.go b/pkg/sql/catalog/lease/lease.go index 0da3eebb7e47..c73350576b1a 100644 --- a/pkg/sql/catalog/lease/lease.go +++ b/pkg/sql/catalog/lease/lease.go @@ -1577,17 +1577,20 @@ func (m *Manager) checkRangeFeedStatus(ctx context.Context) (forceRefresh bool) // range feed progress / recovery, and supporting legacy expiry // based leases. func (m *Manager) RunBackgroundLeasingTask(ctx context.Context) { - _ = m.stopper.RunAsyncTask(ctx, "lease-refresher", func(ctx context.Context) { - refreshTimerDuration := LeaseDuration.Get(&m.storage.settings.SV) - renewalsDisabled := false - if refreshTimerDuration <= 0 { + renewalsDisabled := false + getRefreshTimerDuration := func() time.Duration { + if LeaseDuration.Get(&m.storage.settings.SV) <= 0 { // Session based leasing still needs a refresh loop to expire // leases, so we will execute that without any renewals. - refreshTimerDuration = time.Millisecond * 200 renewalsDisabled = true + return 200 * time.Millisecond } else { - refreshTimerDuration = m.storage.jitteredLeaseDuration() + renewalsDisabled = false + return m.storage.jitteredLeaseDuration() } + } + _ = m.stopper.RunAsyncTask(ctx, "lease-refresher", func(ctx context.Context) { + refreshTimerDuration := getRefreshTimerDuration() var refreshTimer timeutil.Timer defer refreshTimer.Stop() refreshTimer.Reset(refreshTimerDuration / 2) @@ -1624,7 +1627,7 @@ func (m *Manager) RunBackgroundLeasingTask(ctx context.Context) { m.refreshSomeLeases(ctx, true /*refreshAll*/) case <-refreshTimer.C: refreshTimer.Read = true - refreshTimer.Reset(m.storage.jitteredLeaseDuration() / 2) + refreshTimer.Reset(getRefreshTimerDuration() / 2) // Check for any react to any range feed availability problems, and // if needed refresh the full set of descriptors. diff --git a/pkg/sql/catalog/lease/lease_test.go b/pkg/sql/catalog/lease/lease_test.go index d4044aec28f0..01d73b31435b 100644 --- a/pkg/sql/catalog/lease/lease_test.go +++ b/pkg/sql/catalog/lease/lease_test.go @@ -3674,7 +3674,7 @@ func TestLeaseDescriptorRangeFeedFailure(t *testing.T) { // so the update is detected. 
if p.Params.ExecutionPhase == scop.PostCommitPhase && enableAfterStageKnob.Load() && - strings.Contains(p.Statements[0].Statement, "ADD COLUMN") { + strings.Contains(p.Statements[0].Statement, "ALTER TABLE t1 ADD COLUMN j INT DEFAULT 64") { rangeFeedResetChan = srv.ApplicationLayer(1).LeaseManager().(*lease.Manager).TestingSetDisableRangeFeedCheckpointFn(true) } return nil @@ -3684,7 +3684,7 @@ // so the update is detected. if p.Params.ExecutionPhase == scop.PostCommitPhase && enableAfterStageKnob.Load() && - strings.Contains(p.Statements[0].Statement, "ADD COLUMN") { + strings.Contains(p.Statements[0].Statement, "ALTER TABLE t1 ADD COLUMN j INT DEFAULT 64") { <-rangeFeedResetChan srv.ApplicationLayer(1).LeaseManager().(*lease.Manager).TestingSetDisableRangeFeedCheckpointFn(false) enableAfterStageKnob.Swap(false) diff --git a/pkg/sql/catalog/serial_helper.go b/pkg/sql/catalog/serial_helper.go new file mode 100644 index 000000000000..530ed06bdcc5 --- /dev/null +++ b/pkg/sql/catalog/serial_helper.go @@ -0,0 +1,54 @@ +// Copyright 2024 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package catalog + +import ( + "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" + "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" + "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" + "github.com/cockroachdb/cockroach/pkg/sql/types" +) + +// UseRowID returns a copy of the given column definition rewritten to a +// non-nullable INT column whose default expression is unique_rowid(). +func UseRowID(d tree.ColumnTableDef) *tree.ColumnTableDef { + d.DefaultExpr.Expr = &tree.FuncExpr{Func: tree.WrapFunction("unique_rowid")} + d.Type = types.Int + // Column is non-nullable in all cases. PostgreSQL requires this. + d.Nullable.Nullability = tree.NotNull + + return &d +} + +// AssertValidSerialColumnDef returns an error if the given column definition +// cannot legally carry SERIAL semantics. +func AssertValidSerialColumnDef(d *tree.ColumnTableDef, tableName *tree.TableName) error { + if d.HasDefaultExpr() { + // SERIAL implies a new default expression, we can't have one to + // start with. This is the error produced by pg in such a case. + return pgerror.Newf(pgcode.Syntax, + "multiple default values specified for column %q of table %q", + tree.ErrString(&d.Name), tree.ErrString(tableName)) + } + + if d.Nullable.Nullability == tree.Null { + // SERIAL implies a non-NULL column, we can't accept a nullability + // spec. This is the error produced by pg in such a case. + return pgerror.Newf(pgcode.Syntax, + "conflicting NULL/NOT NULL declarations for column %q of table %q", + tree.ErrString(&d.Name), tree.ErrString(tableName)) + } + + if d.Computed.Expr != nil { + // SERIAL cannot be a computed column.
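+ // For example, a hypothetical "c SERIAL AS (a + b) STORED" definition is rejected here, since any non-nil Computed.Expr fails this check.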
+		return pgerror.Newf(pgcode.Syntax,
+			"SERIAL column %q of table %q cannot be computed",
+			tree.ErrString(&d.Name), tree.ErrString(tableName))
+	}
+
+	return nil
+}
diff --git a/pkg/sql/catalog/systemschema/system.go b/pkg/sql/catalog/systemschema/system.go
index a7e074bb558c..ecf6c07d4f65 100644
--- a/pkg/sql/catalog/systemschema/system.go
+++ b/pkg/sql/catalog/systemschema/system.go
@@ -847,8 +847,18 @@ CREATE TABLE system.sql_instances (
 	sql_addr STRING,
 	crdb_region BYTES NOT NULL,
 	binary_version STRING,
+	is_draining BOOL NULL,
 	CONSTRAINT "primary" PRIMARY KEY (crdb_region, id),
-	FAMILY "primary" (id, addr, session_id, locality, sql_addr, crdb_region, binary_version)
+	FAMILY "primary" (
+		id,
+		addr,
+		session_id,
+		locality,
+		sql_addr,
+		crdb_region,
+		binary_version,
+		is_draining
+	)
 )`
 
 	SpanConfigurationsTableSchema = `
@@ -1175,6 +1185,54 @@ CREATE TABLE system.mvcc_statistics (
 		created
 	)
 );`
+
+	crdbInternalTableIdLastUpdatedShard = "mod(fnv32(md5(crdb_internal.datums_to_bytes(table_id, last_updated))), 16:::INT8)"
+	TableMetadataTableSchema = `CREATE TABLE system.table_metadata (
+	db_id INT8 NOT NULL,
+	table_id INT8 NOT NULL,
+	db_name STRING NOT NULL,
+	schema_name STRING NOT NULL,
+	table_name STRING NOT NULL,
+	total_columns INT8 NOT NULL,
+	total_indexes INT8 NOT NULL,
+	store_ids INT8[] NOT NULL,
+	replication_size_bytes INT8 NOT NULL,
+	total_ranges INT8 NOT NULL,
+	total_live_data_bytes INT8 NOT NULL,
+	total_data_bytes INT8 NOT NULL,
+	perc_live_data FLOAT8 NOT NULL,
+	last_update_error STRING,
+	last_updated TIMESTAMPTZ NOT NULL DEFAULT now(),
+	crdb_internal_last_updated_table_id_shard_16 INT4 NOT VISIBLE NOT NULL AS (` + crdbInternalTableIdLastUpdatedShard + `) VIRTUAL,
+	CONSTRAINT "primary" PRIMARY KEY (db_id, table_id),
+	INDEX "replication_size_bytes_table_id_idx" (replication_size_bytes DESC, table_id),
+	INDEX "total_ranges_table_id_idx" (total_ranges DESC, table_id),
+	INDEX "total_columns_table_id_idx" (total_columns DESC, table_id),
+	INDEX "total_indexes_table_id_idx" (total_indexes DESC, table_id),
+	INDEX "perc_live_data_id_idx" (perc_live_data DESC, table_id),
+	INDEX "last_updated_idx" (last_updated DESC, table_id) USING HASH,
+	INVERTED INDEX db_name_gin (db_name gin_trgm_ops),
+	INVERTED INDEX table_name_gin (table_name gin_trgm_ops),
+	INVERTED INDEX schema_name_gin (schema_name gin_trgm_ops),
+	INVERTED INDEX store_ids_gin (store_ids),
+	FAMILY "primary" (
+		db_id,
+		table_id,
+		db_name,
+		schema_name,
+		table_name,
+		total_columns,
+		total_indexes,
+		store_ids,
+		replication_size_bytes,
+		total_ranges,
+		total_live_data_bytes,
+		total_data_bytes,
+		perc_live_data,
+		last_update_error,
+		last_updated
+	)
+);`
 )
 
 func pk(name string) descpb.IndexDescriptor {
@@ -1227,7 +1285,7 @@ const SystemDatabaseName = catconstants.SystemDatabaseName
 // release version).
 //
 // NB: Don't set this to clusterversion.Latest; use a specific version instead.
-var SystemDatabaseSchemaBootstrapVersion = clusterversion.V24_3_AddTimeseriesZoneConfig.Version()
+var SystemDatabaseSchemaBootstrapVersion = clusterversion.V24_3_SQLInstancesAddDraining.Version()
 
 // MakeSystemDatabaseDesc constructs a copy of the system database
 // descriptor.
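A note on the index shapes in the schema above: each descending secondary index pairs its sort column with table_id, which is the shape a top-N listing with stable pagination wants. As a minimal sketch (the selected columns and the LIMIT are illustrative, not part of this patch), a "largest tables" query can be answered entirely from replication_size_bytes_table_id_idx:

    SELECT db_name, schema_name, table_name, replication_size_bytes
    FROM system.table_metadata
    ORDER BY replication_size_bytes DESC, table_id ASC
    LIMIT 20;

Likewise, the gin_trgm_ops inverted indexes exist so that fuzzy name filters such as table_name LIKE '%foo%' need not scan the whole table.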
@@ -1418,6 +1476,7 @@ func MakeSystemTables() []SystemTable { SystemMVCCStatisticsTable, StatementExecInsightsTable, TransactionExecInsightsTable, + TableMetadata, } } @@ -3844,13 +3903,14 @@ var ( {Name: "sql_addr", ID: 5, Type: types.String, Nullable: true}, {Name: "crdb_region", ID: 6, Type: types.Bytes, Nullable: false}, {Name: "binary_version", ID: 7, Type: types.String, Nullable: true}, + {Name: "is_draining", ID: 8, Type: types.Bool, Nullable: true}, }, []descpb.ColumnFamilyDescriptor{ { Name: "primary", ID: 0, - ColumnNames: []string{"id", "addr", "session_id", "locality", "sql_addr", "crdb_region", "binary_version"}, - ColumnIDs: []descpb.ColumnID{1, 2, 3, 4, 5, 6, 7}, + ColumnNames: []string{"id", "addr", "session_id", "locality", "sql_addr", "crdb_region", "binary_version", "is_draining"}, + ColumnIDs: []descpb.ColumnID{1, 2, 3, 4, 5, 6, 7, 8}, DefaultColumnID: 0, }, }, @@ -4738,6 +4798,222 @@ var ( tbl.NextConstraintID++ }, ) + + crdbInternalTableIdLastUpdatedShardStr = crdbInternalTableIdLastUpdatedShard + + TableMetadata = makeSystemTable( + TableMetadataTableSchema, + systemTable(catconstants.TableMetadata, + descpb.InvalidID, // dynamically assigned table ID + []descpb.ColumnDescriptor{ + {Name: "db_id", ID: 1, Type: types.Int, Nullable: false}, + {Name: "table_id", ID: 2, Type: types.Int, Nullable: false}, + {Name: "db_name", ID: 3, Type: types.String, Nullable: false}, + {Name: "schema_name", ID: 4, Type: types.String, Nullable: false}, + {Name: "table_name", ID: 5, Type: types.String, Nullable: false}, + {Name: "total_columns", ID: 6, Type: types.Int, Nullable: false}, + {Name: "total_indexes", ID: 7, Type: types.Int, Nullable: false}, + {Name: "store_ids", ID: 8, Type: types.IntArray, Nullable: false}, + {Name: "replication_size_bytes", ID: 9, Type: types.Int, Nullable: false}, + {Name: "total_ranges", ID: 10, Type: types.Int, Nullable: false}, + {Name: "total_live_data_bytes", ID: 11, Type: types.Int, Nullable: false}, + {Name: "total_data_bytes", ID: 12, Type: types.Int, Nullable: false}, + {Name: "perc_live_data", ID: 13, Type: types.Float, Nullable: false}, + {Name: "last_update_error", ID: 14, Type: types.String, Nullable: true}, + {Name: "last_updated", ID: 15, Type: types.TimestampTZ, Nullable: false, DefaultExpr: &nowTZString}, + { + Name: "crdb_internal_last_updated_table_id_shard_16", + ID: 16, + Type: types.Int4, + Nullable: false, + ComputeExpr: &crdbInternalTableIdLastUpdatedShardStr, + Hidden: true, + Virtual: true, + }, + }, + []descpb.ColumnFamilyDescriptor{ + { + Name: "primary", + ID: 0, + ColumnNames: []string{ + "db_id", + "table_id", + "db_name", + "schema_name", + "table_name", + "total_columns", + "total_indexes", + "store_ids", + "replication_size_bytes", + "total_ranges", + "total_live_data_bytes", + "total_data_bytes", + "perc_live_data", + "last_update_error", + "last_updated", + }, + ColumnIDs: []descpb.ColumnID{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, + }, + }, + descpb.IndexDescriptor{ + Name: tabledesc.LegacyPrimaryKeyIndexName, + ID: 1, + Unique: true, + Version: descpb.StrictIndexColumnIDGuaranteesVersion, + KeyColumnNames: []string{"db_id", "table_id"}, + KeyColumnDirections: []catenumpb.IndexColumn_Direction{ + catenumpb.IndexColumn_ASC, + catenumpb.IndexColumn_ASC, + }, + KeyColumnIDs: []descpb.ColumnID{1, 2}, + }, + descpb.IndexDescriptor{ + Name: "replication_size_bytes_table_id_idx", + ID: 2, + Unique: false, + Version: descpb.StrictIndexColumnIDGuaranteesVersion, + KeyColumnNames: []string{"replication_size_bytes", 
"table_id"}, + KeyColumnDirections: []catenumpb.IndexColumn_Direction{ + catenumpb.IndexColumn_DESC, + catenumpb.IndexColumn_ASC, + }, + KeyColumnIDs: []descpb.ColumnID{9, 2}, + KeySuffixColumnIDs: []descpb.ColumnID{1}, + }, + descpb.IndexDescriptor{ + Name: "total_ranges_table_id_idx", + ID: 3, + Unique: false, + Version: descpb.StrictIndexColumnIDGuaranteesVersion, + KeyColumnNames: []string{"total_ranges", "table_id"}, + KeyColumnDirections: []catenumpb.IndexColumn_Direction{ + catenumpb.IndexColumn_DESC, + catenumpb.IndexColumn_ASC, + }, + KeyColumnIDs: []descpb.ColumnID{10, 2}, + KeySuffixColumnIDs: []descpb.ColumnID{1}, + }, + descpb.IndexDescriptor{ + Name: "total_columns_table_id_idx", + ID: 4, + Unique: false, + Version: descpb.StrictIndexColumnIDGuaranteesVersion, + KeyColumnNames: []string{"total_columns", "table_id"}, + KeyColumnDirections: []catenumpb.IndexColumn_Direction{ + catenumpb.IndexColumn_DESC, + catenumpb.IndexColumn_ASC, + }, + KeyColumnIDs: []descpb.ColumnID{6, 2}, + KeySuffixColumnIDs: []descpb.ColumnID{1}, + }, + descpb.IndexDescriptor{ + Name: "total_indexes_table_id_idx", + ID: 5, + Unique: false, + Version: descpb.StrictIndexColumnIDGuaranteesVersion, + KeyColumnNames: []string{"total_indexes", "table_id"}, + KeyColumnDirections: []catenumpb.IndexColumn_Direction{ + catenumpb.IndexColumn_DESC, + catenumpb.IndexColumn_ASC, + }, + KeyColumnIDs: []descpb.ColumnID{7, 2}, + KeySuffixColumnIDs: []descpb.ColumnID{1}, + }, + descpb.IndexDescriptor{ + Name: "perc_live_data_id_idx", + ID: 6, + Unique: false, + Version: descpb.StrictIndexColumnIDGuaranteesVersion, + KeyColumnNames: []string{"perc_live_data", "table_id"}, + KeyColumnDirections: []catenumpb.IndexColumn_Direction{ + catenumpb.IndexColumn_DESC, + catenumpb.IndexColumn_ASC, + }, + KeyColumnIDs: []descpb.ColumnID{13, 2}, + KeySuffixColumnIDs: []descpb.ColumnID{1}, + CompositeColumnIDs: []descpb.ColumnID{13}, + }, + descpb.IndexDescriptor{ + Name: "last_updated_idx", + ID: 7, + Unique: false, + Version: descpb.StrictIndexColumnIDGuaranteesVersion, + KeyColumnNames: []string{"crdb_internal_last_updated_table_id_shard_16", "last_updated", "table_id"}, + KeyColumnDirections: []catenumpb.IndexColumn_Direction{ + catenumpb.IndexColumn_ASC, + catenumpb.IndexColumn_DESC, + catenumpb.IndexColumn_ASC, + }, + KeyColumnIDs: []descpb.ColumnID{16, 15, 2}, + KeySuffixColumnIDs: []descpb.ColumnID{1}, + Sharded: catpb.ShardedDescriptor{ + IsSharded: true, + Name: "crdb_internal_last_updated_table_id_shard_16", + ShardBuckets: 16, // Cluster setting default. 
+ ColumnNames: []string{"last_updated", "table_id"}, + }, + }, + descpb.IndexDescriptor{ + Name: "db_name_gin", + Type: descpb.IndexDescriptor_INVERTED, + ID: 8, + Unique: false, + Version: descpb.StrictIndexColumnIDGuaranteesVersion, + KeyColumnNames: []string{"db_name"}, + KeyColumnDirections: []catenumpb.IndexColumn_Direction{catenumpb.IndexColumn_ASC}, + InvertedColumnKinds: []catpb.InvertedIndexColumnKind{catpb.InvertedIndexColumnKind_TRIGRAM}, + KeyColumnIDs: []descpb.ColumnID{3}, + KeySuffixColumnIDs: []descpb.ColumnID{1, 2}, + }, + descpb.IndexDescriptor{ + Name: "table_name_gin", + Type: descpb.IndexDescriptor_INVERTED, + ID: 9, + Unique: false, + Version: descpb.StrictIndexColumnIDGuaranteesVersion, + KeyColumnNames: []string{"table_name"}, + KeyColumnDirections: []catenumpb.IndexColumn_Direction{catenumpb.IndexColumn_ASC}, + InvertedColumnKinds: []catpb.InvertedIndexColumnKind{catpb.InvertedIndexColumnKind_TRIGRAM}, + KeyColumnIDs: []descpb.ColumnID{5}, + KeySuffixColumnIDs: []descpb.ColumnID{1, 2}, + }, + descpb.IndexDescriptor{ + Name: "schema_name_gin", + Type: descpb.IndexDescriptor_INVERTED, + ID: 10, + Unique: false, + Version: descpb.StrictIndexColumnIDGuaranteesVersion, + KeyColumnNames: []string{"schema_name"}, + KeyColumnDirections: []catenumpb.IndexColumn_Direction{catenumpb.IndexColumn_ASC}, + InvertedColumnKinds: []catpb.InvertedIndexColumnKind{catpb.InvertedIndexColumnKind_TRIGRAM}, + KeyColumnIDs: []descpb.ColumnID{4}, + KeySuffixColumnIDs: []descpb.ColumnID{1, 2}, + }, + descpb.IndexDescriptor{ + Name: "store_ids_gin", + Type: descpb.IndexDescriptor_INVERTED, + ID: 11, + Unique: false, + Version: descpb.StrictIndexColumnIDGuaranteesVersion, + KeyColumnNames: []string{"store_ids"}, + KeyColumnDirections: []catenumpb.IndexColumn_Direction{catenumpb.IndexColumn_ASC}, + InvertedColumnKinds: []catpb.InvertedIndexColumnKind{catpb.InvertedIndexColumnKind_DEFAULT}, + KeyColumnIDs: []descpb.ColumnID{8}, + KeySuffixColumnIDs: []descpb.ColumnID{1, 2}, + }, + ), func(tbl *descpb.TableDescriptor) { + tbl.Checks = []*descpb.TableDescriptor_CheckConstraint{{ + Expr: "crdb_internal_last_updated_table_id_shard_16 IN (0:::INT8, 1:::INT8, 2:::INT8, 3:::INT8, 4:::INT8, 5:::INT8, 6:::INT8, 7:::INT8, 8:::INT8, 9:::INT8, 10:::INT8, 11:::INT8, 12:::INT8, 13:::INT8, 14:::INT8, 15:::INT8)", + Name: "check_crdb_internal_last_updated_table_id_shard_16", + Validity: descpb.ConstraintValidity_Validated, + ColumnIDs: []descpb.ColumnID{16}, + IsNonNullConstraint: false, + FromHashShardedColumn: true, + ConstraintID: tbl.NextConstraintID, + }} + tbl.NextConstraintID++ + }, + ) ) // SpanConfigurationsTableName represents system.span_configurations. 
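For reference, the check constraint installed above only pins the virtual shard column to its sixteen legal buckets; the bucket value itself comes from the same expression the schema declares for the computed column. A minimal sketch of evaluating that expression by hand (the INT8 and timestamp arguments are placeholder literals, not values from this patch):

    SELECT mod(fnv32(md5(crdb_internal.datums_to_bytes(42:::INT8, now()))), 16:::INT8);
    -- Always yields a value in 0..15, which is exactly what
    -- check_crdb_internal_last_updated_table_id_shard_16 asserts.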
diff --git a/pkg/sql/catalog/systemschema_test/testdata/bootstrap_system b/pkg/sql/catalog/systemschema_test/testdata/bootstrap_system index abad86c3336b..3ebc93a6c24a 100644 --- a/pkg/sql/catalog/systemschema_test/testdata/bootstrap_system +++ b/pkg/sql/catalog/systemschema_test/testdata/bootstrap_system @@ -412,6 +412,7 @@ CREATE TABLE public.sql_instances ( sql_addr STRING NULL, crdb_region BYTES NOT NULL, binary_version STRING NULL, + is_draining BOOL NULL, CONSTRAINT "primary" PRIMARY KEY (crdb_region ASC, id ASC) ); CREATE TABLE public.span_configurations ( @@ -637,12 +638,41 @@ CREATE TABLE public.statement_execution_insights ( INDEX statement_fingerprint_id_idx (statement_fingerprint_id ASC, start_time DESC, end_time DESC), INDEX time_range_idx (start_time DESC, end_time DESC) USING HASH WITH (bucket_count=16) ); +CREATE TABLE public.table_metadata ( + db_id INT8 NOT NULL, + table_id INT8 NOT NULL, + db_name STRING NOT NULL, + schema_name STRING NOT NULL, + table_name STRING NOT NULL, + total_columns INT8 NOT NULL, + total_indexes INT8 NOT NULL, + store_ids INT8[] NOT NULL, + replication_size_bytes INT8 NOT NULL, + total_ranges INT8 NOT NULL, + total_live_data_bytes INT8 NOT NULL, + total_data_bytes INT8 NOT NULL, + perc_live_data FLOAT8 NOT NULL, + last_update_error STRING NULL, + last_updated TIMESTAMPTZ NOT NULL DEFAULT now():::TIMESTAMPTZ, + crdb_internal_last_updated_table_id_shard_16 INT4 NOT VISIBLE NOT NULL AS (mod(fnv32(md5(crdb_internal.datums_to_bytes(table_id, last_updated))), 16:::INT8)) VIRTUAL, + CONSTRAINT "primary" PRIMARY KEY (db_id ASC, table_id ASC), + INDEX replication_size_bytes_table_id_idx (replication_size_bytes DESC, table_id ASC), + INDEX total_ranges_table_id_idx (total_ranges DESC, table_id ASC), + INDEX total_columns_table_id_idx (total_columns DESC, table_id ASC), + INDEX total_indexes_table_id_idx (total_indexes DESC, table_id ASC), + INDEX perc_live_data_id_idx (perc_live_data DESC, table_id ASC), + INDEX last_updated_idx (last_updated DESC, table_id ASC) USING HASH WITH (bucket_count=16), + INVERTED INDEX db_name_gin (db_name gin_trgm_ops), + INVERTED INDEX table_name_gin (table_name gin_trgm_ops), + INVERTED INDEX schema_name_gin (schema_name gin_trgm_ops), + INVERTED INDEX store_ids_gin (store_ids) +); schema_telemetry ---- {"database":{"name":"defaultdb","id":100,"modificationTime":{"wallTime":"0"},"version":"1","privileges":{"users":[{"userProto":"admin","privileges":"2","withGrantOption":"2"},{"userProto":"public","privileges":"2048"},{"userProto":"root","privileges":"2","withGrantOption":"2"}],"ownerProto":"root","version":3},"schemas":{"public":{"id":101}},"defaultPrivileges":{}}} {"database":{"name":"postgres","id":102,"modificationTime":{"wallTime":"0"},"version":"1","privileges":{"users":[{"userProto":"admin","privileges":"2","withGrantOption":"2"},{"userProto":"public","privileges":"2048"},{"userProto":"root","privileges":"2","withGrantOption":"2"}],"ownerProto":"root","version":3},"schemas":{"public":{"id":103}},"defaultPrivileges":{}}} -{"database":{"name":"system","id":1,"modificationTime":{"wallTime":"0"},"version":"1","privileges":{"users":[{"userProto":"admin","privileges":"2048","withGrantOption":"2048"},{"userProto":"root","privileges":"2048","withGrantOption":"2048"}],"ownerProto":"node","version":3},"systemDatabaseSchemaVersion":{"majorVal":1000024,"minorVal":2,"internal":6}}} 
+{"database":{"name":"system","id":1,"modificationTime":{"wallTime":"0"},"version":"1","privileges":{"users":[{"userProto":"admin","privileges":"2048","withGrantOption":"2048"},{"userProto":"root","privileges":"2048","withGrantOption":"2048"}],"ownerProto":"node","version":3},"systemDatabaseSchemaVersion":{"majorVal":1000024,"minorVal":2,"internal":14}}} {"table":{"name":"comments","id":24,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"type","id":1,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"object_id","id":2,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"sub_id","id":3,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"comment","id":4,"type":{"family":"StringFamily","oid":25}}],"nextColumnId":5,"families":[{"name":"primary","columnNames":["type","object_id","sub_id"],"columnIds":[1,2,3]},{"name":"fam_4_comment","id":4,"columnNames":["comment"],"columnIds":[4],"defaultColumnId":4}],"nextFamilyId":5,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["type","object_id","sub_id"],"keyColumnDirections":["ASC","ASC","ASC"],"storeColumnNames":["comment"],"keyColumnIds":[1,2,3],"storeColumnIds":[4],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"nextIndexId":2,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"public","privileges":"32"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":2}} {"table":{"name":"database_role_settings","id":44,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"database_id","id":1,"type":{"family":"OidFamily","oid":26}},{"name":"role_name","id":2,"type":{"family":"StringFamily","oid":25}},{"name":"settings","id":3,"type":{"family":"ArrayFamily","arrayElemType":"StringFamily","oid":1009,"arrayContents":{"family":"StringFamily","oid":25}}},{"name":"role_id","id":4,"type":{"family":"OidFamily","oid":26}}],"nextColumnId":5,"families":[{"name":"primary","columnNames":["database_id","role_name","settings","role_id"],"columnIds":[1,2,3,4]}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["database_id","role_name"],"keyColumnDirections":["ASC","ASC"],"storeColumnNames":["settings","role_id"],"keyColumnIds":[1,2],"storeColumnIds":[3,4],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":2},"indexes":[{"name":"database_role_settings_database_id_role_id_key","id":2,"unique":true,"version":3,"keyColumnNames":["database_id","role_id"],"keyColumnDirections":["ASC","ASC"],"storeColumnNames":["settings"],"keyColumnIds":[1,4],"keySuffixColumnIds":[2],"storeColumnIds":[3],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{},"constraintId":1}],"nextIndexId":3,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":3}} 
{"table":{"name":"descriptor","id":3,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"id","id":1,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"descriptor","id":2,"type":{"family":"BytesFamily","oid":17},"nullable":true}],"nextColumnId":3,"families":[{"name":"primary","columnNames":["id"],"columnIds":[1]},{"name":"fam_2_descriptor","id":2,"columnNames":["descriptor"],"columnIds":[2],"defaultColumnId":2}],"nextFamilyId":3,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["id"],"keyColumnDirections":["ASC"],"storeColumnNames":["descriptor"],"keyColumnIds":[1],"storeColumnIds":[2],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"nextIndexId":2,"privileges":{"users":[{"userProto":"admin","privileges":"32","withGrantOption":"32"},{"userProto":"root","privileges":"32","withGrantOption":"32"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":2}} @@ -677,7 +707,7 @@ schema_telemetry {"table":{"name":"span_stats_samples","id":57,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"id","id":1,"type":{"family":"UuidFamily","oid":2950},"defaultExpr":"gen_random_uuid()"},{"name":"sample_time","id":2,"type":{"family":"TimestampFamily","oid":1114},"defaultExpr":"now():::TIMESTAMP"}],"nextColumnId":3,"families":[{"name":"primary","columnNames":["id","sample_time"],"columnIds":[1,2],"defaultColumnId":2}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["id"],"keyColumnDirections":["ASC"],"storeColumnNames":["sample_time"],"keyColumnIds":[1],"storeColumnIds":[2],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":2},"indexes":[{"name":"samples_sample_time_idx","id":2,"unique":true,"version":3,"keyColumnNames":["sample_time"],"keyColumnDirections":["ASC"],"keyColumnIds":[2],"keySuffixColumnIds":[1],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{},"constraintId":1}],"nextIndexId":3,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":3}} 
{"table":{"name":"span_stats_tenant_boundaries","id":58,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"tenant_id","id":1,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"boundaries","id":2,"type":{"family":"BytesFamily","oid":17}}],"nextColumnId":3,"families":[{"name":"primary","columnNames":["tenant_id","boundaries"],"columnIds":[1,2],"defaultColumnId":2}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["tenant_id"],"keyColumnDirections":["ASC"],"storeColumnNames":["boundaries"],"keyColumnIds":[1],"storeColumnIds":[2],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"nextIndexId":2,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":2}} {"table":{"name":"span_stats_unique_keys","id":55,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"id","id":1,"type":{"family":"UuidFamily","oid":2950},"defaultExpr":"gen_random_uuid()"},{"name":"key_bytes","id":2,"type":{"family":"BytesFamily","oid":17},"nullable":true}],"nextColumnId":3,"families":[{"name":"primary","columnNames":["id","key_bytes"],"columnIds":[1,2],"defaultColumnId":2}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["id"],"keyColumnDirections":["ASC"],"storeColumnNames":["key_bytes"],"keyColumnIds":[1],"storeColumnIds":[2],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":2},"indexes":[{"name":"unique_keys_key_bytes_idx","id":2,"unique":true,"version":3,"keyColumnNames":["key_bytes"],"keyColumnDirections":["ASC"],"keyColumnIds":[2],"keySuffixColumnIds":[1],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{},"constraintId":1}],"nextIndexId":3,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":3}} 
-{"table":{"name":"sql_instances","id":46,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"id","id":1,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"addr","id":2,"type":{"family":"StringFamily","oid":25},"nullable":true},{"name":"session_id","id":3,"type":{"family":"BytesFamily","oid":17},"nullable":true},{"name":"locality","id":4,"type":{"family":"JsonFamily","oid":3802},"nullable":true},{"name":"sql_addr","id":5,"type":{"family":"StringFamily","oid":25},"nullable":true},{"name":"crdb_region","id":6,"type":{"family":"BytesFamily","oid":17}},{"name":"binary_version","id":7,"type":{"family":"StringFamily","oid":25},"nullable":true}],"nextColumnId":8,"families":[{"name":"primary","columnNames":["id","addr","session_id","locality","sql_addr","crdb_region","binary_version"],"columnIds":[1,2,3,4,5,6,7]}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":2,"unique":true,"version":4,"keyColumnNames":["crdb_region","id"],"keyColumnDirections":["ASC","ASC"],"storeColumnNames":["addr","session_id","locality","sql_addr","binary_version"],"keyColumnIds":[6,1],"storeColumnIds":[2,3,4,5,7],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"nextIndexId":3,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":2}} +{"table":{"name":"sql_instances","id":46,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"id","id":1,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"addr","id":2,"type":{"family":"StringFamily","oid":25},"nullable":true},{"name":"session_id","id":3,"type":{"family":"BytesFamily","oid":17},"nullable":true},{"name":"locality","id":4,"type":{"family":"JsonFamily","oid":3802},"nullable":true},{"name":"sql_addr","id":5,"type":{"family":"StringFamily","oid":25},"nullable":true},{"name":"crdb_region","id":6,"type":{"family":"BytesFamily","oid":17}},{"name":"binary_version","id":7,"type":{"family":"StringFamily","oid":25},"nullable":true},{"name":"is_draining","id":8,"type":{"oid":16},"nullable":true}],"nextColumnId":9,"families":[{"name":"primary","columnNames":["id","addr","session_id","locality","sql_addr","crdb_region","binary_version","is_draining"],"columnIds":[1,2,3,4,5,6,7,8]}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":2,"unique":true,"version":4,"keyColumnNames":["crdb_region","id"],"keyColumnDirections":["ASC","ASC"],"storeColumnNames":["addr","session_id","locality","sql_addr","binary_version","is_draining"],"keyColumnIds":[6,1],"storeColumnIds":[2,3,4,5,7,8],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"nextIndexId":3,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":2}} 
{"table":{"name":"sqlliveness","id":39,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"session_id","id":1,"type":{"family":"BytesFamily","oid":17}},{"name":"expiration","id":2,"type":{"family":"DecimalFamily","oid":1700}},{"name":"crdb_region","id":3,"type":{"family":"BytesFamily","oid":17}}],"nextColumnId":4,"families":[{"name":"primary","columnNames":["crdb_region","session_id","expiration"],"columnIds":[3,1,2],"defaultColumnId":2}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":2,"unique":true,"version":4,"keyColumnNames":["crdb_region","session_id"],"keyColumnDirections":["ASC","ASC"],"storeColumnNames":["expiration"],"keyColumnIds":[3,1],"storeColumnIds":[2],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"nextIndexId":3,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":2}} {"table":{"name":"statement_activity","id":61,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"aggregated_ts","id":1,"type":{"family":"TimestampTZFamily","oid":1184}},{"name":"fingerprint_id","id":2,"type":{"family":"BytesFamily","oid":17}},{"name":"transaction_fingerprint_id","id":3,"type":{"family":"BytesFamily","oid":17}},{"name":"plan_hash","id":4,"type":{"family":"BytesFamily","oid":17}},{"name":"app_name","id":5,"type":{"family":"StringFamily","oid":25}},{"name":"agg_interval","id":6,"type":{"family":"IntervalFamily","oid":1186,"intervalDurationField":{}}},{"name":"metadata","id":7,"type":{"family":"JsonFamily","oid":3802}},{"name":"statistics","id":8,"type":{"family":"JsonFamily","oid":3802}},{"name":"plan","id":9,"type":{"family":"JsonFamily","oid":3802}},{"name":"index_recommendations","id":10,"type":{"family":"ArrayFamily","arrayElemType":"StringFamily","oid":1009,"arrayContents":{"family":"StringFamily","oid":25}},"defaultExpr":"ARRAY[]:::STRING[]"},{"name":"execution_count","id":11,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"execution_total_seconds","id":12,"type":{"family":"FloatFamily","width":64,"oid":701}},{"name":"execution_total_cluster_seconds","id":13,"type":{"family":"FloatFamily","width":64,"oid":701}},{"name":"contention_time_avg_seconds","id":14,"type":{"family":"FloatFamily","width":64,"oid":701}},{"name":"cpu_sql_avg_nanos","id":15,"type":{"family":"FloatFamily","width":64,"oid":701}},{"name":"service_latency_avg_seconds","id":16,"type":{"family":"FloatFamily","width":64,"oid":701}},{"name":"service_latency_p99_seconds","id":17,"type":{"family":"FloatFamily","width":64,"oid":701}}],"nextColumnId":18,"families":[{"name":"primary","columnNames":["aggregated_ts","fingerprint_id","transaction_fingerprint_id","plan_hash","app_name","agg_interval","metadata","statistics","plan","index_recommendations","execution_count","execution_total_seconds","execution_total_cluster_seconds","contention_time_avg_seconds","cpu_sql_avg_nanos","service_latency_avg_seconds","service_latency_p99_seconds"],"columnIds":[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17]}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["aggregated_ts","fingerprint_id","transaction_fingerprint_id","plan_hash","app_name"],"keyColumnDirections":["ASC","ASC","ASC","ASC","AS
C"],"storeColumnNames":["agg_interval","metadata","statistics","plan","index_recommendations","execution_count","execution_total_seconds","execution_total_cluster_seconds","contention_time_avg_seconds","cpu_sql_avg_nanos","service_latency_avg_seconds","service_latency_p99_seconds"],"keyColumnIds":[1,2,3,4,5],"storeColumnIds":[6,7,8,9,10,11,12,13,14,15,16,17],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"indexes":[{"name":"fingerprint_id_idx","id":2,"version":3,"keyColumnNames":["fingerprint_id","transaction_fingerprint_id"],"keyColumnDirections":["ASC","ASC"],"keyColumnIds":[2,3],"keySuffixColumnIds":[1,4,5],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{}},{"name":"execution_count_idx","id":3,"version":3,"keyColumnNames":["aggregated_ts","execution_count"],"keyColumnDirections":["ASC","DESC"],"keyColumnIds":[1,11],"keySuffixColumnIds":[2,3,4,5],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{}},{"name":"execution_total_seconds_idx","id":4,"version":3,"keyColumnNames":["aggregated_ts","execution_total_seconds"],"keyColumnDirections":["ASC","DESC"],"keyColumnIds":[1,12],"keySuffixColumnIds":[2,3,4,5],"compositeColumnIds":[12],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{}},{"name":"contention_time_avg_seconds_idx","id":5,"version":3,"keyColumnNames":["aggregated_ts","contention_time_avg_seconds"],"keyColumnDirections":["ASC","DESC"],"keyColumnIds":[1,14],"keySuffixColumnIds":[2,3,4,5],"compositeColumnIds":[14],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{}},{"name":"cpu_sql_avg_nanos_idx","id":6,"version":3,"keyColumnNames":["aggregated_ts","cpu_sql_avg_nanos"],"keyColumnDirections":["ASC","DESC"],"keyColumnIds":[1,15],"keySuffixColumnIds":[2,3,4,5],"compositeColumnIds":[15],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{}},{"name":"service_latency_avg_seconds_idx","id":7,"version":3,"keyColumnNames":["aggregated_ts","service_latency_avg_seconds"],"keyColumnDirections":["ASC","DESC"],"keyColumnIds":[1,16],"keySuffixColumnIds":[2,3,4,5],"compositeColumnIds":[16],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{}},{"name":"service_latency_p99_seconds_idx","id":8,"version":3,"keyColumnNames":["aggregated_ts","service_latency_p99_seconds"],"keyColumnDirections":["ASC","DESC"],"keyColumnIds":[1,17],"keySuffixColumnIds":[2,3,4,5],"compositeColumnIds":[17],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{}}],"nextIndexId":9,"privileges":{"users":[{"userProto":"admin","privileges":"32","withGrantOption":"32"},{"userProto":"root","privileges":"32","withGrantOption":"32"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":2}} 
{"table":{"name":"statement_bundle_chunks","id":34,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"id","id":1,"type":{"family":"IntFamily","width":64,"oid":20},"defaultExpr":"unique_rowid()"},{"name":"description","id":2,"type":{"family":"StringFamily","oid":25},"nullable":true},{"name":"data","id":3,"type":{"family":"BytesFamily","oid":17}}],"nextColumnId":4,"families":[{"name":"primary","columnNames":["id","description","data"],"columnIds":[1,2,3]}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["id"],"keyColumnDirections":["ASC"],"storeColumnNames":["description","data"],"keyColumnIds":[1],"storeColumnIds":[2,3],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"nextIndexId":2,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":2}} @@ -685,6 +715,7 @@ schema_telemetry {"table":{"name":"statement_diagnostics_requests","id":35,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"id","id":1,"type":{"family":"IntFamily","width":64,"oid":20},"defaultExpr":"unique_rowid()"},{"name":"completed","id":2,"type":{"oid":16},"defaultExpr":"false"},{"name":"statement_fingerprint","id":3,"type":{"family":"StringFamily","oid":25}},{"name":"statement_diagnostics_id","id":4,"type":{"family":"IntFamily","width":64,"oid":20},"nullable":true},{"name":"requested_at","id":5,"type":{"family":"TimestampTZFamily","oid":1184}},{"name":"min_execution_latency","id":6,"type":{"family":"IntervalFamily","oid":1186,"intervalDurationField":{}},"nullable":true},{"name":"expires_at","id":7,"type":{"family":"TimestampTZFamily","oid":1184},"nullable":true},{"name":"sampling_probability","id":8,"type":{"family":"FloatFamily","width":64,"oid":701},"nullable":true},{"name":"plan_gist","id":9,"type":{"family":"StringFamily","oid":25},"nullable":true},{"name":"anti_plan_gist","id":10,"type":{"oid":16},"nullable":true},{"name":"redacted","id":11,"type":{"oid":16},"defaultExpr":"false"}],"nextColumnId":12,"families":[{"name":"primary","columnNames":["id","completed","statement_fingerprint","statement_diagnostics_id","requested_at","min_execution_latency","expires_at","sampling_probability","plan_gist","anti_plan_gist","redacted"],"columnIds":[1,2,3,4,5,6,7,8,9,10,11]}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["id"],"keyColumnDirections":["ASC"],"storeColumnNames":["completed","statement_fingerprint","statement_diagnostics_id","requested_at","min_execution_latency","expires_at","sampling_probability","plan_gist","anti_plan_gist","redacted"],"keyColumnIds":[1],"storeColumnIds":[2,3,4,5,6,7,8,9,10,11],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"indexes":[{"name":"completed_idx","id":2,"version":3,"keyColumnNames":["completed","id"],"keyColumnDirections":["ASC","ASC"],"storeColumnNames":["statement_fingerprint","min_execution_latency","expires_at","sampling_probability","plan_gist","anti_plan_gist","redacted"],"keyColumnIds":[2,1],"storeColumnIds":[3,6,7,8,9,10,11],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{}}],"nextIndexId":3,"privil
eges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"checks":[{"expr":"sampling_probability BETWEEN _:::FLOAT8 AND _:::FLOAT8","name":"check_sampling_probability","columnIds":[8],"constraintId":2}],"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":3}} {"table":{"name":"statement_execution_insights","id":66,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"session_id","id":1,"type":{"family":"StringFamily","oid":25}},{"name":"transaction_id","id":2,"type":{"family":"UuidFamily","oid":2950}},{"name":"transaction_fingerprint_id","id":3,"type":{"family":"BytesFamily","oid":17}},{"name":"statement_id","id":4,"type":{"family":"StringFamily","oid":25}},{"name":"statement_fingerprint_id","id":5,"type":{"family":"BytesFamily","oid":17}},{"name":"problem","id":6,"type":{"family":"IntFamily","width":64,"oid":20},"nullable":true},{"name":"causes","id":7,"type":{"family":"ArrayFamily","width":64,"arrayElemType":"IntFamily","oid":1016,"arrayContents":{"family":"IntFamily","width":64,"oid":20}},"nullable":true},{"name":"query","id":8,"type":{"family":"StringFamily","oid":25},"nullable":true},{"name":"status","id":9,"type":{"family":"IntFamily","width":64,"oid":20},"nullable":true},{"name":"start_time","id":10,"type":{"family":"TimestampTZFamily","oid":1184},"nullable":true},{"name":"end_time","id":11,"type":{"family":"TimestampTZFamily","oid":1184},"nullable":true},{"name":"full_scan","id":12,"type":{"oid":16},"nullable":true},{"name":"user_name","id":13,"type":{"family":"StringFamily","oid":25},"nullable":true},{"name":"app_name","id":14,"type":{"family":"StringFamily","oid":25},"nullable":true},{"name":"user_priority","id":15,"type":{"family":"StringFamily","oid":25},"nullable":true},{"name":"database_name","id":16,"type":{"family":"StringFamily","oid":25},"nullable":true},{"name":"plan_gist","id":17,"type":{"family":"StringFamily","oid":25},"nullable":true},{"name":"retries","id":18,"type":{"family":"IntFamily","width":64,"oid":20},"nullable":true},{"name":"last_retry_reason","id":19,"type":{"family":"StringFamily","oid":25},"nullable":true},{"name":"execution_node_ids","id":20,"type":{"family":"ArrayFamily","width":64,"arrayElemType":"IntFamily","oid":1016,"arrayContents":{"family":"IntFamily","width":64,"oid":20}},"nullable":true},{"name":"index_recommendations","id":21,"type":{"family":"ArrayFamily","arrayElemType":"StringFamily","oid":1009,"arrayContents":{"family":"StringFamily","oid":25}},"nullable":true},{"name":"implicit_txn","id":22,"type":{"oid":16},"nullable":true},{"name":"cpu_sql_nanos","id":23,"type":{"family":"IntFamily","width":64,"oid":20},"nullable":true},{"name":"error_code","id":24,"type":{"family":"StringFamily","oid":25},"nullable":true},{"name":"contention_time","id":25,"type":{"family":"IntervalFamily","oid":1186,"intervalDurationField":{}},"nullable":true},{"name":"contention_info","id":26,"type":{"family":"JsonFamily","oid":3802},"nullable":true},{"name":"details","id":27,"type":{"family":"JsonFamily","oid":3802},"nullable":true},{"name":"created","id":28,"type":{"family":"TimestampTZFamily","oid":1184},"defaultExpr":"now():::TIMESTAMPTZ"},{"name":"crdb_internal_end_time_start_time_shard_16","id":29,"type":{"family":"IntFamily","width":32,"oid":23},"hidden":true,"computeExpr":"mod(fnv32(md5(crdb_internal.datums_to_bytes(end_time, start_time))), 
_:::INT8)","virtual":true}],"nextColumnId":30,"families":[{"name":"primary","columnNames":["session_id","transaction_id","transaction_fingerprint_id","statement_id","statement_fingerprint_id","problem","causes","query","status","start_time","end_time","full_scan","user_name","app_name","user_priority","database_name","plan_gist","retries","last_retry_reason","execution_node_ids","index_recommendations","implicit_txn","cpu_sql_nanos","error_code","contention_time","contention_info","details","created"],"columnIds":[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28]}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["statement_id","transaction_id"],"keyColumnDirections":["ASC","ASC"],"storeColumnNames":["session_id","transaction_fingerprint_id","statement_fingerprint_id","problem","causes","query","status","start_time","end_time","full_scan","user_name","app_name","user_priority","database_name","plan_gist","retries","last_retry_reason","execution_node_ids","index_recommendations","implicit_txn","cpu_sql_nanos","error_code","contention_time","contention_info","details","created"],"keyColumnIds":[4,2],"storeColumnIds":[1,3,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"indexes":[{"name":"transaction_id_idx","id":2,"version":3,"keyColumnNames":["transaction_id"],"keyColumnDirections":["ASC"],"keyColumnIds":[2],"keySuffixColumnIds":[4],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{}},{"name":"transaction_fingerprint_id_idx","id":3,"version":3,"keyColumnNames":["transaction_fingerprint_id","start_time","end_time"],"keyColumnDirections":["ASC","DESC","DESC"],"keyColumnIds":[3,10,11],"keySuffixColumnIds":[4,2],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{}},{"name":"statement_fingerprint_id_idx","id":4,"version":3,"keyColumnNames":["statement_fingerprint_id","start_time","end_time"],"keyColumnDirections":["ASC","DESC","DESC"],"keyColumnIds":[5,10,11],"keySuffixColumnIds":[4,2],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{}},{"name":"time_range_idx","id":5,"version":3,"keyColumnNames":["crdb_internal_end_time_start_time_shard_16","start_time","end_time"],"keyColumnDirections":["ASC","DESC","DESC"],"keyColumnIds":[29,10,11],"keySuffixColumnIds":[4,2],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{"isSharded":true,"name":"crdb_internal_end_time_start_time_shard_16","shardBuckets":16,"columnNames":["end_time","start_time"]},"geoConfig":{}}],"nextIndexId":6,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"checks":[{"expr":"crdb_internal_end_time_start_time_shard_16 IN (_:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8)","name":"check_crdb_internal_end_time_start_time_shard_16","columnIds":[29],"fromHashShardedColumn":true,"constraintId":2}],"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":3}} 
{"table":{"name":"statement_statistics","id":42,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"aggregated_ts","id":1,"type":{"family":"TimestampTZFamily","oid":1184}},{"name":"fingerprint_id","id":2,"type":{"family":"BytesFamily","oid":17}},{"name":"transaction_fingerprint_id","id":3,"type":{"family":"BytesFamily","oid":17}},{"name":"plan_hash","id":4,"type":{"family":"BytesFamily","oid":17}},{"name":"app_name","id":5,"type":{"family":"StringFamily","oid":25}},{"name":"node_id","id":6,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"agg_interval","id":7,"type":{"family":"IntervalFamily","oid":1186,"intervalDurationField":{}}},{"name":"metadata","id":8,"type":{"family":"JsonFamily","oid":3802}},{"name":"statistics","id":9,"type":{"family":"JsonFamily","oid":3802}},{"name":"plan","id":10,"type":{"family":"JsonFamily","oid":3802}},{"name":"crdb_internal_aggregated_ts_app_name_fingerprint_id_node_id_plan_hash_transaction_fingerprint_id_shard_8","id":11,"type":{"family":"IntFamily","width":32,"oid":23},"hidden":true,"computeExpr":"mod(fnv32(crdb_internal.datums_to_bytes(aggregated_ts, app_name, fingerprint_id, node_id, plan_hash, transaction_fingerprint_id)), _:::INT8)"},{"name":"index_recommendations","id":12,"type":{"family":"ArrayFamily","arrayElemType":"StringFamily","oid":1009,"arrayContents":{"family":"StringFamily","oid":25}},"defaultExpr":"ARRAY[]:::STRING[]"},{"name":"indexes_usage","id":13,"type":{"family":"JsonFamily","oid":3802},"nullable":true,"computeExpr":"(statistics-\u003e'_':::STRING)-\u003e'_':::STRING","virtual":true},{"name":"execution_count","id":14,"type":{"family":"IntFamily","width":64,"oid":20},"nullable":true,"computeExpr":"((statistics-\u003e'_':::STRING)-\u003e'_':::STRING)::INT8"},{"name":"service_latency","id":15,"type":{"family":"FloatFamily","width":64,"oid":701},"nullable":true,"computeExpr":"(((statistics-\u003e'_':::STRING)-\u003e'_':::STRING)-\u003e'_':::STRING)::FLOAT8"},{"name":"cpu_sql_nanos","id":16,"type":{"family":"FloatFamily","width":64,"oid":701},"nullable":true,"computeExpr":"(((statistics-\u003e'_':::STRING)-\u003e'_':::STRING)-\u003e'_':::STRING)::FLOAT8"},{"name":"contention_time","id":17,"type":{"family":"FloatFamily","width":64,"oid":701},"nullable":true,"computeExpr":"(((statistics-\u003e'_':::STRING)-\u003e'_':::STRING)-\u003e'_':::STRING)::FLOAT8"},{"name":"total_estimated_execution_time","id":18,"type":{"family":"FloatFamily","width":64,"oid":701},"nullable":true,"computeExpr":"((statistics-\u003e'_':::STRING)-\u003e\u003e'_':::STRING)::FLOAT8 * 
(((statistics-\u003e'_':::STRING)-\u003e'_':::STRING)-\u003e\u003e'_':::STRING)::FLOAT8"},{"name":"p99_latency","id":19,"type":{"family":"FloatFamily","width":64,"oid":701},"nullable":true,"computeExpr":"(((statistics-\u003e'_':::STRING)-\u003e'_':::STRING)-\u003e'_':::STRING)::FLOAT8"}],"nextColumnId":20,"families":[{"name":"primary","columnNames":["crdb_internal_aggregated_ts_app_name_fingerprint_id_node_id_plan_hash_transaction_fingerprint_id_shard_8","aggregated_ts","fingerprint_id","transaction_fingerprint_id","plan_hash","app_name","node_id","agg_interval","metadata","statistics","plan","index_recommendations","execution_count","service_latency","cpu_sql_nanos","contention_time","total_estimated_execution_time","p99_latency"],"columnIds":[11,1,2,3,4,5,6,7,8,9,10,12,14,15,16,17,18,19]}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["crdb_internal_aggregated_ts_app_name_fingerprint_id_node_id_plan_hash_transaction_fingerprint_id_shard_8","aggregated_ts","fingerprint_id","transaction_fingerprint_id","plan_hash","app_name","node_id"],"keyColumnDirections":["ASC","ASC","ASC","ASC","ASC","ASC","ASC"],"storeColumnNames":["agg_interval","metadata","statistics","plan","index_recommendations","execution_count","service_latency","cpu_sql_nanos","contention_time","total_estimated_execution_time","p99_latency"],"keyColumnIds":[11,1,2,3,4,5,6],"storeColumnIds":[7,8,9,10,12,14,15,16,17,18,19],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{"isSharded":true,"name":"crdb_internal_aggregated_ts_app_name_fingerprint_id_node_id_plan_hash_transaction_fingerprint_id_shard_8","shardBuckets":8,"columnNames":["aggregated_ts","app_name","fingerprint_id","node_id","plan_hash","transaction_fingerprint_id"]},"geoConfig":{},"constraintId":1},"indexes":[{"name":"fingerprint_stats_idx","id":2,"version":3,"keyColumnNames":["fingerprint_id","transaction_fingerprint_id"],"keyColumnDirections":["ASC","ASC"],"keyColumnIds":[2,3],"keySuffixColumnIds":[11,1,4,5,6],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{}},{"name":"indexes_usage_idx","id":3,"version":3,"keyColumnNames":["indexes_usage"],"keyColumnDirections":["ASC"],"invertedColumnKinds":["DEFAULT"],"keyColumnIds":[13],"keySuffixColumnIds":[11,1,2,3,4,5,6],"foreignKey":{},"interleave":{},"partitioning":{},"type":"INVERTED","sharded":{},"geoConfig":{}},{"name":"execution_count_idx","id":4,"version":3,"keyColumnNames":["aggregated_ts","app_name","execution_count"],"keyColumnDirections":["ASC","ASC","DESC"],"keyColumnIds":[1,5,14],"keySuffixColumnIds":[11,2,3,4,6],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{},"predicate":"app_name NOT LIKE '_':::STRING"},{"name":"service_latency_idx","id":5,"version":3,"keyColumnNames":["aggregated_ts","app_name","service_latency"],"keyColumnDirections":["ASC","ASC","DESC"],"keyColumnIds":[1,5,15],"keySuffixColumnIds":[11,2,3,4,6],"compositeColumnIds":[15],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{},"predicate":"app_name NOT LIKE '_':::STRING"},{"name":"cpu_sql_nanos_idx","id":6,"version":3,"keyColumnNames":["aggregated_ts","app_name","cpu_sql_nanos"],"keyColumnDirections":["ASC","ASC","DESC"],"keyColumnIds":[1,5,16],"keySuffixColumnIds":[11,2,3,4,6],"compositeColumnIds":[16],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{},"predicate":"app_name NOT LIKE 
'_':::STRING"},{"name":"contention_time_idx","id":7,"version":3,"keyColumnNames":["aggregated_ts","app_name","contention_time"],"keyColumnDirections":["ASC","ASC","DESC"],"keyColumnIds":[1,5,17],"keySuffixColumnIds":[11,2,3,4,6],"compositeColumnIds":[17],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{},"predicate":"app_name NOT LIKE '_':::STRING"},{"name":"total_estimated_execution_time_idx","id":8,"version":3,"keyColumnNames":["aggregated_ts","app_name","total_estimated_execution_time"],"keyColumnDirections":["ASC","ASC","DESC"],"keyColumnIds":[1,5,18],"keySuffixColumnIds":[11,2,3,4,6],"compositeColumnIds":[18],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{},"predicate":"app_name NOT LIKE '_':::STRING"},{"name":"p99_latency_idx","id":9,"version":3,"keyColumnNames":["aggregated_ts","app_name","p99_latency"],"keyColumnDirections":["ASC","ASC","DESC"],"keyColumnIds":[1,5,19],"keySuffixColumnIds":[11,2,3,4,6],"compositeColumnIds":[19],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{},"predicate":"app_name NOT LIKE '_':::STRING"}],"nextIndexId":10,"privileges":{"users":[{"userProto":"admin","privileges":"32","withGrantOption":"32"},{"userProto":"root","privileges":"32","withGrantOption":"32"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"checks":[{"expr":"crdb_internal_aggregated_ts_app_name_fingerprint_id_node_id_plan_hash_transaction_fingerprint_id_shard_8 IN (_:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8)","name":"check_crdb_internal_aggregated_ts_app_name_fingerprint_id_node_id_plan_hash_transaction_fingerprint_id_shard_8","columnIds":[11],"fromHashShardedColumn":true,"constraintId":2}],"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":3}} +{"table":{"name":"table_metadata","id":67,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"db_id","id":1,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"table_id","id":2,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"db_name","id":3,"type":{"family":"StringFamily","oid":25}},{"name":"schema_name","id":4,"type":{"family":"StringFamily","oid":25}},{"name":"table_name","id":5,"type":{"family":"StringFamily","oid":25}},{"name":"total_columns","id":6,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"total_indexes","id":7,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"store_ids","id":8,"type":{"family":"ArrayFamily","width":64,"arrayElemType":"IntFamily","oid":1016,"arrayContents":{"family":"IntFamily","width":64,"oid":20}}},{"name":"replication_size_bytes","id":9,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"total_ranges","id":10,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"total_live_data_bytes","id":11,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"total_data_bytes","id":12,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"perc_live_data","id":13,"type":{"family":"FloatFamily","width":64,"oid":701}},{"name":"last_update_error","id":14,"type":{"family":"StringFamily","oid":25},"nullable":true},{"name":"last_updated","id":15,"type":{"family":"TimestampTZFamily","oid":1184},"defaultExpr":"now():::TIMESTAMPTZ"},{"name":"crdb_internal_last_updated_table_id_shard_16","id":16,"type":{"family":"IntFamily","width":32,"oid":23},"hidden":true,"computeExpr":"mod(fnv32(md5(crdb_internal.datums_to_bytes(table_id, last_updated))), 
_:::INT8)","virtual":true}],"nextColumnId":17,"families":[{"name":"primary","columnNames":["db_id","table_id","db_name","schema_name","table_name","total_columns","total_indexes","store_ids","replication_size_bytes","total_ranges","total_live_data_bytes","total_data_bytes","perc_live_data","last_update_error","last_updated"],"columnIds":[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["db_id","table_id"],"keyColumnDirections":["ASC","ASC"],"storeColumnNames":["db_name","schema_name","table_name","total_columns","total_indexes","store_ids","replication_size_bytes","total_ranges","total_live_data_bytes","total_data_bytes","perc_live_data","last_update_error","last_updated"],"keyColumnIds":[1,2],"storeColumnIds":[3,4,5,6,7,8,9,10,11,12,13,14,15],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"indexes":[{"name":"replication_size_bytes_table_id_idx","id":2,"version":3,"keyColumnNames":["replication_size_bytes","table_id"],"keyColumnDirections":["DESC","ASC"],"keyColumnIds":[9,2],"keySuffixColumnIds":[1],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{}},{"name":"total_ranges_table_id_idx","id":3,"version":3,"keyColumnNames":["total_ranges","table_id"],"keyColumnDirections":["DESC","ASC"],"keyColumnIds":[10,2],"keySuffixColumnIds":[1],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{}},{"name":"total_columns_table_id_idx","id":4,"version":3,"keyColumnNames":["total_columns","table_id"],"keyColumnDirections":["DESC","ASC"],"keyColumnIds":[6,2],"keySuffixColumnIds":[1],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{}},{"name":"total_indexes_table_id_idx","id":5,"version":3,"keyColumnNames":["total_indexes","table_id"],"keyColumnDirections":["DESC","ASC"],"keyColumnIds":[7,2],"keySuffixColumnIds":[1],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{}},{"name":"perc_live_data_id_idx","id":6,"version":3,"keyColumnNames":["perc_live_data","table_id"],"keyColumnDirections":["DESC","ASC"],"keyColumnIds":[13,2],"keySuffixColumnIds":[1],"compositeColumnIds":[13],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{}},{"name":"last_updated_idx","id":7,"version":3,"keyColumnNames":["crdb_internal_last_updated_table_id_shard_16","last_updated","table_id"],"keyColumnDirections":["ASC","DESC","ASC"],"keyColumnIds":[16,15,2],"keySuffixColumnIds":[1],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{"isSharded":true,"name":"crdb_internal_last_updated_table_id_shard_16","shardBuckets":16,"columnNames":["last_updated","table_id"]},"geoConfig":{}},{"name":"db_name_gin","id":8,"version":3,"keyColumnNames":["db_name"],"keyColumnDirections":["ASC"],"invertedColumnKinds":["TRIGRAM"],"keyColumnIds":[3],"keySuffixColumnIds":[1,2],"foreignKey":{},"interleave":{},"partitioning":{},"type":"INVERTED","sharded":{},"geoConfig":{}},{"name":"table_name_gin","id":9,"version":3,"keyColumnNames":["table_name"],"keyColumnDirections":["ASC"],"invertedColumnKinds":["TRIGRAM"],"keyColumnIds":[5],"keySuffixColumnIds":[1,2],"foreignKey":{},"interleave":{},"partitioning":{},"type":"INVERTED","sharded":{},"geoConfig":{}},{"name":"schema_name_gin","id":10,"version":3,"keyColumnNames":["schema_name"],"keyColumnDirections":["ASC"],"invertedColumnKinds":["TRIGRAM"],"keyColumnIds":[4],"keySuffixColumnIds":[1,2],"foreignKey":{},"interleave":{},"part
itioning":{},"type":"INVERTED","sharded":{},"geoConfig":{}},{"name":"store_ids_gin","id":11,"version":3,"keyColumnNames":["store_ids"],"keyColumnDirections":["ASC"],"invertedColumnKinds":["DEFAULT"],"keyColumnIds":[8],"keySuffixColumnIds":[1,2],"foreignKey":{},"interleave":{},"partitioning":{},"type":"INVERTED","sharded":{},"geoConfig":{}}],"nextIndexId":12,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"checks":[{"expr":"crdb_internal_last_updated_table_id_shard_16 IN (_:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8)","name":"check_crdb_internal_last_updated_table_id_shard_16","columnIds":[16],"fromHashShardedColumn":true,"constraintId":2}],"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":3}} {"table":{"name":"table_statistics","id":20,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"tableID","id":1,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"statisticID","id":2,"type":{"family":"IntFamily","width":64,"oid":20},"defaultExpr":"unique_rowid()"},{"name":"name","id":3,"type":{"family":"StringFamily","oid":25},"nullable":true},{"name":"columnIDs","id":4,"type":{"family":"ArrayFamily","width":64,"arrayElemType":"IntFamily","oid":1016,"arrayContents":{"family":"IntFamily","width":64,"oid":20}}},{"name":"createdAt","id":5,"type":{"family":"TimestampFamily","oid":1114},"defaultExpr":"now():::TIMESTAMP"},{"name":"rowCount","id":6,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"distinctCount","id":7,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"nullCount","id":8,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"histogram","id":9,"type":{"family":"BytesFamily","oid":17},"nullable":true},{"name":"avgSize","id":10,"type":{"family":"IntFamily","width":64,"oid":20},"defaultExpr":"_:::INT8"},{"name":"partialPredicate","id":11,"type":{"family":"StringFamily","oid":25},"nullable":true},{"name":"fullStatisticID","id":12,"type":{"family":"IntFamily","width":64,"oid":20},"nullable":true}],"nextColumnId":13,"families":[{"name":"fam_0_tableID_statisticID_name_columnIDs_createdAt_rowCount_distinctCount_nullCount_histogram","columnNames":["tableID","statisticID","name","columnIDs","createdAt","rowCount","distinctCount","nullCount","histogram","avgSize","partialPredicate","fullStatisticID"],"columnIds":[1,2,3,4,5,6,7,8,9,10,11,12]}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["tableID","statisticID"],"keyColumnDirections":["ASC","ASC"],"storeColumnNames":["name","columnIDs","createdAt","rowCount","distinctCount","nullCount","histogram","avgSize","partialPredicate","fullStatisticID"],"keyColumnIds":[1,2],"storeColumnIds":[3,4,5,6,7,8,9,10,11,12],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"nextIndexId":2,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":2}} 
{"table":{"name":"task_payloads","id":59,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"id","id":1,"type":{"family":"StringFamily","oid":25}},{"name":"created","id":2,"type":{"family":"TimestampTZFamily","oid":1184},"defaultExpr":"now():::TIMESTAMPTZ"},{"name":"owner","id":3,"type":{"family":"StringFamily","oid":25}},{"name":"owner_id","id":4,"type":{"family":"OidFamily","oid":26}},{"name":"min_version","id":5,"type":{"family":"StringFamily","oid":25}},{"name":"description","id":6,"type":{"family":"StringFamily","oid":25},"nullable":true},{"name":"type","id":7,"type":{"family":"StringFamily","oid":25}},{"name":"value","id":8,"type":{"family":"BytesFamily","oid":17}}],"nextColumnId":9,"families":[{"name":"primary","columnNames":["id","created","owner","owner_id","min_version","description","type","value"],"columnIds":[1,2,3,4,5,6,7,8]}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["id"],"keyColumnDirections":["ASC"],"storeColumnNames":["created","owner","owner_id","min_version","description","type","value"],"keyColumnIds":[1],"storeColumnIds":[2,3,4,5,6,7,8],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"nextIndexId":2,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":2}} {"table":{"name":"tenant_id_seq","id":63,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"value","id":1,"type":{"family":"IntFamily","width":64,"oid":20}}],"families":[{"name":"primary","columnNames":["value"],"columnIds":[1],"defaultColumnId":1}],"primaryIndex":{"name":"primary","id":1,"version":4,"keyColumnNames":["value"],"keyColumnDirections":["ASC"],"keyColumnIds":[1],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{}},"privileges":{"users":[{"userProto":"admin","privileges":"32","withGrantOption":"32"},{"userProto":"root","privileges":"32","withGrantOption":"32"}],"ownerProto":"node","version":3},"formatVersion":3,"sequenceOpts":{"increment":"1","minValue":"1","maxValue":"9223372036854775807","start":"1","sequenceOwner":{},"cacheSize":"1"},"replacementOf":{"time":{}},"createAsOfTime":{}}} @@ -706,24 +737,22 @@ schema_telemetry snapshot_id=7cd8a9ae-f35c-4cd2-970a-757174600874 max_records=10 ---- {"database":{"name":"defaultdb","id":100,"modificationTime":{"wallTime":"0"},"version":"1","privileges":{"users":[{"userProto":"admin","privileges":"2","withGrantOption":"2"},{"userProto":"public","privileges":"2048"},{"userProto":"root","privileges":"2","withGrantOption":"2"}],"ownerProto":"root","version":3},"schemas":{"public":{"id":101}},"defaultPrivileges":{}}} 
{"table":{"name":"descriptor_id_seq","id":7,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"value","id":1,"type":{"family":"IntFamily","width":64,"oid":20}}],"families":[{"name":"primary","columnNames":["value"],"columnIds":[1],"defaultColumnId":1}],"primaryIndex":{"name":"primary","id":1,"version":4,"keyColumnNames":["value"],"keyColumnDirections":["ASC"],"keyColumnIds":[1],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{}},"privileges":{"users":[{"userProto":"admin","privileges":"32","withGrantOption":"32"},{"userProto":"root","privileges":"32","withGrantOption":"32"}],"ownerProto":"node","version":3},"formatVersion":3,"sequenceOpts":{"increment":"1","minValue":"1","maxValue":"9223372036854775807","start":"1","sequenceOwner":{},"cacheSize":"1"},"replacementOf":{"time":{}},"createAsOfTime":{}}} -{"table":{"name":"locations","id":21,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"localityKey","id":1,"type":{"family":"StringFamily","oid":25}},{"name":"localityValue","id":2,"type":{"family":"StringFamily","oid":25}},{"name":"latitude","id":3,"type":{"family":"DecimalFamily","width":15,"precision":18,"oid":1700}},{"name":"longitude","id":4,"type":{"family":"DecimalFamily","width":15,"precision":18,"oid":1700}}],"nextColumnId":5,"families":[{"name":"fam_0_localityKey_localityValue_latitude_longitude","columnNames":["localityKey","localityValue","latitude","longitude"],"columnIds":[1,2,3,4]}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["localityKey","localityValue"],"keyColumnDirections":["ASC","ASC"],"storeColumnNames":["latitude","longitude"],"keyColumnIds":[1,2],"storeColumnIds":[3,4],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"nextIndexId":2,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":2}} -{"table":{"name":"role_id_seq","id":48,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"value","id":1,"type":{"family":"IntFamily","width":64,"oid":20}}],"families":[{"name":"primary","columnNames":["value"],"columnIds":[1],"defaultColumnId":1}],"primaryIndex":{"name":"primary","id":1,"version":4,"keyColumnNames":["value"],"keyColumnDirections":["ASC"],"keyColumnIds":[1],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{}},"privileges":{"users":[{"userProto":"admin","privileges":"800","withGrantOption":"800"},{"userProto":"root","privileges":"800","withGrantOption":"800"}],"ownerProto":"node","version":3},"formatVersion":3,"sequenceOpts":{"increment":"1","minValue":"100","maxValue":"2147483647","start":"100","sequenceOwner":{},"cacheSize":"1"},"replacementOf":{"time":{}},"createAsOfTime":{}}} 
-{"table":{"name":"span_stats_unique_keys","id":55,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"id","id":1,"type":{"family":"UuidFamily","oid":2950},"defaultExpr":"gen_random_uuid()"},{"name":"key_bytes","id":2,"type":{"family":"BytesFamily","oid":17},"nullable":true}],"nextColumnId":3,"families":[{"name":"primary","columnNames":["id","key_bytes"],"columnIds":[1,2],"defaultColumnId":2}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["id"],"keyColumnDirections":["ASC"],"storeColumnNames":["key_bytes"],"keyColumnIds":[1],"storeColumnIds":[2],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":2},"indexes":[{"name":"unique_keys_key_bytes_idx","id":2,"unique":true,"version":3,"keyColumnNames":["key_bytes"],"keyColumnDirections":["ASC"],"keyColumnIds":[2],"keySuffixColumnIds":[1],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{},"constraintId":1}],"nextIndexId":3,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":3}} +{"table":{"name":"eventlog","id":12,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"timestamp","id":1,"type":{"family":"TimestampFamily","oid":1114}},{"name":"eventType","id":2,"type":{"family":"StringFamily","oid":25}},{"name":"targetID","id":3,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"reportingID","id":4,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"info","id":5,"type":{"family":"StringFamily","oid":25},"nullable":true},{"name":"uniqueID","id":6,"type":{"family":"BytesFamily","oid":17},"defaultExpr":"uuid_v4()"}],"nextColumnId":7,"families":[{"name":"primary","columnNames":["timestamp","uniqueID"],"columnIds":[1,6]},{"name":"fam_2_eventType","id":2,"columnNames":["eventType"],"columnIds":[2],"defaultColumnId":2},{"name":"fam_3_targetID","id":3,"columnNames":["targetID"],"columnIds":[3],"defaultColumnId":3},{"name":"fam_4_reportingID","id":4,"columnNames":["reportingID"],"columnIds":[4],"defaultColumnId":4},{"name":"fam_5_info","id":5,"columnNames":["info"],"columnIds":[5],"defaultColumnId":5}],"nextFamilyId":6,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["timestamp","uniqueID"],"keyColumnDirections":["ASC","ASC"],"storeColumnNames":["eventType","targetID","reportingID","info"],"keyColumnIds":[1,6],"storeColumnIds":[2,3,4,5],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"nextIndexId":2,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":2}} 
+{"table":{"name":"join_tokens","id":41,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"id","id":1,"type":{"family":"UuidFamily","oid":2950}},{"name":"secret","id":2,"type":{"family":"BytesFamily","oid":17}},{"name":"expiration","id":3,"type":{"family":"TimestampTZFamily","oid":1184}}],"nextColumnId":4,"families":[{"name":"primary","columnNames":["id","secret","expiration"],"columnIds":[1,2,3]}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["id"],"keyColumnDirections":["ASC"],"storeColumnNames":["secret","expiration"],"keyColumnIds":[1],"storeColumnIds":[2,3],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"nextIndexId":2,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":2}} +{"table":{"name":"sql_instances","id":46,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"id","id":1,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"addr","id":2,"type":{"family":"StringFamily","oid":25},"nullable":true},{"name":"session_id","id":3,"type":{"family":"BytesFamily","oid":17},"nullable":true},{"name":"locality","id":4,"type":{"family":"JsonFamily","oid":3802},"nullable":true},{"name":"sql_addr","id":5,"type":{"family":"StringFamily","oid":25},"nullable":true},{"name":"crdb_region","id":6,"type":{"family":"BytesFamily","oid":17}},{"name":"binary_version","id":7,"type":{"family":"StringFamily","oid":25},"nullable":true},{"name":"is_draining","id":8,"type":{"oid":16},"nullable":true}],"nextColumnId":9,"families":[{"name":"primary","columnNames":["id","addr","session_id","locality","sql_addr","crdb_region","binary_version","is_draining"],"columnIds":[1,2,3,4,5,6,7,8]}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":2,"unique":true,"version":4,"keyColumnNames":["crdb_region","id"],"keyColumnDirections":["ASC","ASC"],"storeColumnNames":["addr","session_id","locality","sql_addr","binary_version","is_draining"],"keyColumnIds":[6,1],"storeColumnIds":[2,3,4,5,7,8],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"nextIndexId":3,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":2}} 
{"table":{"name":"task_payloads","id":59,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"id","id":1,"type":{"family":"StringFamily","oid":25}},{"name":"created","id":2,"type":{"family":"TimestampTZFamily","oid":1184},"defaultExpr":"now():::TIMESTAMPTZ"},{"name":"owner","id":3,"type":{"family":"StringFamily","oid":25}},{"name":"owner_id","id":4,"type":{"family":"OidFamily","oid":26}},{"name":"min_version","id":5,"type":{"family":"StringFamily","oid":25}},{"name":"description","id":6,"type":{"family":"StringFamily","oid":25},"nullable":true},{"name":"type","id":7,"type":{"family":"StringFamily","oid":25}},{"name":"value","id":8,"type":{"family":"BytesFamily","oid":17}}],"nextColumnId":9,"families":[{"name":"primary","columnNames":["id","created","owner","owner_id","min_version","description","type","value"],"columnIds":[1,2,3,4,5,6,7,8]}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["id"],"keyColumnDirections":["ASC"],"storeColumnNames":["created","owner","owner_id","min_version","description","type","value"],"keyColumnIds":[1],"storeColumnIds":[2,3,4,5,6,7,8],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"nextIndexId":2,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":2}} -{"table":{"name":"transaction_statistics","id":43,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"aggregated_ts","id":1,"type":{"family":"TimestampTZFamily","oid":1184}},{"name":"fingerprint_id","id":2,"type":{"family":"BytesFamily","oid":17}},{"name":"app_name","id":3,"type":{"family":"StringFamily","oid":25}},{"name":"node_id","id":4,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"agg_interval","id":5,"type":{"family":"IntervalFamily","oid":1186,"intervalDurationField":{}}},{"name":"metadata","id":6,"type":{"family":"JsonFamily","oid":3802}},{"name":"statistics","id":7,"type":{"family":"JsonFamily","oid":3802}},{"name":"crdb_internal_aggregated_ts_app_name_fingerprint_id_node_id_shard_8","id":8,"type":{"family":"IntFamily","width":32,"oid":23},"hidden":true,"computeExpr":"mod(fnv32(crdb_internal.datums_to_bytes(aggregated_ts, app_name, fingerprint_id, node_id)), _:::INT8)"},{"name":"execution_count","id":9,"type":{"family":"IntFamily","width":64,"oid":20},"nullable":true,"computeExpr":"((statistics-\u003e'_':::STRING)-\u003e'_':::STRING)::INT8"},{"name":"service_latency","id":10,"type":{"family":"FloatFamily","width":64,"oid":701},"nullable":true,"computeExpr":"(((statistics-\u003e'_':::STRING)-\u003e'_':::STRING)-\u003e'_':::STRING)::FLOAT8"},{"name":"cpu_sql_nanos","id":11,"type":{"family":"FloatFamily","width":64,"oid":701},"nullable":true,"computeExpr":"(((statistics-\u003e'_':::STRING)-\u003e'_':::STRING)-\u003e'_':::STRING)::FLOAT8"},{"name":"contention_time","id":12,"type":{"family":"FloatFamily","width":64,"oid":701},"nullable":true,"computeExpr":"(((statistics-\u003e'_':::STRING)-\u003e'_':::STRING)-\u003e'_':::STRING)::FLOAT8"},{"name":"total_estimated_execution_time","id":13,"type":{"family":"FloatFamily","width":64,"oid":701},"nullable":true,"computeExpr":"((statistics-\u003e'_':::STRING)-\u003e\u003e'_':::STRING)::FLOAT8 * 
(((statistics-\u003e'_':::STRING)-\u003e'_':::STRING)-\u003e\u003e'_':::STRING)::FLOAT8"},{"name":"p99_latency","id":14,"type":{"family":"FloatFamily","width":64,"oid":701},"nullable":true,"computeExpr":"(((statistics-\u003e'_':::STRING)-\u003e'_':::STRING)-\u003e'_':::STRING)::FLOAT8"}],"nextColumnId":15,"families":[{"name":"primary","columnNames":["crdb_internal_aggregated_ts_app_name_fingerprint_id_node_id_shard_8","aggregated_ts","fingerprint_id","app_name","node_id","agg_interval","metadata","statistics","execution_count","service_latency","cpu_sql_nanos","contention_time","total_estimated_execution_time","p99_latency"],"columnIds":[8,1,2,3,4,5,6,7,9,10,11,12,13,14]}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["crdb_internal_aggregated_ts_app_name_fingerprint_id_node_id_shard_8","aggregated_ts","fingerprint_id","app_name","node_id"],"keyColumnDirections":["ASC","ASC","ASC","ASC","ASC"],"storeColumnNames":["agg_interval","metadata","statistics","execution_count","service_latency","cpu_sql_nanos","contention_time","total_estimated_execution_time","p99_latency"],"keyColumnIds":[8,1,2,3,4],"storeColumnIds":[5,6,7,9,10,11,12,13,14],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{"isSharded":true,"name":"crdb_internal_aggregated_ts_app_name_fingerprint_id_node_id_shard_8","shardBuckets":8,"columnNames":["aggregated_ts","app_name","fingerprint_id","node_id"]},"geoConfig":{},"constraintId":1},"indexes":[{"name":"fingerprint_stats_idx","id":2,"version":3,"keyColumnNames":["fingerprint_id"],"keyColumnDirections":["ASC"],"keyColumnIds":[2],"keySuffixColumnIds":[8,1,3,4],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{}},{"name":"execution_count_idx","id":3,"version":3,"keyColumnNames":["aggregated_ts","app_name","execution_count"],"keyColumnDirections":["ASC","ASC","DESC"],"keyColumnIds":[1,3,9],"keySuffixColumnIds":[8,2,4],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{},"predicate":"app_name NOT LIKE '_':::STRING"},{"name":"service_latency_idx","id":4,"version":3,"keyColumnNames":["aggregated_ts","app_name","service_latency"],"keyColumnDirections":["ASC","ASC","DESC"],"keyColumnIds":[1,3,10],"keySuffixColumnIds":[8,2,4],"compositeColumnIds":[10],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{},"predicate":"app_name NOT LIKE '_':::STRING"},{"name":"cpu_sql_nanos_idx","id":5,"version":3,"keyColumnNames":["aggregated_ts","app_name","cpu_sql_nanos"],"keyColumnDirections":["ASC","ASC","DESC"],"keyColumnIds":[1,3,11],"keySuffixColumnIds":[8,2,4],"compositeColumnIds":[11],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{},"predicate":"app_name NOT LIKE '_':::STRING"},{"name":"contention_time_idx","id":6,"version":3,"keyColumnNames":["aggregated_ts","app_name","contention_time"],"keyColumnDirections":["ASC","ASC","DESC"],"keyColumnIds":[1,3,12],"keySuffixColumnIds":[8,2,4],"compositeColumnIds":[12],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{},"predicate":"app_name NOT LIKE '_':::STRING"},{"name":"total_estimated_execution_time_idx","id":7,"version":3,"keyColumnNames":["aggregated_ts","app_name","total_estimated_execution_time"],"keyColumnDirections":["ASC","ASC","DESC"],"keyColumnIds":[1,3,13],"keySuffixColumnIds":[8,2,4],"compositeColumnIds":[13],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{},"predicate":"app_name NOT LIKE 
'_':::STRING"},{"name":"p99_latency_idx","id":8,"version":3,"keyColumnNames":["aggregated_ts","app_name","p99_latency"],"keyColumnDirections":["ASC","ASC","DESC"],"keyColumnIds":[1,3,14],"keySuffixColumnIds":[8,2,4],"compositeColumnIds":[14],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{},"predicate":"app_name NOT LIKE '_':::STRING"}],"nextIndexId":9,"privileges":{"users":[{"userProto":"admin","privileges":"32","withGrantOption":"32"},{"userProto":"root","privileges":"32","withGrantOption":"32"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"checks":[{"expr":"crdb_internal_aggregated_ts_app_name_fingerprint_id_node_id_shard_8 IN (_:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8)","name":"check_crdb_internal_aggregated_ts_app_name_fingerprint_id_node_id_shard_8","columnIds":[8],"fromHashShardedColumn":true,"constraintId":2}],"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":3}} +{"table":{"name":"tenant_tasks","id":60,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"tenant_id","id":1,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"issuer","id":2,"type":{"family":"StringFamily","oid":25}},{"name":"task_id","id":3,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"created","id":4,"type":{"family":"TimestampTZFamily","oid":1184},"defaultExpr":"now():::TIMESTAMPTZ"},{"name":"payload_id","id":5,"type":{"family":"StringFamily","oid":25}},{"name":"owner","id":6,"type":{"family":"StringFamily","oid":25}},{"name":"owner_id","id":7,"type":{"family":"OidFamily","oid":26}}],"nextColumnId":8,"families":[{"name":"primary","columnNames":["tenant_id","issuer","task_id","created","payload_id","owner","owner_id"],"columnIds":[1,2,3,4,5,6,7]}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["tenant_id","issuer","task_id"],"keyColumnDirections":["ASC","ASC","ASC"],"storeColumnNames":["created","payload_id","owner","owner_id"],"keyColumnIds":[1,2,3],"storeColumnIds":[4,5,6,7],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"nextIndexId":2,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":2}} 
+{"table":{"name":"tenant_usage","id":45,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"tenant_id","id":1,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"instance_id","id":2,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"next_instance_id","id":3,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"last_update","id":4,"type":{"family":"TimestampFamily","oid":1114}},{"name":"ru_burst_limit","id":5,"type":{"family":"FloatFamily","width":64,"oid":701},"nullable":true},{"name":"ru_refill_rate","id":6,"type":{"family":"FloatFamily","width":64,"oid":701},"nullable":true},{"name":"ru_current","id":7,"type":{"family":"FloatFamily","width":64,"oid":701},"nullable":true},{"name":"current_share_sum","id":8,"type":{"family":"FloatFamily","width":64,"oid":701},"nullable":true},{"name":"total_consumption","id":9,"type":{"family":"BytesFamily","oid":17},"nullable":true},{"name":"instance_lease","id":10,"type":{"family":"BytesFamily","oid":17},"nullable":true},{"name":"instance_seq","id":11,"type":{"family":"IntFamily","width":64,"oid":20},"nullable":true},{"name":"instance_shares","id":12,"type":{"family":"FloatFamily","width":64,"oid":701},"nullable":true},{"name":"current_rates","id":13,"type":{"family":"BytesFamily","oid":17},"nullable":true},{"name":"next_rates","id":14,"type":{"family":"BytesFamily","oid":17},"nullable":true}],"nextColumnId":15,"families":[{"name":"primary","columnNames":["tenant_id","instance_id","next_instance_id","last_update","ru_burst_limit","ru_refill_rate","ru_current","current_share_sum","total_consumption","instance_lease","instance_seq","instance_shares","current_rates","next_rates"],"columnIds":[1,2,3,4,5,6,7,8,9,10,11,12,13,14]}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["tenant_id","instance_id"],"keyColumnDirections":["ASC","ASC"],"storeColumnNames":["next_instance_id","last_update","ru_burst_limit","ru_refill_rate","ru_current","current_share_sum","total_consumption","instance_lease","instance_seq","instance_shares","current_rates","next_rates"],"keyColumnIds":[1,2],"storeColumnIds":[3,4,5,6,7,8,9,10,11,12,13,14],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"nextIndexId":2,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"excludeDataFromBackup":true,"nextConstraintId":2}} 
{"table":{"name":"web_sessions","id":19,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"id","id":1,"type":{"family":"IntFamily","width":64,"oid":20},"defaultExpr":"unique_rowid()"},{"name":"hashedSecret","id":2,"type":{"family":"BytesFamily","oid":17}},{"name":"username","id":3,"type":{"family":"StringFamily","oid":25}},{"name":"createdAt","id":4,"type":{"family":"TimestampFamily","oid":1114},"defaultExpr":"now():::TIMESTAMP"},{"name":"expiresAt","id":5,"type":{"family":"TimestampFamily","oid":1114}},{"name":"revokedAt","id":6,"type":{"family":"TimestampFamily","oid":1114},"nullable":true},{"name":"lastUsedAt","id":7,"type":{"family":"TimestampFamily","oid":1114},"defaultExpr":"now():::TIMESTAMP"},{"name":"auditInfo","id":8,"type":{"family":"StringFamily","oid":25},"nullable":true},{"name":"user_id","id":9,"type":{"family":"OidFamily","oid":26}}],"nextColumnId":10,"families":[{"name":"fam_0_id_hashedSecret_username_createdAt_expiresAt_revokedAt_lastUsedAt_auditInfo","columnNames":["id","hashedSecret","username","createdAt","expiresAt","revokedAt","lastUsedAt","auditInfo","user_id"],"columnIds":[1,2,3,4,5,6,7,8,9]}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["id"],"keyColumnDirections":["ASC"],"storeColumnNames":["hashedSecret","username","createdAt","expiresAt","revokedAt","lastUsedAt","auditInfo","user_id"],"keyColumnIds":[1],"storeColumnIds":[2,3,4,5,6,7,8,9],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"indexes":[{"name":"web_sessions_expiresAt_idx","id":2,"version":3,"keyColumnNames":["expiresAt"],"keyColumnDirections":["ASC"],"keyColumnIds":[5],"keySuffixColumnIds":[1],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{}},{"name":"web_sessions_createdAt_idx","id":3,"version":3,"keyColumnNames":["createdAt"],"keyColumnDirections":["ASC"],"keyColumnIds":[4],"keySuffixColumnIds":[1],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{}},{"name":"web_sessions_revokedAt_idx","id":4,"version":3,"keyColumnNames":["revokedAt"],"keyColumnDirections":["ASC"],"keyColumnIds":[6],"keySuffixColumnIds":[1],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{}},{"name":"web_sessions_lastUsedAt_idx","id":5,"version":3,"keyColumnNames":["lastUsedAt"],"keyColumnDirections":["ASC"],"keyColumnIds":[7],"keySuffixColumnIds":[1],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{}}],"nextIndexId":6,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":2}} 
-{"table":{"name":"zones","id":5,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"id","id":1,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"config","id":2,"type":{"family":"BytesFamily","oid":17},"nullable":true}],"nextColumnId":3,"families":[{"name":"primary","columnNames":["id"],"columnIds":[1]},{"name":"fam_2_config","id":2,"columnNames":["config"],"columnIds":[2],"defaultColumnId":2}],"nextFamilyId":3,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["id"],"keyColumnDirections":["ASC"],"storeColumnNames":["config"],"keyColumnIds":[1],"storeColumnIds":[2],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"nextIndexId":2,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":2}} -{"schema":{"name":"public","id":103,"modificationTime":{"wallTime":"0"},"version":"1","parentId":102,"privileges":{"users":[{"userProto":"admin","privileges":"2","withGrantOption":"2"},{"userProto":"public","privileges":"516"},{"userProto":"root","privileges":"2","withGrantOption":"2"}],"ownerProto":"root","version":3}}} schema_telemetry snapshot_id=7cd8a9ae-f35c-4cd2-970a-757174600874 max_records=10 ---- {"database":{"name":"defaultdb","id":100,"modificationTime":{"wallTime":"0"},"version":"1","privileges":{"users":[{"userProto":"admin","privileges":"2","withGrantOption":"2"},{"userProto":"public","privileges":"2048"},{"userProto":"root","privileges":"2","withGrantOption":"2"}],"ownerProto":"root","version":3},"schemas":{"public":{"id":101}},"defaultPrivileges":{}}} {"table":{"name":"descriptor_id_seq","id":7,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"value","id":1,"type":{"family":"IntFamily","width":64,"oid":20}}],"families":[{"name":"primary","columnNames":["value"],"columnIds":[1],"defaultColumnId":1}],"primaryIndex":{"name":"primary","id":1,"version":4,"keyColumnNames":["value"],"keyColumnDirections":["ASC"],"keyColumnIds":[1],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{}},"privileges":{"users":[{"userProto":"admin","privileges":"32","withGrantOption":"32"},{"userProto":"root","privileges":"32","withGrantOption":"32"}],"ownerProto":"node","version":3},"formatVersion":3,"sequenceOpts":{"increment":"1","minValue":"1","maxValue":"9223372036854775807","start":"1","sequenceOwner":{},"cacheSize":"1"},"replacementOf":{"time":{}},"createAsOfTime":{}}} 
-{"table":{"name":"locations","id":21,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"localityKey","id":1,"type":{"family":"StringFamily","oid":25}},{"name":"localityValue","id":2,"type":{"family":"StringFamily","oid":25}},{"name":"latitude","id":3,"type":{"family":"DecimalFamily","width":15,"precision":18,"oid":1700}},{"name":"longitude","id":4,"type":{"family":"DecimalFamily","width":15,"precision":18,"oid":1700}}],"nextColumnId":5,"families":[{"name":"fam_0_localityKey_localityValue_latitude_longitude","columnNames":["localityKey","localityValue","latitude","longitude"],"columnIds":[1,2,3,4]}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["localityKey","localityValue"],"keyColumnDirections":["ASC","ASC"],"storeColumnNames":["latitude","longitude"],"keyColumnIds":[1,2],"storeColumnIds":[3,4],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"nextIndexId":2,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":2}} -{"table":{"name":"role_id_seq","id":48,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"value","id":1,"type":{"family":"IntFamily","width":64,"oid":20}}],"families":[{"name":"primary","columnNames":["value"],"columnIds":[1],"defaultColumnId":1}],"primaryIndex":{"name":"primary","id":1,"version":4,"keyColumnNames":["value"],"keyColumnDirections":["ASC"],"keyColumnIds":[1],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{}},"privileges":{"users":[{"userProto":"admin","privileges":"800","withGrantOption":"800"},{"userProto":"root","privileges":"800","withGrantOption":"800"}],"ownerProto":"node","version":3},"formatVersion":3,"sequenceOpts":{"increment":"1","minValue":"100","maxValue":"2147483647","start":"100","sequenceOwner":{},"cacheSize":"1"},"replacementOf":{"time":{}},"createAsOfTime":{}}} 
-{"table":{"name":"span_stats_unique_keys","id":55,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"id","id":1,"type":{"family":"UuidFamily","oid":2950},"defaultExpr":"gen_random_uuid()"},{"name":"key_bytes","id":2,"type":{"family":"BytesFamily","oid":17},"nullable":true}],"nextColumnId":3,"families":[{"name":"primary","columnNames":["id","key_bytes"],"columnIds":[1,2],"defaultColumnId":2}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["id"],"keyColumnDirections":["ASC"],"storeColumnNames":["key_bytes"],"keyColumnIds":[1],"storeColumnIds":[2],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":2},"indexes":[{"name":"unique_keys_key_bytes_idx","id":2,"unique":true,"version":3,"keyColumnNames":["key_bytes"],"keyColumnDirections":["ASC"],"keyColumnIds":[2],"keySuffixColumnIds":[1],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{},"constraintId":1}],"nextIndexId":3,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":3}} +{"table":{"name":"eventlog","id":12,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"timestamp","id":1,"type":{"family":"TimestampFamily","oid":1114}},{"name":"eventType","id":2,"type":{"family":"StringFamily","oid":25}},{"name":"targetID","id":3,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"reportingID","id":4,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"info","id":5,"type":{"family":"StringFamily","oid":25},"nullable":true},{"name":"uniqueID","id":6,"type":{"family":"BytesFamily","oid":17},"defaultExpr":"uuid_v4()"}],"nextColumnId":7,"families":[{"name":"primary","columnNames":["timestamp","uniqueID"],"columnIds":[1,6]},{"name":"fam_2_eventType","id":2,"columnNames":["eventType"],"columnIds":[2],"defaultColumnId":2},{"name":"fam_3_targetID","id":3,"columnNames":["targetID"],"columnIds":[3],"defaultColumnId":3},{"name":"fam_4_reportingID","id":4,"columnNames":["reportingID"],"columnIds":[4],"defaultColumnId":4},{"name":"fam_5_info","id":5,"columnNames":["info"],"columnIds":[5],"defaultColumnId":5}],"nextFamilyId":6,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["timestamp","uniqueID"],"keyColumnDirections":["ASC","ASC"],"storeColumnNames":["eventType","targetID","reportingID","info"],"keyColumnIds":[1,6],"storeColumnIds":[2,3,4,5],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"nextIndexId":2,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":2}} 
+{"table":{"name":"join_tokens","id":41,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"id","id":1,"type":{"family":"UuidFamily","oid":2950}},{"name":"secret","id":2,"type":{"family":"BytesFamily","oid":17}},{"name":"expiration","id":3,"type":{"family":"TimestampTZFamily","oid":1184}}],"nextColumnId":4,"families":[{"name":"primary","columnNames":["id","secret","expiration"],"columnIds":[1,2,3]}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["id"],"keyColumnDirections":["ASC"],"storeColumnNames":["secret","expiration"],"keyColumnIds":[1],"storeColumnIds":[2,3],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"nextIndexId":2,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":2}} +{"table":{"name":"sql_instances","id":46,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"id","id":1,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"addr","id":2,"type":{"family":"StringFamily","oid":25},"nullable":true},{"name":"session_id","id":3,"type":{"family":"BytesFamily","oid":17},"nullable":true},{"name":"locality","id":4,"type":{"family":"JsonFamily","oid":3802},"nullable":true},{"name":"sql_addr","id":5,"type":{"family":"StringFamily","oid":25},"nullable":true},{"name":"crdb_region","id":6,"type":{"family":"BytesFamily","oid":17}},{"name":"binary_version","id":7,"type":{"family":"StringFamily","oid":25},"nullable":true},{"name":"is_draining","id":8,"type":{"oid":16},"nullable":true}],"nextColumnId":9,"families":[{"name":"primary","columnNames":["id","addr","session_id","locality","sql_addr","crdb_region","binary_version","is_draining"],"columnIds":[1,2,3,4,5,6,7,8]}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":2,"unique":true,"version":4,"keyColumnNames":["crdb_region","id"],"keyColumnDirections":["ASC","ASC"],"storeColumnNames":["addr","session_id","locality","sql_addr","binary_version","is_draining"],"keyColumnIds":[6,1],"storeColumnIds":[2,3,4,5,7,8],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"nextIndexId":3,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":2}} 
{"table":{"name":"task_payloads","id":59,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"id","id":1,"type":{"family":"StringFamily","oid":25}},{"name":"created","id":2,"type":{"family":"TimestampTZFamily","oid":1184},"defaultExpr":"now():::TIMESTAMPTZ"},{"name":"owner","id":3,"type":{"family":"StringFamily","oid":25}},{"name":"owner_id","id":4,"type":{"family":"OidFamily","oid":26}},{"name":"min_version","id":5,"type":{"family":"StringFamily","oid":25}},{"name":"description","id":6,"type":{"family":"StringFamily","oid":25},"nullable":true},{"name":"type","id":7,"type":{"family":"StringFamily","oid":25}},{"name":"value","id":8,"type":{"family":"BytesFamily","oid":17}}],"nextColumnId":9,"families":[{"name":"primary","columnNames":["id","created","owner","owner_id","min_version","description","type","value"],"columnIds":[1,2,3,4,5,6,7,8]}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["id"],"keyColumnDirections":["ASC"],"storeColumnNames":["created","owner","owner_id","min_version","description","type","value"],"keyColumnIds":[1],"storeColumnIds":[2,3,4,5,6,7,8],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"nextIndexId":2,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":2}} -{"table":{"name":"transaction_statistics","id":43,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"aggregated_ts","id":1,"type":{"family":"TimestampTZFamily","oid":1184}},{"name":"fingerprint_id","id":2,"type":{"family":"BytesFamily","oid":17}},{"name":"app_name","id":3,"type":{"family":"StringFamily","oid":25}},{"name":"node_id","id":4,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"agg_interval","id":5,"type":{"family":"IntervalFamily","oid":1186,"intervalDurationField":{}}},{"name":"metadata","id":6,"type":{"family":"JsonFamily","oid":3802}},{"name":"statistics","id":7,"type":{"family":"JsonFamily","oid":3802}},{"name":"crdb_internal_aggregated_ts_app_name_fingerprint_id_node_id_shard_8","id":8,"type":{"family":"IntFamily","width":32,"oid":23},"hidden":true,"computeExpr":"mod(fnv32(crdb_internal.datums_to_bytes(aggregated_ts, app_name, fingerprint_id, node_id)), _:::INT8)"},{"name":"execution_count","id":9,"type":{"family":"IntFamily","width":64,"oid":20},"nullable":true,"computeExpr":"((statistics-\u003e'_':::STRING)-\u003e'_':::STRING)::INT8"},{"name":"service_latency","id":10,"type":{"family":"FloatFamily","width":64,"oid":701},"nullable":true,"computeExpr":"(((statistics-\u003e'_':::STRING)-\u003e'_':::STRING)-\u003e'_':::STRING)::FLOAT8"},{"name":"cpu_sql_nanos","id":11,"type":{"family":"FloatFamily","width":64,"oid":701},"nullable":true,"computeExpr":"(((statistics-\u003e'_':::STRING)-\u003e'_':::STRING)-\u003e'_':::STRING)::FLOAT8"},{"name":"contention_time","id":12,"type":{"family":"FloatFamily","width":64,"oid":701},"nullable":true,"computeExpr":"(((statistics-\u003e'_':::STRING)-\u003e'_':::STRING)-\u003e'_':::STRING)::FLOAT8"},{"name":"total_estimated_execution_time","id":13,"type":{"family":"FloatFamily","width":64,"oid":701},"nullable":true,"computeExpr":"((statistics-\u003e'_':::STRING)-\u003e\u003e'_':::STRING)::FLOAT8 * 
(((statistics-\u003e'_':::STRING)-\u003e'_':::STRING)-\u003e\u003e'_':::STRING)::FLOAT8"},{"name":"p99_latency","id":14,"type":{"family":"FloatFamily","width":64,"oid":701},"nullable":true,"computeExpr":"(((statistics-\u003e'_':::STRING)-\u003e'_':::STRING)-\u003e'_':::STRING)::FLOAT8"}],"nextColumnId":15,"families":[{"name":"primary","columnNames":["crdb_internal_aggregated_ts_app_name_fingerprint_id_node_id_shard_8","aggregated_ts","fingerprint_id","app_name","node_id","agg_interval","metadata","statistics","execution_count","service_latency","cpu_sql_nanos","contention_time","total_estimated_execution_time","p99_latency"],"columnIds":[8,1,2,3,4,5,6,7,9,10,11,12,13,14]}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["crdb_internal_aggregated_ts_app_name_fingerprint_id_node_id_shard_8","aggregated_ts","fingerprint_id","app_name","node_id"],"keyColumnDirections":["ASC","ASC","ASC","ASC","ASC"],"storeColumnNames":["agg_interval","metadata","statistics","execution_count","service_latency","cpu_sql_nanos","contention_time","total_estimated_execution_time","p99_latency"],"keyColumnIds":[8,1,2,3,4],"storeColumnIds":[5,6,7,9,10,11,12,13,14],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{"isSharded":true,"name":"crdb_internal_aggregated_ts_app_name_fingerprint_id_node_id_shard_8","shardBuckets":8,"columnNames":["aggregated_ts","app_name","fingerprint_id","node_id"]},"geoConfig":{},"constraintId":1},"indexes":[{"name":"fingerprint_stats_idx","id":2,"version":3,"keyColumnNames":["fingerprint_id"],"keyColumnDirections":["ASC"],"keyColumnIds":[2],"keySuffixColumnIds":[8,1,3,4],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{}},{"name":"execution_count_idx","id":3,"version":3,"keyColumnNames":["aggregated_ts","app_name","execution_count"],"keyColumnDirections":["ASC","ASC","DESC"],"keyColumnIds":[1,3,9],"keySuffixColumnIds":[8,2,4],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{},"predicate":"app_name NOT LIKE '_':::STRING"},{"name":"service_latency_idx","id":4,"version":3,"keyColumnNames":["aggregated_ts","app_name","service_latency"],"keyColumnDirections":["ASC","ASC","DESC"],"keyColumnIds":[1,3,10],"keySuffixColumnIds":[8,2,4],"compositeColumnIds":[10],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{},"predicate":"app_name NOT LIKE '_':::STRING"},{"name":"cpu_sql_nanos_idx","id":5,"version":3,"keyColumnNames":["aggregated_ts","app_name","cpu_sql_nanos"],"keyColumnDirections":["ASC","ASC","DESC"],"keyColumnIds":[1,3,11],"keySuffixColumnIds":[8,2,4],"compositeColumnIds":[11],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{},"predicate":"app_name NOT LIKE '_':::STRING"},{"name":"contention_time_idx","id":6,"version":3,"keyColumnNames":["aggregated_ts","app_name","contention_time"],"keyColumnDirections":["ASC","ASC","DESC"],"keyColumnIds":[1,3,12],"keySuffixColumnIds":[8,2,4],"compositeColumnIds":[12],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{},"predicate":"app_name NOT LIKE '_':::STRING"},{"name":"total_estimated_execution_time_idx","id":7,"version":3,"keyColumnNames":["aggregated_ts","app_name","total_estimated_execution_time"],"keyColumnDirections":["ASC","ASC","DESC"],"keyColumnIds":[1,3,13],"keySuffixColumnIds":[8,2,4],"compositeColumnIds":[13],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{},"predicate":"app_name NOT LIKE 
'_':::STRING"},{"name":"p99_latency_idx","id":8,"version":3,"keyColumnNames":["aggregated_ts","app_name","p99_latency"],"keyColumnDirections":["ASC","ASC","DESC"],"keyColumnIds":[1,3,14],"keySuffixColumnIds":[8,2,4],"compositeColumnIds":[14],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{},"predicate":"app_name NOT LIKE '_':::STRING"}],"nextIndexId":9,"privileges":{"users":[{"userProto":"admin","privileges":"32","withGrantOption":"32"},{"userProto":"root","privileges":"32","withGrantOption":"32"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"checks":[{"expr":"crdb_internal_aggregated_ts_app_name_fingerprint_id_node_id_shard_8 IN (_:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8)","name":"check_crdb_internal_aggregated_ts_app_name_fingerprint_id_node_id_shard_8","columnIds":[8],"fromHashShardedColumn":true,"constraintId":2}],"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":3}} +{"table":{"name":"tenant_tasks","id":60,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"tenant_id","id":1,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"issuer","id":2,"type":{"family":"StringFamily","oid":25}},{"name":"task_id","id":3,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"created","id":4,"type":{"family":"TimestampTZFamily","oid":1184},"defaultExpr":"now():::TIMESTAMPTZ"},{"name":"payload_id","id":5,"type":{"family":"StringFamily","oid":25}},{"name":"owner","id":6,"type":{"family":"StringFamily","oid":25}},{"name":"owner_id","id":7,"type":{"family":"OidFamily","oid":26}}],"nextColumnId":8,"families":[{"name":"primary","columnNames":["tenant_id","issuer","task_id","created","payload_id","owner","owner_id"],"columnIds":[1,2,3,4,5,6,7]}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["tenant_id","issuer","task_id"],"keyColumnDirections":["ASC","ASC","ASC"],"storeColumnNames":["created","payload_id","owner","owner_id"],"keyColumnIds":[1,2,3],"storeColumnIds":[4,5,6,7],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"nextIndexId":2,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":2}} 
+{"table":{"name":"tenant_usage","id":45,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"tenant_id","id":1,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"instance_id","id":2,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"next_instance_id","id":3,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"last_update","id":4,"type":{"family":"TimestampFamily","oid":1114}},{"name":"ru_burst_limit","id":5,"type":{"family":"FloatFamily","width":64,"oid":701},"nullable":true},{"name":"ru_refill_rate","id":6,"type":{"family":"FloatFamily","width":64,"oid":701},"nullable":true},{"name":"ru_current","id":7,"type":{"family":"FloatFamily","width":64,"oid":701},"nullable":true},{"name":"current_share_sum","id":8,"type":{"family":"FloatFamily","width":64,"oid":701},"nullable":true},{"name":"total_consumption","id":9,"type":{"family":"BytesFamily","oid":17},"nullable":true},{"name":"instance_lease","id":10,"type":{"family":"BytesFamily","oid":17},"nullable":true},{"name":"instance_seq","id":11,"type":{"family":"IntFamily","width":64,"oid":20},"nullable":true},{"name":"instance_shares","id":12,"type":{"family":"FloatFamily","width":64,"oid":701},"nullable":true},{"name":"current_rates","id":13,"type":{"family":"BytesFamily","oid":17},"nullable":true},{"name":"next_rates","id":14,"type":{"family":"BytesFamily","oid":17},"nullable":true}],"nextColumnId":15,"families":[{"name":"primary","columnNames":["tenant_id","instance_id","next_instance_id","last_update","ru_burst_limit","ru_refill_rate","ru_current","current_share_sum","total_consumption","instance_lease","instance_seq","instance_shares","current_rates","next_rates"],"columnIds":[1,2,3,4,5,6,7,8,9,10,11,12,13,14]}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["tenant_id","instance_id"],"keyColumnDirections":["ASC","ASC"],"storeColumnNames":["next_instance_id","last_update","ru_burst_limit","ru_refill_rate","ru_current","current_share_sum","total_consumption","instance_lease","instance_seq","instance_shares","current_rates","next_rates"],"keyColumnIds":[1,2],"storeColumnIds":[3,4,5,6,7,8,9,10,11,12,13,14],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"nextIndexId":2,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"excludeDataFromBackup":true,"nextConstraintId":2}} 
{"table":{"name":"web_sessions","id":19,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"id","id":1,"type":{"family":"IntFamily","width":64,"oid":20},"defaultExpr":"unique_rowid()"},{"name":"hashedSecret","id":2,"type":{"family":"BytesFamily","oid":17}},{"name":"username","id":3,"type":{"family":"StringFamily","oid":25}},{"name":"createdAt","id":4,"type":{"family":"TimestampFamily","oid":1114},"defaultExpr":"now():::TIMESTAMP"},{"name":"expiresAt","id":5,"type":{"family":"TimestampFamily","oid":1114}},{"name":"revokedAt","id":6,"type":{"family":"TimestampFamily","oid":1114},"nullable":true},{"name":"lastUsedAt","id":7,"type":{"family":"TimestampFamily","oid":1114},"defaultExpr":"now():::TIMESTAMP"},{"name":"auditInfo","id":8,"type":{"family":"StringFamily","oid":25},"nullable":true},{"name":"user_id","id":9,"type":{"family":"OidFamily","oid":26}}],"nextColumnId":10,"families":[{"name":"fam_0_id_hashedSecret_username_createdAt_expiresAt_revokedAt_lastUsedAt_auditInfo","columnNames":["id","hashedSecret","username","createdAt","expiresAt","revokedAt","lastUsedAt","auditInfo","user_id"],"columnIds":[1,2,3,4,5,6,7,8,9]}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["id"],"keyColumnDirections":["ASC"],"storeColumnNames":["hashedSecret","username","createdAt","expiresAt","revokedAt","lastUsedAt","auditInfo","user_id"],"keyColumnIds":[1],"storeColumnIds":[2,3,4,5,6,7,8,9],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"indexes":[{"name":"web_sessions_expiresAt_idx","id":2,"version":3,"keyColumnNames":["expiresAt"],"keyColumnDirections":["ASC"],"keyColumnIds":[5],"keySuffixColumnIds":[1],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{}},{"name":"web_sessions_createdAt_idx","id":3,"version":3,"keyColumnNames":["createdAt"],"keyColumnDirections":["ASC"],"keyColumnIds":[4],"keySuffixColumnIds":[1],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{}},{"name":"web_sessions_revokedAt_idx","id":4,"version":3,"keyColumnNames":["revokedAt"],"keyColumnDirections":["ASC"],"keyColumnIds":[6],"keySuffixColumnIds":[1],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{}},{"name":"web_sessions_lastUsedAt_idx","id":5,"version":3,"keyColumnNames":["lastUsedAt"],"keyColumnDirections":["ASC"],"keyColumnIds":[7],"keySuffixColumnIds":[1],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{}}],"nextIndexId":6,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":2}} 
-{"table":{"name":"zones","id":5,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"id","id":1,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"config","id":2,"type":{"family":"BytesFamily","oid":17},"nullable":true}],"nextColumnId":3,"families":[{"name":"primary","columnNames":["id"],"columnIds":[1]},{"name":"fam_2_config","id":2,"columnNames":["config"],"columnIds":[2],"defaultColumnId":2}],"nextFamilyId":3,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["id"],"keyColumnDirections":["ASC"],"storeColumnNames":["config"],"keyColumnIds":[1],"storeColumnIds":[2],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"nextIndexId":2,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":2}} -{"schema":{"name":"public","id":103,"modificationTime":{"wallTime":"0"},"version":"1","parentId":102,"privileges":{"users":[{"userProto":"admin","privileges":"2","withGrantOption":"2"},{"userProto":"public","privileges":"516"},{"userProto":"root","privileges":"2","withGrantOption":"2"}],"ownerProto":"root","version":3}}} diff --git a/pkg/sql/catalog/systemschema_test/testdata/bootstrap_tenant b/pkg/sql/catalog/systemschema_test/testdata/bootstrap_tenant index abad86c3336b..3ebc93a6c24a 100644 --- a/pkg/sql/catalog/systemschema_test/testdata/bootstrap_tenant +++ b/pkg/sql/catalog/systemschema_test/testdata/bootstrap_tenant @@ -412,6 +412,7 @@ CREATE TABLE public.sql_instances ( sql_addr STRING NULL, crdb_region BYTES NOT NULL, binary_version STRING NULL, + is_draining BOOL NULL, CONSTRAINT "primary" PRIMARY KEY (crdb_region ASC, id ASC) ); CREATE TABLE public.span_configurations ( @@ -637,12 +638,41 @@ CREATE TABLE public.statement_execution_insights ( INDEX statement_fingerprint_id_idx (statement_fingerprint_id ASC, start_time DESC, end_time DESC), INDEX time_range_idx (start_time DESC, end_time DESC) USING HASH WITH (bucket_count=16) ); +CREATE TABLE public.table_metadata ( + db_id INT8 NOT NULL, + table_id INT8 NOT NULL, + db_name STRING NOT NULL, + schema_name STRING NOT NULL, + table_name STRING NOT NULL, + total_columns INT8 NOT NULL, + total_indexes INT8 NOT NULL, + store_ids INT8[] NOT NULL, + replication_size_bytes INT8 NOT NULL, + total_ranges INT8 NOT NULL, + total_live_data_bytes INT8 NOT NULL, + total_data_bytes INT8 NOT NULL, + perc_live_data FLOAT8 NOT NULL, + last_update_error STRING NULL, + last_updated TIMESTAMPTZ NOT NULL DEFAULT now():::TIMESTAMPTZ, + crdb_internal_last_updated_table_id_shard_16 INT4 NOT VISIBLE NOT NULL AS (mod(fnv32(md5(crdb_internal.datums_to_bytes(table_id, last_updated))), 16:::INT8)) VIRTUAL, + CONSTRAINT "primary" PRIMARY KEY (db_id ASC, table_id ASC), + INDEX replication_size_bytes_table_id_idx (replication_size_bytes DESC, table_id ASC), + INDEX total_ranges_table_id_idx (total_ranges DESC, table_id ASC), + INDEX total_columns_table_id_idx (total_columns DESC, table_id ASC), + INDEX total_indexes_table_id_idx (total_indexes DESC, table_id ASC), + INDEX perc_live_data_id_idx (perc_live_data DESC, table_id ASC), + INDEX last_updated_idx (last_updated DESC, table_id ASC) USING HASH WITH (bucket_count=16), + INVERTED INDEX db_name_gin (db_name gin_trgm_ops), + INVERTED INDEX 
table_name_gin (table_name gin_trgm_ops), + INVERTED INDEX schema_name_gin (schema_name gin_trgm_ops), + INVERTED INDEX store_ids_gin (store_ids) +); schema_telemetry ---- {"database":{"name":"defaultdb","id":100,"modificationTime":{"wallTime":"0"},"version":"1","privileges":{"users":[{"userProto":"admin","privileges":"2","withGrantOption":"2"},{"userProto":"public","privileges":"2048"},{"userProto":"root","privileges":"2","withGrantOption":"2"}],"ownerProto":"root","version":3},"schemas":{"public":{"id":101}},"defaultPrivileges":{}}} {"database":{"name":"postgres","id":102,"modificationTime":{"wallTime":"0"},"version":"1","privileges":{"users":[{"userProto":"admin","privileges":"2","withGrantOption":"2"},{"userProto":"public","privileges":"2048"},{"userProto":"root","privileges":"2","withGrantOption":"2"}],"ownerProto":"root","version":3},"schemas":{"public":{"id":103}},"defaultPrivileges":{}}} -{"database":{"name":"system","id":1,"modificationTime":{"wallTime":"0"},"version":"1","privileges":{"users":[{"userProto":"admin","privileges":"2048","withGrantOption":"2048"},{"userProto":"root","privileges":"2048","withGrantOption":"2048"}],"ownerProto":"node","version":3},"systemDatabaseSchemaVersion":{"majorVal":1000024,"minorVal":2,"internal":6}}} +{"database":{"name":"system","id":1,"modificationTime":{"wallTime":"0"},"version":"1","privileges":{"users":[{"userProto":"admin","privileges":"2048","withGrantOption":"2048"},{"userProto":"root","privileges":"2048","withGrantOption":"2048"}],"ownerProto":"node","version":3},"systemDatabaseSchemaVersion":{"majorVal":1000024,"minorVal":2,"internal":14}}} {"table":{"name":"comments","id":24,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"type","id":1,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"object_id","id":2,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"sub_id","id":3,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"comment","id":4,"type":{"family":"StringFamily","oid":25}}],"nextColumnId":5,"families":[{"name":"primary","columnNames":["type","object_id","sub_id"],"columnIds":[1,2,3]},{"name":"fam_4_comment","id":4,"columnNames":["comment"],"columnIds":[4],"defaultColumnId":4}],"nextFamilyId":5,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["type","object_id","sub_id"],"keyColumnDirections":["ASC","ASC","ASC"],"storeColumnNames":["comment"],"keyColumnIds":[1,2,3],"storeColumnIds":[4],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"nextIndexId":2,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"public","privileges":"32"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":2}} 
{"table":{"name":"database_role_settings","id":44,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"database_id","id":1,"type":{"family":"OidFamily","oid":26}},{"name":"role_name","id":2,"type":{"family":"StringFamily","oid":25}},{"name":"settings","id":3,"type":{"family":"ArrayFamily","arrayElemType":"StringFamily","oid":1009,"arrayContents":{"family":"StringFamily","oid":25}}},{"name":"role_id","id":4,"type":{"family":"OidFamily","oid":26}}],"nextColumnId":5,"families":[{"name":"primary","columnNames":["database_id","role_name","settings","role_id"],"columnIds":[1,2,3,4]}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["database_id","role_name"],"keyColumnDirections":["ASC","ASC"],"storeColumnNames":["settings","role_id"],"keyColumnIds":[1,2],"storeColumnIds":[3,4],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":2},"indexes":[{"name":"database_role_settings_database_id_role_id_key","id":2,"unique":true,"version":3,"keyColumnNames":["database_id","role_id"],"keyColumnDirections":["ASC","ASC"],"storeColumnNames":["settings"],"keyColumnIds":[1,4],"keySuffixColumnIds":[2],"storeColumnIds":[3],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{},"constraintId":1}],"nextIndexId":3,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":3}} {"table":{"name":"descriptor","id":3,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"id","id":1,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"descriptor","id":2,"type":{"family":"BytesFamily","oid":17},"nullable":true}],"nextColumnId":3,"families":[{"name":"primary","columnNames":["id"],"columnIds":[1]},{"name":"fam_2_descriptor","id":2,"columnNames":["descriptor"],"columnIds":[2],"defaultColumnId":2}],"nextFamilyId":3,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["id"],"keyColumnDirections":["ASC"],"storeColumnNames":["descriptor"],"keyColumnIds":[1],"storeColumnIds":[2],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"nextIndexId":2,"privileges":{"users":[{"userProto":"admin","privileges":"32","withGrantOption":"32"},{"userProto":"root","privileges":"32","withGrantOption":"32"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":2}} @@ -677,7 +707,7 @@ schema_telemetry 
{"table":{"name":"span_stats_samples","id":57,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"id","id":1,"type":{"family":"UuidFamily","oid":2950},"defaultExpr":"gen_random_uuid()"},{"name":"sample_time","id":2,"type":{"family":"TimestampFamily","oid":1114},"defaultExpr":"now():::TIMESTAMP"}],"nextColumnId":3,"families":[{"name":"primary","columnNames":["id","sample_time"],"columnIds":[1,2],"defaultColumnId":2}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["id"],"keyColumnDirections":["ASC"],"storeColumnNames":["sample_time"],"keyColumnIds":[1],"storeColumnIds":[2],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":2},"indexes":[{"name":"samples_sample_time_idx","id":2,"unique":true,"version":3,"keyColumnNames":["sample_time"],"keyColumnDirections":["ASC"],"keyColumnIds":[2],"keySuffixColumnIds":[1],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{},"constraintId":1}],"nextIndexId":3,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":3}} {"table":{"name":"span_stats_tenant_boundaries","id":58,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"tenant_id","id":1,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"boundaries","id":2,"type":{"family":"BytesFamily","oid":17}}],"nextColumnId":3,"families":[{"name":"primary","columnNames":["tenant_id","boundaries"],"columnIds":[1,2],"defaultColumnId":2}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["tenant_id"],"keyColumnDirections":["ASC"],"storeColumnNames":["boundaries"],"keyColumnIds":[1],"storeColumnIds":[2],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"nextIndexId":2,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":2}} 
{"table":{"name":"span_stats_unique_keys","id":55,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"id","id":1,"type":{"family":"UuidFamily","oid":2950},"defaultExpr":"gen_random_uuid()"},{"name":"key_bytes","id":2,"type":{"family":"BytesFamily","oid":17},"nullable":true}],"nextColumnId":3,"families":[{"name":"primary","columnNames":["id","key_bytes"],"columnIds":[1,2],"defaultColumnId":2}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["id"],"keyColumnDirections":["ASC"],"storeColumnNames":["key_bytes"],"keyColumnIds":[1],"storeColumnIds":[2],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":2},"indexes":[{"name":"unique_keys_key_bytes_idx","id":2,"unique":true,"version":3,"keyColumnNames":["key_bytes"],"keyColumnDirections":["ASC"],"keyColumnIds":[2],"keySuffixColumnIds":[1],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{},"constraintId":1}],"nextIndexId":3,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":3}} -{"table":{"name":"sql_instances","id":46,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"id","id":1,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"addr","id":2,"type":{"family":"StringFamily","oid":25},"nullable":true},{"name":"session_id","id":3,"type":{"family":"BytesFamily","oid":17},"nullable":true},{"name":"locality","id":4,"type":{"family":"JsonFamily","oid":3802},"nullable":true},{"name":"sql_addr","id":5,"type":{"family":"StringFamily","oid":25},"nullable":true},{"name":"crdb_region","id":6,"type":{"family":"BytesFamily","oid":17}},{"name":"binary_version","id":7,"type":{"family":"StringFamily","oid":25},"nullable":true}],"nextColumnId":8,"families":[{"name":"primary","columnNames":["id","addr","session_id","locality","sql_addr","crdb_region","binary_version"],"columnIds":[1,2,3,4,5,6,7]}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":2,"unique":true,"version":4,"keyColumnNames":["crdb_region","id"],"keyColumnDirections":["ASC","ASC"],"storeColumnNames":["addr","session_id","locality","sql_addr","binary_version"],"keyColumnIds":[6,1],"storeColumnIds":[2,3,4,5,7],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"nextIndexId":3,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":2}} 
+{"table":{"name":"sql_instances","id":46,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"id","id":1,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"addr","id":2,"type":{"family":"StringFamily","oid":25},"nullable":true},{"name":"session_id","id":3,"type":{"family":"BytesFamily","oid":17},"nullable":true},{"name":"locality","id":4,"type":{"family":"JsonFamily","oid":3802},"nullable":true},{"name":"sql_addr","id":5,"type":{"family":"StringFamily","oid":25},"nullable":true},{"name":"crdb_region","id":6,"type":{"family":"BytesFamily","oid":17}},{"name":"binary_version","id":7,"type":{"family":"StringFamily","oid":25},"nullable":true},{"name":"is_draining","id":8,"type":{"oid":16},"nullable":true}],"nextColumnId":9,"families":[{"name":"primary","columnNames":["id","addr","session_id","locality","sql_addr","crdb_region","binary_version","is_draining"],"columnIds":[1,2,3,4,5,6,7,8]}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":2,"unique":true,"version":4,"keyColumnNames":["crdb_region","id"],"keyColumnDirections":["ASC","ASC"],"storeColumnNames":["addr","session_id","locality","sql_addr","binary_version","is_draining"],"keyColumnIds":[6,1],"storeColumnIds":[2,3,4,5,7,8],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"nextIndexId":3,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":2}} {"table":{"name":"sqlliveness","id":39,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"session_id","id":1,"type":{"family":"BytesFamily","oid":17}},{"name":"expiration","id":2,"type":{"family":"DecimalFamily","oid":1700}},{"name":"crdb_region","id":3,"type":{"family":"BytesFamily","oid":17}}],"nextColumnId":4,"families":[{"name":"primary","columnNames":["crdb_region","session_id","expiration"],"columnIds":[3,1,2],"defaultColumnId":2}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":2,"unique":true,"version":4,"keyColumnNames":["crdb_region","session_id"],"keyColumnDirections":["ASC","ASC"],"storeColumnNames":["expiration"],"keyColumnIds":[3,1],"storeColumnIds":[2],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"nextIndexId":3,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":2}} 
{"table":{"name":"statement_activity","id":61,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"aggregated_ts","id":1,"type":{"family":"TimestampTZFamily","oid":1184}},{"name":"fingerprint_id","id":2,"type":{"family":"BytesFamily","oid":17}},{"name":"transaction_fingerprint_id","id":3,"type":{"family":"BytesFamily","oid":17}},{"name":"plan_hash","id":4,"type":{"family":"BytesFamily","oid":17}},{"name":"app_name","id":5,"type":{"family":"StringFamily","oid":25}},{"name":"agg_interval","id":6,"type":{"family":"IntervalFamily","oid":1186,"intervalDurationField":{}}},{"name":"metadata","id":7,"type":{"family":"JsonFamily","oid":3802}},{"name":"statistics","id":8,"type":{"family":"JsonFamily","oid":3802}},{"name":"plan","id":9,"type":{"family":"JsonFamily","oid":3802}},{"name":"index_recommendations","id":10,"type":{"family":"ArrayFamily","arrayElemType":"StringFamily","oid":1009,"arrayContents":{"family":"StringFamily","oid":25}},"defaultExpr":"ARRAY[]:::STRING[]"},{"name":"execution_count","id":11,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"execution_total_seconds","id":12,"type":{"family":"FloatFamily","width":64,"oid":701}},{"name":"execution_total_cluster_seconds","id":13,"type":{"family":"FloatFamily","width":64,"oid":701}},{"name":"contention_time_avg_seconds","id":14,"type":{"family":"FloatFamily","width":64,"oid":701}},{"name":"cpu_sql_avg_nanos","id":15,"type":{"family":"FloatFamily","width":64,"oid":701}},{"name":"service_latency_avg_seconds","id":16,"type":{"family":"FloatFamily","width":64,"oid":701}},{"name":"service_latency_p99_seconds","id":17,"type":{"family":"FloatFamily","width":64,"oid":701}}],"nextColumnId":18,"families":[{"name":"primary","columnNames":["aggregated_ts","fingerprint_id","transaction_fingerprint_id","plan_hash","app_name","agg_interval","metadata","statistics","plan","index_recommendations","execution_count","execution_total_seconds","execution_total_cluster_seconds","contention_time_avg_seconds","cpu_sql_avg_nanos","service_latency_avg_seconds","service_latency_p99_seconds"],"columnIds":[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17]}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["aggregated_ts","fingerprint_id","transaction_fingerprint_id","plan_hash","app_name"],"keyColumnDirections":["ASC","ASC","ASC","ASC","ASC"],"storeColumnNames":["agg_interval","metadata","statistics","plan","index_recommendations","execution_count","execution_total_seconds","execution_total_cluster_seconds","contention_time_avg_seconds","cpu_sql_avg_nanos","service_latency_avg_seconds","service_latency_p99_seconds"],"keyColumnIds":[1,2,3,4,5],"storeColumnIds":[6,7,8,9,10,11,12,13,14,15,16,17],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"indexes":[{"name":"fingerprint_id_idx","id":2,"version":3,"keyColumnNames":["fingerprint_id","transaction_fingerprint_id"],"keyColumnDirections":["ASC","ASC"],"keyColumnIds":[2,3],"keySuffixColumnIds":[1,4,5],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{}},{"name":"execution_count_idx","id":3,"version":3,"keyColumnNames":["aggregated_ts","execution_count"],"keyColumnDirections":["ASC","DESC"],"keyColumnIds":[1,11],"keySuffixColumnIds":[2,3,4,5],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{}},{"name":"execution_total_seconds_idx","id":4,"version":3,"keyColumnNames":["aggregated_ts","execution_total_seconds"],"
keyColumnDirections":["ASC","DESC"],"keyColumnIds":[1,12],"keySuffixColumnIds":[2,3,4,5],"compositeColumnIds":[12],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{}},{"name":"contention_time_avg_seconds_idx","id":5,"version":3,"keyColumnNames":["aggregated_ts","contention_time_avg_seconds"],"keyColumnDirections":["ASC","DESC"],"keyColumnIds":[1,14],"keySuffixColumnIds":[2,3,4,5],"compositeColumnIds":[14],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{}},{"name":"cpu_sql_avg_nanos_idx","id":6,"version":3,"keyColumnNames":["aggregated_ts","cpu_sql_avg_nanos"],"keyColumnDirections":["ASC","DESC"],"keyColumnIds":[1,15],"keySuffixColumnIds":[2,3,4,5],"compositeColumnIds":[15],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{}},{"name":"service_latency_avg_seconds_idx","id":7,"version":3,"keyColumnNames":["aggregated_ts","service_latency_avg_seconds"],"keyColumnDirections":["ASC","DESC"],"keyColumnIds":[1,16],"keySuffixColumnIds":[2,3,4,5],"compositeColumnIds":[16],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{}},{"name":"service_latency_p99_seconds_idx","id":8,"version":3,"keyColumnNames":["aggregated_ts","service_latency_p99_seconds"],"keyColumnDirections":["ASC","DESC"],"keyColumnIds":[1,17],"keySuffixColumnIds":[2,3,4,5],"compositeColumnIds":[17],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{}}],"nextIndexId":9,"privileges":{"users":[{"userProto":"admin","privileges":"32","withGrantOption":"32"},{"userProto":"root","privileges":"32","withGrantOption":"32"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":2}} {"table":{"name":"statement_bundle_chunks","id":34,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"id","id":1,"type":{"family":"IntFamily","width":64,"oid":20},"defaultExpr":"unique_rowid()"},{"name":"description","id":2,"type":{"family":"StringFamily","oid":25},"nullable":true},{"name":"data","id":3,"type":{"family":"BytesFamily","oid":17}}],"nextColumnId":4,"families":[{"name":"primary","columnNames":["id","description","data"],"columnIds":[1,2,3]}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["id"],"keyColumnDirections":["ASC"],"storeColumnNames":["description","data"],"keyColumnIds":[1],"storeColumnIds":[2,3],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"nextIndexId":2,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":2}} @@ -685,6 +715,7 @@ schema_telemetry 
{"table":{"name":"statement_diagnostics_requests","id":35,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"id","id":1,"type":{"family":"IntFamily","width":64,"oid":20},"defaultExpr":"unique_rowid()"},{"name":"completed","id":2,"type":{"oid":16},"defaultExpr":"false"},{"name":"statement_fingerprint","id":3,"type":{"family":"StringFamily","oid":25}},{"name":"statement_diagnostics_id","id":4,"type":{"family":"IntFamily","width":64,"oid":20},"nullable":true},{"name":"requested_at","id":5,"type":{"family":"TimestampTZFamily","oid":1184}},{"name":"min_execution_latency","id":6,"type":{"family":"IntervalFamily","oid":1186,"intervalDurationField":{}},"nullable":true},{"name":"expires_at","id":7,"type":{"family":"TimestampTZFamily","oid":1184},"nullable":true},{"name":"sampling_probability","id":8,"type":{"family":"FloatFamily","width":64,"oid":701},"nullable":true},{"name":"plan_gist","id":9,"type":{"family":"StringFamily","oid":25},"nullable":true},{"name":"anti_plan_gist","id":10,"type":{"oid":16},"nullable":true},{"name":"redacted","id":11,"type":{"oid":16},"defaultExpr":"false"}],"nextColumnId":12,"families":[{"name":"primary","columnNames":["id","completed","statement_fingerprint","statement_diagnostics_id","requested_at","min_execution_latency","expires_at","sampling_probability","plan_gist","anti_plan_gist","redacted"],"columnIds":[1,2,3,4,5,6,7,8,9,10,11]}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["id"],"keyColumnDirections":["ASC"],"storeColumnNames":["completed","statement_fingerprint","statement_diagnostics_id","requested_at","min_execution_latency","expires_at","sampling_probability","plan_gist","anti_plan_gist","redacted"],"keyColumnIds":[1],"storeColumnIds":[2,3,4,5,6,7,8,9,10,11],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"indexes":[{"name":"completed_idx","id":2,"version":3,"keyColumnNames":["completed","id"],"keyColumnDirections":["ASC","ASC"],"storeColumnNames":["statement_fingerprint","min_execution_latency","expires_at","sampling_probability","plan_gist","anti_plan_gist","redacted"],"keyColumnIds":[2,1],"storeColumnIds":[3,6,7,8,9,10,11],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{}}],"nextIndexId":3,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"checks":[{"expr":"sampling_probability BETWEEN _:::FLOAT8 AND _:::FLOAT8","name":"check_sampling_probability","columnIds":[8],"constraintId":2}],"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":3}} 
{"table":{"name":"statement_execution_insights","id":66,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"session_id","id":1,"type":{"family":"StringFamily","oid":25}},{"name":"transaction_id","id":2,"type":{"family":"UuidFamily","oid":2950}},{"name":"transaction_fingerprint_id","id":3,"type":{"family":"BytesFamily","oid":17}},{"name":"statement_id","id":4,"type":{"family":"StringFamily","oid":25}},{"name":"statement_fingerprint_id","id":5,"type":{"family":"BytesFamily","oid":17}},{"name":"problem","id":6,"type":{"family":"IntFamily","width":64,"oid":20},"nullable":true},{"name":"causes","id":7,"type":{"family":"ArrayFamily","width":64,"arrayElemType":"IntFamily","oid":1016,"arrayContents":{"family":"IntFamily","width":64,"oid":20}},"nullable":true},{"name":"query","id":8,"type":{"family":"StringFamily","oid":25},"nullable":true},{"name":"status","id":9,"type":{"family":"IntFamily","width":64,"oid":20},"nullable":true},{"name":"start_time","id":10,"type":{"family":"TimestampTZFamily","oid":1184},"nullable":true},{"name":"end_time","id":11,"type":{"family":"TimestampTZFamily","oid":1184},"nullable":true},{"name":"full_scan","id":12,"type":{"oid":16},"nullable":true},{"name":"user_name","id":13,"type":{"family":"StringFamily","oid":25},"nullable":true},{"name":"app_name","id":14,"type":{"family":"StringFamily","oid":25},"nullable":true},{"name":"user_priority","id":15,"type":{"family":"StringFamily","oid":25},"nullable":true},{"name":"database_name","id":16,"type":{"family":"StringFamily","oid":25},"nullable":true},{"name":"plan_gist","id":17,"type":{"family":"StringFamily","oid":25},"nullable":true},{"name":"retries","id":18,"type":{"family":"IntFamily","width":64,"oid":20},"nullable":true},{"name":"last_retry_reason","id":19,"type":{"family":"StringFamily","oid":25},"nullable":true},{"name":"execution_node_ids","id":20,"type":{"family":"ArrayFamily","width":64,"arrayElemType":"IntFamily","oid":1016,"arrayContents":{"family":"IntFamily","width":64,"oid":20}},"nullable":true},{"name":"index_recommendations","id":21,"type":{"family":"ArrayFamily","arrayElemType":"StringFamily","oid":1009,"arrayContents":{"family":"StringFamily","oid":25}},"nullable":true},{"name":"implicit_txn","id":22,"type":{"oid":16},"nullable":true},{"name":"cpu_sql_nanos","id":23,"type":{"family":"IntFamily","width":64,"oid":20},"nullable":true},{"name":"error_code","id":24,"type":{"family":"StringFamily","oid":25},"nullable":true},{"name":"contention_time","id":25,"type":{"family":"IntervalFamily","oid":1186,"intervalDurationField":{}},"nullable":true},{"name":"contention_info","id":26,"type":{"family":"JsonFamily","oid":3802},"nullable":true},{"name":"details","id":27,"type":{"family":"JsonFamily","oid":3802},"nullable":true},{"name":"created","id":28,"type":{"family":"TimestampTZFamily","oid":1184},"defaultExpr":"now():::TIMESTAMPTZ"},{"name":"crdb_internal_end_time_start_time_shard_16","id":29,"type":{"family":"IntFamily","width":32,"oid":23},"hidden":true,"computeExpr":"mod(fnv32(md5(crdb_internal.datums_to_bytes(end_time, start_time))), 
_:::INT8)","virtual":true}],"nextColumnId":30,"families":[{"name":"primary","columnNames":["session_id","transaction_id","transaction_fingerprint_id","statement_id","statement_fingerprint_id","problem","causes","query","status","start_time","end_time","full_scan","user_name","app_name","user_priority","database_name","plan_gist","retries","last_retry_reason","execution_node_ids","index_recommendations","implicit_txn","cpu_sql_nanos","error_code","contention_time","contention_info","details","created"],"columnIds":[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28]}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["statement_id","transaction_id"],"keyColumnDirections":["ASC","ASC"],"storeColumnNames":["session_id","transaction_fingerprint_id","statement_fingerprint_id","problem","causes","query","status","start_time","end_time","full_scan","user_name","app_name","user_priority","database_name","plan_gist","retries","last_retry_reason","execution_node_ids","index_recommendations","implicit_txn","cpu_sql_nanos","error_code","contention_time","contention_info","details","created"],"keyColumnIds":[4,2],"storeColumnIds":[1,3,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"indexes":[{"name":"transaction_id_idx","id":2,"version":3,"keyColumnNames":["transaction_id"],"keyColumnDirections":["ASC"],"keyColumnIds":[2],"keySuffixColumnIds":[4],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{}},{"name":"transaction_fingerprint_id_idx","id":3,"version":3,"keyColumnNames":["transaction_fingerprint_id","start_time","end_time"],"keyColumnDirections":["ASC","DESC","DESC"],"keyColumnIds":[3,10,11],"keySuffixColumnIds":[4,2],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{}},{"name":"statement_fingerprint_id_idx","id":4,"version":3,"keyColumnNames":["statement_fingerprint_id","start_time","end_time"],"keyColumnDirections":["ASC","DESC","DESC"],"keyColumnIds":[5,10,11],"keySuffixColumnIds":[4,2],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{}},{"name":"time_range_idx","id":5,"version":3,"keyColumnNames":["crdb_internal_end_time_start_time_shard_16","start_time","end_time"],"keyColumnDirections":["ASC","DESC","DESC"],"keyColumnIds":[29,10,11],"keySuffixColumnIds":[4,2],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{"isSharded":true,"name":"crdb_internal_end_time_start_time_shard_16","shardBuckets":16,"columnNames":["end_time","start_time"]},"geoConfig":{}}],"nextIndexId":6,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"checks":[{"expr":"crdb_internal_end_time_start_time_shard_16 IN (_:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8)","name":"check_crdb_internal_end_time_start_time_shard_16","columnIds":[29],"fromHashShardedColumn":true,"constraintId":2}],"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":3}} 
{"table":{"name":"statement_statistics","id":42,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"aggregated_ts","id":1,"type":{"family":"TimestampTZFamily","oid":1184}},{"name":"fingerprint_id","id":2,"type":{"family":"BytesFamily","oid":17}},{"name":"transaction_fingerprint_id","id":3,"type":{"family":"BytesFamily","oid":17}},{"name":"plan_hash","id":4,"type":{"family":"BytesFamily","oid":17}},{"name":"app_name","id":5,"type":{"family":"StringFamily","oid":25}},{"name":"node_id","id":6,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"agg_interval","id":7,"type":{"family":"IntervalFamily","oid":1186,"intervalDurationField":{}}},{"name":"metadata","id":8,"type":{"family":"JsonFamily","oid":3802}},{"name":"statistics","id":9,"type":{"family":"JsonFamily","oid":3802}},{"name":"plan","id":10,"type":{"family":"JsonFamily","oid":3802}},{"name":"crdb_internal_aggregated_ts_app_name_fingerprint_id_node_id_plan_hash_transaction_fingerprint_id_shard_8","id":11,"type":{"family":"IntFamily","width":32,"oid":23},"hidden":true,"computeExpr":"mod(fnv32(crdb_internal.datums_to_bytes(aggregated_ts, app_name, fingerprint_id, node_id, plan_hash, transaction_fingerprint_id)), _:::INT8)"},{"name":"index_recommendations","id":12,"type":{"family":"ArrayFamily","arrayElemType":"StringFamily","oid":1009,"arrayContents":{"family":"StringFamily","oid":25}},"defaultExpr":"ARRAY[]:::STRING[]"},{"name":"indexes_usage","id":13,"type":{"family":"JsonFamily","oid":3802},"nullable":true,"computeExpr":"(statistics-\u003e'_':::STRING)-\u003e'_':::STRING","virtual":true},{"name":"execution_count","id":14,"type":{"family":"IntFamily","width":64,"oid":20},"nullable":true,"computeExpr":"((statistics-\u003e'_':::STRING)-\u003e'_':::STRING)::INT8"},{"name":"service_latency","id":15,"type":{"family":"FloatFamily","width":64,"oid":701},"nullable":true,"computeExpr":"(((statistics-\u003e'_':::STRING)-\u003e'_':::STRING)-\u003e'_':::STRING)::FLOAT8"},{"name":"cpu_sql_nanos","id":16,"type":{"family":"FloatFamily","width":64,"oid":701},"nullable":true,"computeExpr":"(((statistics-\u003e'_':::STRING)-\u003e'_':::STRING)-\u003e'_':::STRING)::FLOAT8"},{"name":"contention_time","id":17,"type":{"family":"FloatFamily","width":64,"oid":701},"nullable":true,"computeExpr":"(((statistics-\u003e'_':::STRING)-\u003e'_':::STRING)-\u003e'_':::STRING)::FLOAT8"},{"name":"total_estimated_execution_time","id":18,"type":{"family":"FloatFamily","width":64,"oid":701},"nullable":true,"computeExpr":"((statistics-\u003e'_':::STRING)-\u003e\u003e'_':::STRING)::FLOAT8 * 
(((statistics-\u003e'_':::STRING)-\u003e'_':::STRING)-\u003e\u003e'_':::STRING)::FLOAT8"},{"name":"p99_latency","id":19,"type":{"family":"FloatFamily","width":64,"oid":701},"nullable":true,"computeExpr":"(((statistics-\u003e'_':::STRING)-\u003e'_':::STRING)-\u003e'_':::STRING)::FLOAT8"}],"nextColumnId":20,"families":[{"name":"primary","columnNames":["crdb_internal_aggregated_ts_app_name_fingerprint_id_node_id_plan_hash_transaction_fingerprint_id_shard_8","aggregated_ts","fingerprint_id","transaction_fingerprint_id","plan_hash","app_name","node_id","agg_interval","metadata","statistics","plan","index_recommendations","execution_count","service_latency","cpu_sql_nanos","contention_time","total_estimated_execution_time","p99_latency"],"columnIds":[11,1,2,3,4,5,6,7,8,9,10,12,14,15,16,17,18,19]}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["crdb_internal_aggregated_ts_app_name_fingerprint_id_node_id_plan_hash_transaction_fingerprint_id_shard_8","aggregated_ts","fingerprint_id","transaction_fingerprint_id","plan_hash","app_name","node_id"],"keyColumnDirections":["ASC","ASC","ASC","ASC","ASC","ASC","ASC"],"storeColumnNames":["agg_interval","metadata","statistics","plan","index_recommendations","execution_count","service_latency","cpu_sql_nanos","contention_time","total_estimated_execution_time","p99_latency"],"keyColumnIds":[11,1,2,3,4,5,6],"storeColumnIds":[7,8,9,10,12,14,15,16,17,18,19],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{"isSharded":true,"name":"crdb_internal_aggregated_ts_app_name_fingerprint_id_node_id_plan_hash_transaction_fingerprint_id_shard_8","shardBuckets":8,"columnNames":["aggregated_ts","app_name","fingerprint_id","node_id","plan_hash","transaction_fingerprint_id"]},"geoConfig":{},"constraintId":1},"indexes":[{"name":"fingerprint_stats_idx","id":2,"version":3,"keyColumnNames":["fingerprint_id","transaction_fingerprint_id"],"keyColumnDirections":["ASC","ASC"],"keyColumnIds":[2,3],"keySuffixColumnIds":[11,1,4,5,6],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{}},{"name":"indexes_usage_idx","id":3,"version":3,"keyColumnNames":["indexes_usage"],"keyColumnDirections":["ASC"],"invertedColumnKinds":["DEFAULT"],"keyColumnIds":[13],"keySuffixColumnIds":[11,1,2,3,4,5,6],"foreignKey":{},"interleave":{},"partitioning":{},"type":"INVERTED","sharded":{},"geoConfig":{}},{"name":"execution_count_idx","id":4,"version":3,"keyColumnNames":["aggregated_ts","app_name","execution_count"],"keyColumnDirections":["ASC","ASC","DESC"],"keyColumnIds":[1,5,14],"keySuffixColumnIds":[11,2,3,4,6],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{},"predicate":"app_name NOT LIKE '_':::STRING"},{"name":"service_latency_idx","id":5,"version":3,"keyColumnNames":["aggregated_ts","app_name","service_latency"],"keyColumnDirections":["ASC","ASC","DESC"],"keyColumnIds":[1,5,15],"keySuffixColumnIds":[11,2,3,4,6],"compositeColumnIds":[15],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{},"predicate":"app_name NOT LIKE '_':::STRING"},{"name":"cpu_sql_nanos_idx","id":6,"version":3,"keyColumnNames":["aggregated_ts","app_name","cpu_sql_nanos"],"keyColumnDirections":["ASC","ASC","DESC"],"keyColumnIds":[1,5,16],"keySuffixColumnIds":[11,2,3,4,6],"compositeColumnIds":[16],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{},"predicate":"app_name NOT LIKE 
'_':::STRING"},{"name":"contention_time_idx","id":7,"version":3,"keyColumnNames":["aggregated_ts","app_name","contention_time"],"keyColumnDirections":["ASC","ASC","DESC"],"keyColumnIds":[1,5,17],"keySuffixColumnIds":[11,2,3,4,6],"compositeColumnIds":[17],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{},"predicate":"app_name NOT LIKE '_':::STRING"},{"name":"total_estimated_execution_time_idx","id":8,"version":3,"keyColumnNames":["aggregated_ts","app_name","total_estimated_execution_time"],"keyColumnDirections":["ASC","ASC","DESC"],"keyColumnIds":[1,5,18],"keySuffixColumnIds":[11,2,3,4,6],"compositeColumnIds":[18],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{},"predicate":"app_name NOT LIKE '_':::STRING"},{"name":"p99_latency_idx","id":9,"version":3,"keyColumnNames":["aggregated_ts","app_name","p99_latency"],"keyColumnDirections":["ASC","ASC","DESC"],"keyColumnIds":[1,5,19],"keySuffixColumnIds":[11,2,3,4,6],"compositeColumnIds":[19],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{},"predicate":"app_name NOT LIKE '_':::STRING"}],"nextIndexId":10,"privileges":{"users":[{"userProto":"admin","privileges":"32","withGrantOption":"32"},{"userProto":"root","privileges":"32","withGrantOption":"32"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"checks":[{"expr":"crdb_internal_aggregated_ts_app_name_fingerprint_id_node_id_plan_hash_transaction_fingerprint_id_shard_8 IN (_:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8)","name":"check_crdb_internal_aggregated_ts_app_name_fingerprint_id_node_id_plan_hash_transaction_fingerprint_id_shard_8","columnIds":[11],"fromHashShardedColumn":true,"constraintId":2}],"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":3}} +{"table":{"name":"table_metadata","id":67,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"db_id","id":1,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"table_id","id":2,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"db_name","id":3,"type":{"family":"StringFamily","oid":25}},{"name":"schema_name","id":4,"type":{"family":"StringFamily","oid":25}},{"name":"table_name","id":5,"type":{"family":"StringFamily","oid":25}},{"name":"total_columns","id":6,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"total_indexes","id":7,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"store_ids","id":8,"type":{"family":"ArrayFamily","width":64,"arrayElemType":"IntFamily","oid":1016,"arrayContents":{"family":"IntFamily","width":64,"oid":20}}},{"name":"replication_size_bytes","id":9,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"total_ranges","id":10,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"total_live_data_bytes","id":11,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"total_data_bytes","id":12,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"perc_live_data","id":13,"type":{"family":"FloatFamily","width":64,"oid":701}},{"name":"last_update_error","id":14,"type":{"family":"StringFamily","oid":25},"nullable":true},{"name":"last_updated","id":15,"type":{"family":"TimestampTZFamily","oid":1184},"defaultExpr":"now():::TIMESTAMPTZ"},{"name":"crdb_internal_last_updated_table_id_shard_16","id":16,"type":{"family":"IntFamily","width":32,"oid":23},"hidden":true,"computeExpr":"mod(fnv32(md5(crdb_internal.datums_to_bytes(table_id, last_updated))), 
_:::INT8)","virtual":true}],"nextColumnId":17,"families":[{"name":"primary","columnNames":["db_id","table_id","db_name","schema_name","table_name","total_columns","total_indexes","store_ids","replication_size_bytes","total_ranges","total_live_data_bytes","total_data_bytes","perc_live_data","last_update_error","last_updated"],"columnIds":[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["db_id","table_id"],"keyColumnDirections":["ASC","ASC"],"storeColumnNames":["db_name","schema_name","table_name","total_columns","total_indexes","store_ids","replication_size_bytes","total_ranges","total_live_data_bytes","total_data_bytes","perc_live_data","last_update_error","last_updated"],"keyColumnIds":[1,2],"storeColumnIds":[3,4,5,6,7,8,9,10,11,12,13,14,15],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"indexes":[{"name":"replication_size_bytes_table_id_idx","id":2,"version":3,"keyColumnNames":["replication_size_bytes","table_id"],"keyColumnDirections":["DESC","ASC"],"keyColumnIds":[9,2],"keySuffixColumnIds":[1],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{}},{"name":"total_ranges_table_id_idx","id":3,"version":3,"keyColumnNames":["total_ranges","table_id"],"keyColumnDirections":["DESC","ASC"],"keyColumnIds":[10,2],"keySuffixColumnIds":[1],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{}},{"name":"total_columns_table_id_idx","id":4,"version":3,"keyColumnNames":["total_columns","table_id"],"keyColumnDirections":["DESC","ASC"],"keyColumnIds":[6,2],"keySuffixColumnIds":[1],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{}},{"name":"total_indexes_table_id_idx","id":5,"version":3,"keyColumnNames":["total_indexes","table_id"],"keyColumnDirections":["DESC","ASC"],"keyColumnIds":[7,2],"keySuffixColumnIds":[1],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{}},{"name":"perc_live_data_id_idx","id":6,"version":3,"keyColumnNames":["perc_live_data","table_id"],"keyColumnDirections":["DESC","ASC"],"keyColumnIds":[13,2],"keySuffixColumnIds":[1],"compositeColumnIds":[13],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{}},{"name":"last_updated_idx","id":7,"version":3,"keyColumnNames":["crdb_internal_last_updated_table_id_shard_16","last_updated","table_id"],"keyColumnDirections":["ASC","DESC","ASC"],"keyColumnIds":[16,15,2],"keySuffixColumnIds":[1],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{"isSharded":true,"name":"crdb_internal_last_updated_table_id_shard_16","shardBuckets":16,"columnNames":["last_updated","table_id"]},"geoConfig":{}},{"name":"db_name_gin","id":8,"version":3,"keyColumnNames":["db_name"],"keyColumnDirections":["ASC"],"invertedColumnKinds":["TRIGRAM"],"keyColumnIds":[3],"keySuffixColumnIds":[1,2],"foreignKey":{},"interleave":{},"partitioning":{},"type":"INVERTED","sharded":{},"geoConfig":{}},{"name":"table_name_gin","id":9,"version":3,"keyColumnNames":["table_name"],"keyColumnDirections":["ASC"],"invertedColumnKinds":["TRIGRAM"],"keyColumnIds":[5],"keySuffixColumnIds":[1,2],"foreignKey":{},"interleave":{},"partitioning":{},"type":"INVERTED","sharded":{},"geoConfig":{}},{"name":"schema_name_gin","id":10,"version":3,"keyColumnNames":["schema_name"],"keyColumnDirections":["ASC"],"invertedColumnKinds":["TRIGRAM"],"keyColumnIds":[4],"keySuffixColumnIds":[1,2],"foreignKey":{},"interleave":{},"part
itioning":{},"type":"INVERTED","sharded":{},"geoConfig":{}},{"name":"store_ids_gin","id":11,"version":3,"keyColumnNames":["store_ids"],"keyColumnDirections":["ASC"],"invertedColumnKinds":["DEFAULT"],"keyColumnIds":[8],"keySuffixColumnIds":[1,2],"foreignKey":{},"interleave":{},"partitioning":{},"type":"INVERTED","sharded":{},"geoConfig":{}}],"nextIndexId":12,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"checks":[{"expr":"crdb_internal_last_updated_table_id_shard_16 IN (_:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8)","name":"check_crdb_internal_last_updated_table_id_shard_16","columnIds":[16],"fromHashShardedColumn":true,"constraintId":2}],"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":3}} {"table":{"name":"table_statistics","id":20,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"tableID","id":1,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"statisticID","id":2,"type":{"family":"IntFamily","width":64,"oid":20},"defaultExpr":"unique_rowid()"},{"name":"name","id":3,"type":{"family":"StringFamily","oid":25},"nullable":true},{"name":"columnIDs","id":4,"type":{"family":"ArrayFamily","width":64,"arrayElemType":"IntFamily","oid":1016,"arrayContents":{"family":"IntFamily","width":64,"oid":20}}},{"name":"createdAt","id":5,"type":{"family":"TimestampFamily","oid":1114},"defaultExpr":"now():::TIMESTAMP"},{"name":"rowCount","id":6,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"distinctCount","id":7,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"nullCount","id":8,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"histogram","id":9,"type":{"family":"BytesFamily","oid":17},"nullable":true},{"name":"avgSize","id":10,"type":{"family":"IntFamily","width":64,"oid":20},"defaultExpr":"_:::INT8"},{"name":"partialPredicate","id":11,"type":{"family":"StringFamily","oid":25},"nullable":true},{"name":"fullStatisticID","id":12,"type":{"family":"IntFamily","width":64,"oid":20},"nullable":true}],"nextColumnId":13,"families":[{"name":"fam_0_tableID_statisticID_name_columnIDs_createdAt_rowCount_distinctCount_nullCount_histogram","columnNames":["tableID","statisticID","name","columnIDs","createdAt","rowCount","distinctCount","nullCount","histogram","avgSize","partialPredicate","fullStatisticID"],"columnIds":[1,2,3,4,5,6,7,8,9,10,11,12]}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["tableID","statisticID"],"keyColumnDirections":["ASC","ASC"],"storeColumnNames":["name","columnIDs","createdAt","rowCount","distinctCount","nullCount","histogram","avgSize","partialPredicate","fullStatisticID"],"keyColumnIds":[1,2],"storeColumnIds":[3,4,5,6,7,8,9,10,11,12],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"nextIndexId":2,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":2}} 
{"table":{"name":"task_payloads","id":59,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"id","id":1,"type":{"family":"StringFamily","oid":25}},{"name":"created","id":2,"type":{"family":"TimestampTZFamily","oid":1184},"defaultExpr":"now():::TIMESTAMPTZ"},{"name":"owner","id":3,"type":{"family":"StringFamily","oid":25}},{"name":"owner_id","id":4,"type":{"family":"OidFamily","oid":26}},{"name":"min_version","id":5,"type":{"family":"StringFamily","oid":25}},{"name":"description","id":6,"type":{"family":"StringFamily","oid":25},"nullable":true},{"name":"type","id":7,"type":{"family":"StringFamily","oid":25}},{"name":"value","id":8,"type":{"family":"BytesFamily","oid":17}}],"nextColumnId":9,"families":[{"name":"primary","columnNames":["id","created","owner","owner_id","min_version","description","type","value"],"columnIds":[1,2,3,4,5,6,7,8]}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["id"],"keyColumnDirections":["ASC"],"storeColumnNames":["created","owner","owner_id","min_version","description","type","value"],"keyColumnIds":[1],"storeColumnIds":[2,3,4,5,6,7,8],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"nextIndexId":2,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":2}} {"table":{"name":"tenant_id_seq","id":63,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"value","id":1,"type":{"family":"IntFamily","width":64,"oid":20}}],"families":[{"name":"primary","columnNames":["value"],"columnIds":[1],"defaultColumnId":1}],"primaryIndex":{"name":"primary","id":1,"version":4,"keyColumnNames":["value"],"keyColumnDirections":["ASC"],"keyColumnIds":[1],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{}},"privileges":{"users":[{"userProto":"admin","privileges":"32","withGrantOption":"32"},{"userProto":"root","privileges":"32","withGrantOption":"32"}],"ownerProto":"node","version":3},"formatVersion":3,"sequenceOpts":{"increment":"1","minValue":"1","maxValue":"9223372036854775807","start":"1","sequenceOwner":{},"cacheSize":"1"},"replacementOf":{"time":{}},"createAsOfTime":{}}} @@ -706,24 +737,22 @@ schema_telemetry snapshot_id=7cd8a9ae-f35c-4cd2-970a-757174600874 max_records=10 ---- {"database":{"name":"defaultdb","id":100,"modificationTime":{"wallTime":"0"},"version":"1","privileges":{"users":[{"userProto":"admin","privileges":"2","withGrantOption":"2"},{"userProto":"public","privileges":"2048"},{"userProto":"root","privileges":"2","withGrantOption":"2"}],"ownerProto":"root","version":3},"schemas":{"public":{"id":101}},"defaultPrivileges":{}}} 
{"table":{"name":"descriptor_id_seq","id":7,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"value","id":1,"type":{"family":"IntFamily","width":64,"oid":20}}],"families":[{"name":"primary","columnNames":["value"],"columnIds":[1],"defaultColumnId":1}],"primaryIndex":{"name":"primary","id":1,"version":4,"keyColumnNames":["value"],"keyColumnDirections":["ASC"],"keyColumnIds":[1],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{}},"privileges":{"users":[{"userProto":"admin","privileges":"32","withGrantOption":"32"},{"userProto":"root","privileges":"32","withGrantOption":"32"}],"ownerProto":"node","version":3},"formatVersion":3,"sequenceOpts":{"increment":"1","minValue":"1","maxValue":"9223372036854775807","start":"1","sequenceOwner":{},"cacheSize":"1"},"replacementOf":{"time":{}},"createAsOfTime":{}}} -{"table":{"name":"locations","id":21,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"localityKey","id":1,"type":{"family":"StringFamily","oid":25}},{"name":"localityValue","id":2,"type":{"family":"StringFamily","oid":25}},{"name":"latitude","id":3,"type":{"family":"DecimalFamily","width":15,"precision":18,"oid":1700}},{"name":"longitude","id":4,"type":{"family":"DecimalFamily","width":15,"precision":18,"oid":1700}}],"nextColumnId":5,"families":[{"name":"fam_0_localityKey_localityValue_latitude_longitude","columnNames":["localityKey","localityValue","latitude","longitude"],"columnIds":[1,2,3,4]}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["localityKey","localityValue"],"keyColumnDirections":["ASC","ASC"],"storeColumnNames":["latitude","longitude"],"keyColumnIds":[1,2],"storeColumnIds":[3,4],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"nextIndexId":2,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":2}} -{"table":{"name":"role_id_seq","id":48,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"value","id":1,"type":{"family":"IntFamily","width":64,"oid":20}}],"families":[{"name":"primary","columnNames":["value"],"columnIds":[1],"defaultColumnId":1}],"primaryIndex":{"name":"primary","id":1,"version":4,"keyColumnNames":["value"],"keyColumnDirections":["ASC"],"keyColumnIds":[1],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{}},"privileges":{"users":[{"userProto":"admin","privileges":"800","withGrantOption":"800"},{"userProto":"root","privileges":"800","withGrantOption":"800"}],"ownerProto":"node","version":3},"formatVersion":3,"sequenceOpts":{"increment":"1","minValue":"100","maxValue":"2147483647","start":"100","sequenceOwner":{},"cacheSize":"1"},"replacementOf":{"time":{}},"createAsOfTime":{}}} 
-{"table":{"name":"span_stats_unique_keys","id":55,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"id","id":1,"type":{"family":"UuidFamily","oid":2950},"defaultExpr":"gen_random_uuid()"},{"name":"key_bytes","id":2,"type":{"family":"BytesFamily","oid":17},"nullable":true}],"nextColumnId":3,"families":[{"name":"primary","columnNames":["id","key_bytes"],"columnIds":[1,2],"defaultColumnId":2}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["id"],"keyColumnDirections":["ASC"],"storeColumnNames":["key_bytes"],"keyColumnIds":[1],"storeColumnIds":[2],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":2},"indexes":[{"name":"unique_keys_key_bytes_idx","id":2,"unique":true,"version":3,"keyColumnNames":["key_bytes"],"keyColumnDirections":["ASC"],"keyColumnIds":[2],"keySuffixColumnIds":[1],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{},"constraintId":1}],"nextIndexId":3,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":3}} +{"table":{"name":"eventlog","id":12,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"timestamp","id":1,"type":{"family":"TimestampFamily","oid":1114}},{"name":"eventType","id":2,"type":{"family":"StringFamily","oid":25}},{"name":"targetID","id":3,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"reportingID","id":4,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"info","id":5,"type":{"family":"StringFamily","oid":25},"nullable":true},{"name":"uniqueID","id":6,"type":{"family":"BytesFamily","oid":17},"defaultExpr":"uuid_v4()"}],"nextColumnId":7,"families":[{"name":"primary","columnNames":["timestamp","uniqueID"],"columnIds":[1,6]},{"name":"fam_2_eventType","id":2,"columnNames":["eventType"],"columnIds":[2],"defaultColumnId":2},{"name":"fam_3_targetID","id":3,"columnNames":["targetID"],"columnIds":[3],"defaultColumnId":3},{"name":"fam_4_reportingID","id":4,"columnNames":["reportingID"],"columnIds":[4],"defaultColumnId":4},{"name":"fam_5_info","id":5,"columnNames":["info"],"columnIds":[5],"defaultColumnId":5}],"nextFamilyId":6,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["timestamp","uniqueID"],"keyColumnDirections":["ASC","ASC"],"storeColumnNames":["eventType","targetID","reportingID","info"],"keyColumnIds":[1,6],"storeColumnIds":[2,3,4,5],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"nextIndexId":2,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":2}} 
+{"table":{"name":"join_tokens","id":41,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"id","id":1,"type":{"family":"UuidFamily","oid":2950}},{"name":"secret","id":2,"type":{"family":"BytesFamily","oid":17}},{"name":"expiration","id":3,"type":{"family":"TimestampTZFamily","oid":1184}}],"nextColumnId":4,"families":[{"name":"primary","columnNames":["id","secret","expiration"],"columnIds":[1,2,3]}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["id"],"keyColumnDirections":["ASC"],"storeColumnNames":["secret","expiration"],"keyColumnIds":[1],"storeColumnIds":[2,3],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"nextIndexId":2,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":2}} +{"table":{"name":"sql_instances","id":46,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"id","id":1,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"addr","id":2,"type":{"family":"StringFamily","oid":25},"nullable":true},{"name":"session_id","id":3,"type":{"family":"BytesFamily","oid":17},"nullable":true},{"name":"locality","id":4,"type":{"family":"JsonFamily","oid":3802},"nullable":true},{"name":"sql_addr","id":5,"type":{"family":"StringFamily","oid":25},"nullable":true},{"name":"crdb_region","id":6,"type":{"family":"BytesFamily","oid":17}},{"name":"binary_version","id":7,"type":{"family":"StringFamily","oid":25},"nullable":true},{"name":"is_draining","id":8,"type":{"oid":16},"nullable":true}],"nextColumnId":9,"families":[{"name":"primary","columnNames":["id","addr","session_id","locality","sql_addr","crdb_region","binary_version","is_draining"],"columnIds":[1,2,3,4,5,6,7,8]}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":2,"unique":true,"version":4,"keyColumnNames":["crdb_region","id"],"keyColumnDirections":["ASC","ASC"],"storeColumnNames":["addr","session_id","locality","sql_addr","binary_version","is_draining"],"keyColumnIds":[6,1],"storeColumnIds":[2,3,4,5,7,8],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"nextIndexId":3,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":2}} 
{"table":{"name":"task_payloads","id":59,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"id","id":1,"type":{"family":"StringFamily","oid":25}},{"name":"created","id":2,"type":{"family":"TimestampTZFamily","oid":1184},"defaultExpr":"now():::TIMESTAMPTZ"},{"name":"owner","id":3,"type":{"family":"StringFamily","oid":25}},{"name":"owner_id","id":4,"type":{"family":"OidFamily","oid":26}},{"name":"min_version","id":5,"type":{"family":"StringFamily","oid":25}},{"name":"description","id":6,"type":{"family":"StringFamily","oid":25},"nullable":true},{"name":"type","id":7,"type":{"family":"StringFamily","oid":25}},{"name":"value","id":8,"type":{"family":"BytesFamily","oid":17}}],"nextColumnId":9,"families":[{"name":"primary","columnNames":["id","created","owner","owner_id","min_version","description","type","value"],"columnIds":[1,2,3,4,5,6,7,8]}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["id"],"keyColumnDirections":["ASC"],"storeColumnNames":["created","owner","owner_id","min_version","description","type","value"],"keyColumnIds":[1],"storeColumnIds":[2,3,4,5,6,7,8],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"nextIndexId":2,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":2}} -{"table":{"name":"transaction_statistics","id":43,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"aggregated_ts","id":1,"type":{"family":"TimestampTZFamily","oid":1184}},{"name":"fingerprint_id","id":2,"type":{"family":"BytesFamily","oid":17}},{"name":"app_name","id":3,"type":{"family":"StringFamily","oid":25}},{"name":"node_id","id":4,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"agg_interval","id":5,"type":{"family":"IntervalFamily","oid":1186,"intervalDurationField":{}}},{"name":"metadata","id":6,"type":{"family":"JsonFamily","oid":3802}},{"name":"statistics","id":7,"type":{"family":"JsonFamily","oid":3802}},{"name":"crdb_internal_aggregated_ts_app_name_fingerprint_id_node_id_shard_8","id":8,"type":{"family":"IntFamily","width":32,"oid":23},"hidden":true,"computeExpr":"mod(fnv32(crdb_internal.datums_to_bytes(aggregated_ts, app_name, fingerprint_id, node_id)), _:::INT8)"},{"name":"execution_count","id":9,"type":{"family":"IntFamily","width":64,"oid":20},"nullable":true,"computeExpr":"((statistics-\u003e'_':::STRING)-\u003e'_':::STRING)::INT8"},{"name":"service_latency","id":10,"type":{"family":"FloatFamily","width":64,"oid":701},"nullable":true,"computeExpr":"(((statistics-\u003e'_':::STRING)-\u003e'_':::STRING)-\u003e'_':::STRING)::FLOAT8"},{"name":"cpu_sql_nanos","id":11,"type":{"family":"FloatFamily","width":64,"oid":701},"nullable":true,"computeExpr":"(((statistics-\u003e'_':::STRING)-\u003e'_':::STRING)-\u003e'_':::STRING)::FLOAT8"},{"name":"contention_time","id":12,"type":{"family":"FloatFamily","width":64,"oid":701},"nullable":true,"computeExpr":"(((statistics-\u003e'_':::STRING)-\u003e'_':::STRING)-\u003e'_':::STRING)::FLOAT8"},{"name":"total_estimated_execution_time","id":13,"type":{"family":"FloatFamily","width":64,"oid":701},"nullable":true,"computeExpr":"((statistics-\u003e'_':::STRING)-\u003e\u003e'_':::STRING)::FLOAT8 * 
(((statistics-\u003e'_':::STRING)-\u003e'_':::STRING)-\u003e\u003e'_':::STRING)::FLOAT8"},{"name":"p99_latency","id":14,"type":{"family":"FloatFamily","width":64,"oid":701},"nullable":true,"computeExpr":"(((statistics-\u003e'_':::STRING)-\u003e'_':::STRING)-\u003e'_':::STRING)::FLOAT8"}],"nextColumnId":15,"families":[{"name":"primary","columnNames":["crdb_internal_aggregated_ts_app_name_fingerprint_id_node_id_shard_8","aggregated_ts","fingerprint_id","app_name","node_id","agg_interval","metadata","statistics","execution_count","service_latency","cpu_sql_nanos","contention_time","total_estimated_execution_time","p99_latency"],"columnIds":[8,1,2,3,4,5,6,7,9,10,11,12,13,14]}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["crdb_internal_aggregated_ts_app_name_fingerprint_id_node_id_shard_8","aggregated_ts","fingerprint_id","app_name","node_id"],"keyColumnDirections":["ASC","ASC","ASC","ASC","ASC"],"storeColumnNames":["agg_interval","metadata","statistics","execution_count","service_latency","cpu_sql_nanos","contention_time","total_estimated_execution_time","p99_latency"],"keyColumnIds":[8,1,2,3,4],"storeColumnIds":[5,6,7,9,10,11,12,13,14],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{"isSharded":true,"name":"crdb_internal_aggregated_ts_app_name_fingerprint_id_node_id_shard_8","shardBuckets":8,"columnNames":["aggregated_ts","app_name","fingerprint_id","node_id"]},"geoConfig":{},"constraintId":1},"indexes":[{"name":"fingerprint_stats_idx","id":2,"version":3,"keyColumnNames":["fingerprint_id"],"keyColumnDirections":["ASC"],"keyColumnIds":[2],"keySuffixColumnIds":[8,1,3,4],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{}},{"name":"execution_count_idx","id":3,"version":3,"keyColumnNames":["aggregated_ts","app_name","execution_count"],"keyColumnDirections":["ASC","ASC","DESC"],"keyColumnIds":[1,3,9],"keySuffixColumnIds":[8,2,4],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{},"predicate":"app_name NOT LIKE '_':::STRING"},{"name":"service_latency_idx","id":4,"version":3,"keyColumnNames":["aggregated_ts","app_name","service_latency"],"keyColumnDirections":["ASC","ASC","DESC"],"keyColumnIds":[1,3,10],"keySuffixColumnIds":[8,2,4],"compositeColumnIds":[10],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{},"predicate":"app_name NOT LIKE '_':::STRING"},{"name":"cpu_sql_nanos_idx","id":5,"version":3,"keyColumnNames":["aggregated_ts","app_name","cpu_sql_nanos"],"keyColumnDirections":["ASC","ASC","DESC"],"keyColumnIds":[1,3,11],"keySuffixColumnIds":[8,2,4],"compositeColumnIds":[11],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{},"predicate":"app_name NOT LIKE '_':::STRING"},{"name":"contention_time_idx","id":6,"version":3,"keyColumnNames":["aggregated_ts","app_name","contention_time"],"keyColumnDirections":["ASC","ASC","DESC"],"keyColumnIds":[1,3,12],"keySuffixColumnIds":[8,2,4],"compositeColumnIds":[12],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{},"predicate":"app_name NOT LIKE '_':::STRING"},{"name":"total_estimated_execution_time_idx","id":7,"version":3,"keyColumnNames":["aggregated_ts","app_name","total_estimated_execution_time"],"keyColumnDirections":["ASC","ASC","DESC"],"keyColumnIds":[1,3,13],"keySuffixColumnIds":[8,2,4],"compositeColumnIds":[13],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{},"predicate":"app_name NOT LIKE 
'_':::STRING"},{"name":"p99_latency_idx","id":8,"version":3,"keyColumnNames":["aggregated_ts","app_name","p99_latency"],"keyColumnDirections":["ASC","ASC","DESC"],"keyColumnIds":[1,3,14],"keySuffixColumnIds":[8,2,4],"compositeColumnIds":[14],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{},"predicate":"app_name NOT LIKE '_':::STRING"}],"nextIndexId":9,"privileges":{"users":[{"userProto":"admin","privileges":"32","withGrantOption":"32"},{"userProto":"root","privileges":"32","withGrantOption":"32"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"checks":[{"expr":"crdb_internal_aggregated_ts_app_name_fingerprint_id_node_id_shard_8 IN (_:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8)","name":"check_crdb_internal_aggregated_ts_app_name_fingerprint_id_node_id_shard_8","columnIds":[8],"fromHashShardedColumn":true,"constraintId":2}],"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":3}} +{"table":{"name":"tenant_tasks","id":60,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"tenant_id","id":1,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"issuer","id":2,"type":{"family":"StringFamily","oid":25}},{"name":"task_id","id":3,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"created","id":4,"type":{"family":"TimestampTZFamily","oid":1184},"defaultExpr":"now():::TIMESTAMPTZ"},{"name":"payload_id","id":5,"type":{"family":"StringFamily","oid":25}},{"name":"owner","id":6,"type":{"family":"StringFamily","oid":25}},{"name":"owner_id","id":7,"type":{"family":"OidFamily","oid":26}}],"nextColumnId":8,"families":[{"name":"primary","columnNames":["tenant_id","issuer","task_id","created","payload_id","owner","owner_id"],"columnIds":[1,2,3,4,5,6,7]}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["tenant_id","issuer","task_id"],"keyColumnDirections":["ASC","ASC","ASC"],"storeColumnNames":["created","payload_id","owner","owner_id"],"keyColumnIds":[1,2,3],"storeColumnIds":[4,5,6,7],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"nextIndexId":2,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":2}} 
+{"table":{"name":"tenant_usage","id":45,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"tenant_id","id":1,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"instance_id","id":2,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"next_instance_id","id":3,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"last_update","id":4,"type":{"family":"TimestampFamily","oid":1114}},{"name":"ru_burst_limit","id":5,"type":{"family":"FloatFamily","width":64,"oid":701},"nullable":true},{"name":"ru_refill_rate","id":6,"type":{"family":"FloatFamily","width":64,"oid":701},"nullable":true},{"name":"ru_current","id":7,"type":{"family":"FloatFamily","width":64,"oid":701},"nullable":true},{"name":"current_share_sum","id":8,"type":{"family":"FloatFamily","width":64,"oid":701},"nullable":true},{"name":"total_consumption","id":9,"type":{"family":"BytesFamily","oid":17},"nullable":true},{"name":"instance_lease","id":10,"type":{"family":"BytesFamily","oid":17},"nullable":true},{"name":"instance_seq","id":11,"type":{"family":"IntFamily","width":64,"oid":20},"nullable":true},{"name":"instance_shares","id":12,"type":{"family":"FloatFamily","width":64,"oid":701},"nullable":true},{"name":"current_rates","id":13,"type":{"family":"BytesFamily","oid":17},"nullable":true},{"name":"next_rates","id":14,"type":{"family":"BytesFamily","oid":17},"nullable":true}],"nextColumnId":15,"families":[{"name":"primary","columnNames":["tenant_id","instance_id","next_instance_id","last_update","ru_burst_limit","ru_refill_rate","ru_current","current_share_sum","total_consumption","instance_lease","instance_seq","instance_shares","current_rates","next_rates"],"columnIds":[1,2,3,4,5,6,7,8,9,10,11,12,13,14]}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["tenant_id","instance_id"],"keyColumnDirections":["ASC","ASC"],"storeColumnNames":["next_instance_id","last_update","ru_burst_limit","ru_refill_rate","ru_current","current_share_sum","total_consumption","instance_lease","instance_seq","instance_shares","current_rates","next_rates"],"keyColumnIds":[1,2],"storeColumnIds":[3,4,5,6,7,8,9,10,11,12,13,14],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"nextIndexId":2,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"excludeDataFromBackup":true,"nextConstraintId":2}} 
{"table":{"name":"web_sessions","id":19,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"id","id":1,"type":{"family":"IntFamily","width":64,"oid":20},"defaultExpr":"unique_rowid()"},{"name":"hashedSecret","id":2,"type":{"family":"BytesFamily","oid":17}},{"name":"username","id":3,"type":{"family":"StringFamily","oid":25}},{"name":"createdAt","id":4,"type":{"family":"TimestampFamily","oid":1114},"defaultExpr":"now():::TIMESTAMP"},{"name":"expiresAt","id":5,"type":{"family":"TimestampFamily","oid":1114}},{"name":"revokedAt","id":6,"type":{"family":"TimestampFamily","oid":1114},"nullable":true},{"name":"lastUsedAt","id":7,"type":{"family":"TimestampFamily","oid":1114},"defaultExpr":"now():::TIMESTAMP"},{"name":"auditInfo","id":8,"type":{"family":"StringFamily","oid":25},"nullable":true},{"name":"user_id","id":9,"type":{"family":"OidFamily","oid":26}}],"nextColumnId":10,"families":[{"name":"fam_0_id_hashedSecret_username_createdAt_expiresAt_revokedAt_lastUsedAt_auditInfo","columnNames":["id","hashedSecret","username","createdAt","expiresAt","revokedAt","lastUsedAt","auditInfo","user_id"],"columnIds":[1,2,3,4,5,6,7,8,9]}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["id"],"keyColumnDirections":["ASC"],"storeColumnNames":["hashedSecret","username","createdAt","expiresAt","revokedAt","lastUsedAt","auditInfo","user_id"],"keyColumnIds":[1],"storeColumnIds":[2,3,4,5,6,7,8,9],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"indexes":[{"name":"web_sessions_expiresAt_idx","id":2,"version":3,"keyColumnNames":["expiresAt"],"keyColumnDirections":["ASC"],"keyColumnIds":[5],"keySuffixColumnIds":[1],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{}},{"name":"web_sessions_createdAt_idx","id":3,"version":3,"keyColumnNames":["createdAt"],"keyColumnDirections":["ASC"],"keyColumnIds":[4],"keySuffixColumnIds":[1],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{}},{"name":"web_sessions_revokedAt_idx","id":4,"version":3,"keyColumnNames":["revokedAt"],"keyColumnDirections":["ASC"],"keyColumnIds":[6],"keySuffixColumnIds":[1],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{}},{"name":"web_sessions_lastUsedAt_idx","id":5,"version":3,"keyColumnNames":["lastUsedAt"],"keyColumnDirections":["ASC"],"keyColumnIds":[7],"keySuffixColumnIds":[1],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{}}],"nextIndexId":6,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":2}} 
-{"table":{"name":"zones","id":5,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"id","id":1,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"config","id":2,"type":{"family":"BytesFamily","oid":17},"nullable":true}],"nextColumnId":3,"families":[{"name":"primary","columnNames":["id"],"columnIds":[1]},{"name":"fam_2_config","id":2,"columnNames":["config"],"columnIds":[2],"defaultColumnId":2}],"nextFamilyId":3,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["id"],"keyColumnDirections":["ASC"],"storeColumnNames":["config"],"keyColumnIds":[1],"storeColumnIds":[2],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"nextIndexId":2,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":2}} -{"schema":{"name":"public","id":103,"modificationTime":{"wallTime":"0"},"version":"1","parentId":102,"privileges":{"users":[{"userProto":"admin","privileges":"2","withGrantOption":"2"},{"userProto":"public","privileges":"516"},{"userProto":"root","privileges":"2","withGrantOption":"2"}],"ownerProto":"root","version":3}}} schema_telemetry snapshot_id=7cd8a9ae-f35c-4cd2-970a-757174600874 max_records=10 ---- {"database":{"name":"defaultdb","id":100,"modificationTime":{"wallTime":"0"},"version":"1","privileges":{"users":[{"userProto":"admin","privileges":"2","withGrantOption":"2"},{"userProto":"public","privileges":"2048"},{"userProto":"root","privileges":"2","withGrantOption":"2"}],"ownerProto":"root","version":3},"schemas":{"public":{"id":101}},"defaultPrivileges":{}}} {"table":{"name":"descriptor_id_seq","id":7,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"value","id":1,"type":{"family":"IntFamily","width":64,"oid":20}}],"families":[{"name":"primary","columnNames":["value"],"columnIds":[1],"defaultColumnId":1}],"primaryIndex":{"name":"primary","id":1,"version":4,"keyColumnNames":["value"],"keyColumnDirections":["ASC"],"keyColumnIds":[1],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{}},"privileges":{"users":[{"userProto":"admin","privileges":"32","withGrantOption":"32"},{"userProto":"root","privileges":"32","withGrantOption":"32"}],"ownerProto":"node","version":3},"formatVersion":3,"sequenceOpts":{"increment":"1","minValue":"1","maxValue":"9223372036854775807","start":"1","sequenceOwner":{},"cacheSize":"1"},"replacementOf":{"time":{}},"createAsOfTime":{}}} 
-{"table":{"name":"locations","id":21,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"localityKey","id":1,"type":{"family":"StringFamily","oid":25}},{"name":"localityValue","id":2,"type":{"family":"StringFamily","oid":25}},{"name":"latitude","id":3,"type":{"family":"DecimalFamily","width":15,"precision":18,"oid":1700}},{"name":"longitude","id":4,"type":{"family":"DecimalFamily","width":15,"precision":18,"oid":1700}}],"nextColumnId":5,"families":[{"name":"fam_0_localityKey_localityValue_latitude_longitude","columnNames":["localityKey","localityValue","latitude","longitude"],"columnIds":[1,2,3,4]}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["localityKey","localityValue"],"keyColumnDirections":["ASC","ASC"],"storeColumnNames":["latitude","longitude"],"keyColumnIds":[1,2],"storeColumnIds":[3,4],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"nextIndexId":2,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":2}} -{"table":{"name":"role_id_seq","id":48,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"value","id":1,"type":{"family":"IntFamily","width":64,"oid":20}}],"families":[{"name":"primary","columnNames":["value"],"columnIds":[1],"defaultColumnId":1}],"primaryIndex":{"name":"primary","id":1,"version":4,"keyColumnNames":["value"],"keyColumnDirections":["ASC"],"keyColumnIds":[1],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{}},"privileges":{"users":[{"userProto":"admin","privileges":"800","withGrantOption":"800"},{"userProto":"root","privileges":"800","withGrantOption":"800"}],"ownerProto":"node","version":3},"formatVersion":3,"sequenceOpts":{"increment":"1","minValue":"100","maxValue":"2147483647","start":"100","sequenceOwner":{},"cacheSize":"1"},"replacementOf":{"time":{}},"createAsOfTime":{}}} 
-{"table":{"name":"span_stats_unique_keys","id":55,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"id","id":1,"type":{"family":"UuidFamily","oid":2950},"defaultExpr":"gen_random_uuid()"},{"name":"key_bytes","id":2,"type":{"family":"BytesFamily","oid":17},"nullable":true}],"nextColumnId":3,"families":[{"name":"primary","columnNames":["id","key_bytes"],"columnIds":[1,2],"defaultColumnId":2}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["id"],"keyColumnDirections":["ASC"],"storeColumnNames":["key_bytes"],"keyColumnIds":[1],"storeColumnIds":[2],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":2},"indexes":[{"name":"unique_keys_key_bytes_idx","id":2,"unique":true,"version":3,"keyColumnNames":["key_bytes"],"keyColumnDirections":["ASC"],"keyColumnIds":[2],"keySuffixColumnIds":[1],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{},"constraintId":1}],"nextIndexId":3,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":3}} +{"table":{"name":"eventlog","id":12,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"timestamp","id":1,"type":{"family":"TimestampFamily","oid":1114}},{"name":"eventType","id":2,"type":{"family":"StringFamily","oid":25}},{"name":"targetID","id":3,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"reportingID","id":4,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"info","id":5,"type":{"family":"StringFamily","oid":25},"nullable":true},{"name":"uniqueID","id":6,"type":{"family":"BytesFamily","oid":17},"defaultExpr":"uuid_v4()"}],"nextColumnId":7,"families":[{"name":"primary","columnNames":["timestamp","uniqueID"],"columnIds":[1,6]},{"name":"fam_2_eventType","id":2,"columnNames":["eventType"],"columnIds":[2],"defaultColumnId":2},{"name":"fam_3_targetID","id":3,"columnNames":["targetID"],"columnIds":[3],"defaultColumnId":3},{"name":"fam_4_reportingID","id":4,"columnNames":["reportingID"],"columnIds":[4],"defaultColumnId":4},{"name":"fam_5_info","id":5,"columnNames":["info"],"columnIds":[5],"defaultColumnId":5}],"nextFamilyId":6,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["timestamp","uniqueID"],"keyColumnDirections":["ASC","ASC"],"storeColumnNames":["eventType","targetID","reportingID","info"],"keyColumnIds":[1,6],"storeColumnIds":[2,3,4,5],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"nextIndexId":2,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":2}} 
+{"table":{"name":"join_tokens","id":41,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"id","id":1,"type":{"family":"UuidFamily","oid":2950}},{"name":"secret","id":2,"type":{"family":"BytesFamily","oid":17}},{"name":"expiration","id":3,"type":{"family":"TimestampTZFamily","oid":1184}}],"nextColumnId":4,"families":[{"name":"primary","columnNames":["id","secret","expiration"],"columnIds":[1,2,3]}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["id"],"keyColumnDirections":["ASC"],"storeColumnNames":["secret","expiration"],"keyColumnIds":[1],"storeColumnIds":[2,3],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"nextIndexId":2,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":2}} +{"table":{"name":"sql_instances","id":46,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"id","id":1,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"addr","id":2,"type":{"family":"StringFamily","oid":25},"nullable":true},{"name":"session_id","id":3,"type":{"family":"BytesFamily","oid":17},"nullable":true},{"name":"locality","id":4,"type":{"family":"JsonFamily","oid":3802},"nullable":true},{"name":"sql_addr","id":5,"type":{"family":"StringFamily","oid":25},"nullable":true},{"name":"crdb_region","id":6,"type":{"family":"BytesFamily","oid":17}},{"name":"binary_version","id":7,"type":{"family":"StringFamily","oid":25},"nullable":true},{"name":"is_draining","id":8,"type":{"oid":16},"nullable":true}],"nextColumnId":9,"families":[{"name":"primary","columnNames":["id","addr","session_id","locality","sql_addr","crdb_region","binary_version","is_draining"],"columnIds":[1,2,3,4,5,6,7,8]}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":2,"unique":true,"version":4,"keyColumnNames":["crdb_region","id"],"keyColumnDirections":["ASC","ASC"],"storeColumnNames":["addr","session_id","locality","sql_addr","binary_version","is_draining"],"keyColumnIds":[6,1],"storeColumnIds":[2,3,4,5,7,8],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"nextIndexId":3,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":2}} 
{"table":{"name":"task_payloads","id":59,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"id","id":1,"type":{"family":"StringFamily","oid":25}},{"name":"created","id":2,"type":{"family":"TimestampTZFamily","oid":1184},"defaultExpr":"now():::TIMESTAMPTZ"},{"name":"owner","id":3,"type":{"family":"StringFamily","oid":25}},{"name":"owner_id","id":4,"type":{"family":"OidFamily","oid":26}},{"name":"min_version","id":5,"type":{"family":"StringFamily","oid":25}},{"name":"description","id":6,"type":{"family":"StringFamily","oid":25},"nullable":true},{"name":"type","id":7,"type":{"family":"StringFamily","oid":25}},{"name":"value","id":8,"type":{"family":"BytesFamily","oid":17}}],"nextColumnId":9,"families":[{"name":"primary","columnNames":["id","created","owner","owner_id","min_version","description","type","value"],"columnIds":[1,2,3,4,5,6,7,8]}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["id"],"keyColumnDirections":["ASC"],"storeColumnNames":["created","owner","owner_id","min_version","description","type","value"],"keyColumnIds":[1],"storeColumnIds":[2,3,4,5,6,7,8],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"nextIndexId":2,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":2}} -{"table":{"name":"transaction_statistics","id":43,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"aggregated_ts","id":1,"type":{"family":"TimestampTZFamily","oid":1184}},{"name":"fingerprint_id","id":2,"type":{"family":"BytesFamily","oid":17}},{"name":"app_name","id":3,"type":{"family":"StringFamily","oid":25}},{"name":"node_id","id":4,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"agg_interval","id":5,"type":{"family":"IntervalFamily","oid":1186,"intervalDurationField":{}}},{"name":"metadata","id":6,"type":{"family":"JsonFamily","oid":3802}},{"name":"statistics","id":7,"type":{"family":"JsonFamily","oid":3802}},{"name":"crdb_internal_aggregated_ts_app_name_fingerprint_id_node_id_shard_8","id":8,"type":{"family":"IntFamily","width":32,"oid":23},"hidden":true,"computeExpr":"mod(fnv32(crdb_internal.datums_to_bytes(aggregated_ts, app_name, fingerprint_id, node_id)), _:::INT8)"},{"name":"execution_count","id":9,"type":{"family":"IntFamily","width":64,"oid":20},"nullable":true,"computeExpr":"((statistics-\u003e'_':::STRING)-\u003e'_':::STRING)::INT8"},{"name":"service_latency","id":10,"type":{"family":"FloatFamily","width":64,"oid":701},"nullable":true,"computeExpr":"(((statistics-\u003e'_':::STRING)-\u003e'_':::STRING)-\u003e'_':::STRING)::FLOAT8"},{"name":"cpu_sql_nanos","id":11,"type":{"family":"FloatFamily","width":64,"oid":701},"nullable":true,"computeExpr":"(((statistics-\u003e'_':::STRING)-\u003e'_':::STRING)-\u003e'_':::STRING)::FLOAT8"},{"name":"contention_time","id":12,"type":{"family":"FloatFamily","width":64,"oid":701},"nullable":true,"computeExpr":"(((statistics-\u003e'_':::STRING)-\u003e'_':::STRING)-\u003e'_':::STRING)::FLOAT8"},{"name":"total_estimated_execution_time","id":13,"type":{"family":"FloatFamily","width":64,"oid":701},"nullable":true,"computeExpr":"((statistics-\u003e'_':::STRING)-\u003e\u003e'_':::STRING)::FLOAT8 * 
(((statistics-\u003e'_':::STRING)-\u003e'_':::STRING)-\u003e\u003e'_':::STRING)::FLOAT8"},{"name":"p99_latency","id":14,"type":{"family":"FloatFamily","width":64,"oid":701},"nullable":true,"computeExpr":"(((statistics-\u003e'_':::STRING)-\u003e'_':::STRING)-\u003e'_':::STRING)::FLOAT8"}],"nextColumnId":15,"families":[{"name":"primary","columnNames":["crdb_internal_aggregated_ts_app_name_fingerprint_id_node_id_shard_8","aggregated_ts","fingerprint_id","app_name","node_id","agg_interval","metadata","statistics","execution_count","service_latency","cpu_sql_nanos","contention_time","total_estimated_execution_time","p99_latency"],"columnIds":[8,1,2,3,4,5,6,7,9,10,11,12,13,14]}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["crdb_internal_aggregated_ts_app_name_fingerprint_id_node_id_shard_8","aggregated_ts","fingerprint_id","app_name","node_id"],"keyColumnDirections":["ASC","ASC","ASC","ASC","ASC"],"storeColumnNames":["agg_interval","metadata","statistics","execution_count","service_latency","cpu_sql_nanos","contention_time","total_estimated_execution_time","p99_latency"],"keyColumnIds":[8,1,2,3,4],"storeColumnIds":[5,6,7,9,10,11,12,13,14],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{"isSharded":true,"name":"crdb_internal_aggregated_ts_app_name_fingerprint_id_node_id_shard_8","shardBuckets":8,"columnNames":["aggregated_ts","app_name","fingerprint_id","node_id"]},"geoConfig":{},"constraintId":1},"indexes":[{"name":"fingerprint_stats_idx","id":2,"version":3,"keyColumnNames":["fingerprint_id"],"keyColumnDirections":["ASC"],"keyColumnIds":[2],"keySuffixColumnIds":[8,1,3,4],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{}},{"name":"execution_count_idx","id":3,"version":3,"keyColumnNames":["aggregated_ts","app_name","execution_count"],"keyColumnDirections":["ASC","ASC","DESC"],"keyColumnIds":[1,3,9],"keySuffixColumnIds":[8,2,4],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{},"predicate":"app_name NOT LIKE '_':::STRING"},{"name":"service_latency_idx","id":4,"version":3,"keyColumnNames":["aggregated_ts","app_name","service_latency"],"keyColumnDirections":["ASC","ASC","DESC"],"keyColumnIds":[1,3,10],"keySuffixColumnIds":[8,2,4],"compositeColumnIds":[10],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{},"predicate":"app_name NOT LIKE '_':::STRING"},{"name":"cpu_sql_nanos_idx","id":5,"version":3,"keyColumnNames":["aggregated_ts","app_name","cpu_sql_nanos"],"keyColumnDirections":["ASC","ASC","DESC"],"keyColumnIds":[1,3,11],"keySuffixColumnIds":[8,2,4],"compositeColumnIds":[11],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{},"predicate":"app_name NOT LIKE '_':::STRING"},{"name":"contention_time_idx","id":6,"version":3,"keyColumnNames":["aggregated_ts","app_name","contention_time"],"keyColumnDirections":["ASC","ASC","DESC"],"keyColumnIds":[1,3,12],"keySuffixColumnIds":[8,2,4],"compositeColumnIds":[12],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{},"predicate":"app_name NOT LIKE '_':::STRING"},{"name":"total_estimated_execution_time_idx","id":7,"version":3,"keyColumnNames":["aggregated_ts","app_name","total_estimated_execution_time"],"keyColumnDirections":["ASC","ASC","DESC"],"keyColumnIds":[1,3,13],"keySuffixColumnIds":[8,2,4],"compositeColumnIds":[13],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{},"predicate":"app_name NOT LIKE 
'_':::STRING"},{"name":"p99_latency_idx","id":8,"version":3,"keyColumnNames":["aggregated_ts","app_name","p99_latency"],"keyColumnDirections":["ASC","ASC","DESC"],"keyColumnIds":[1,3,14],"keySuffixColumnIds":[8,2,4],"compositeColumnIds":[14],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{},"predicate":"app_name NOT LIKE '_':::STRING"}],"nextIndexId":9,"privileges":{"users":[{"userProto":"admin","privileges":"32","withGrantOption":"32"},{"userProto":"root","privileges":"32","withGrantOption":"32"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"checks":[{"expr":"crdb_internal_aggregated_ts_app_name_fingerprint_id_node_id_shard_8 IN (_:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8, _:::INT8)","name":"check_crdb_internal_aggregated_ts_app_name_fingerprint_id_node_id_shard_8","columnIds":[8],"fromHashShardedColumn":true,"constraintId":2}],"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":3}} +{"table":{"name":"tenant_tasks","id":60,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"tenant_id","id":1,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"issuer","id":2,"type":{"family":"StringFamily","oid":25}},{"name":"task_id","id":3,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"created","id":4,"type":{"family":"TimestampTZFamily","oid":1184},"defaultExpr":"now():::TIMESTAMPTZ"},{"name":"payload_id","id":5,"type":{"family":"StringFamily","oid":25}},{"name":"owner","id":6,"type":{"family":"StringFamily","oid":25}},{"name":"owner_id","id":7,"type":{"family":"OidFamily","oid":26}}],"nextColumnId":8,"families":[{"name":"primary","columnNames":["tenant_id","issuer","task_id","created","payload_id","owner","owner_id"],"columnIds":[1,2,3,4,5,6,7]}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["tenant_id","issuer","task_id"],"keyColumnDirections":["ASC","ASC","ASC"],"storeColumnNames":["created","payload_id","owner","owner_id"],"keyColumnIds":[1,2,3],"storeColumnIds":[4,5,6,7],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"nextIndexId":2,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":2}} 
+{"table":{"name":"tenant_usage","id":45,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"tenant_id","id":1,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"instance_id","id":2,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"next_instance_id","id":3,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"last_update","id":4,"type":{"family":"TimestampFamily","oid":1114}},{"name":"ru_burst_limit","id":5,"type":{"family":"FloatFamily","width":64,"oid":701},"nullable":true},{"name":"ru_refill_rate","id":6,"type":{"family":"FloatFamily","width":64,"oid":701},"nullable":true},{"name":"ru_current","id":7,"type":{"family":"FloatFamily","width":64,"oid":701},"nullable":true},{"name":"current_share_sum","id":8,"type":{"family":"FloatFamily","width":64,"oid":701},"nullable":true},{"name":"total_consumption","id":9,"type":{"family":"BytesFamily","oid":17},"nullable":true},{"name":"instance_lease","id":10,"type":{"family":"BytesFamily","oid":17},"nullable":true},{"name":"instance_seq","id":11,"type":{"family":"IntFamily","width":64,"oid":20},"nullable":true},{"name":"instance_shares","id":12,"type":{"family":"FloatFamily","width":64,"oid":701},"nullable":true},{"name":"current_rates","id":13,"type":{"family":"BytesFamily","oid":17},"nullable":true},{"name":"next_rates","id":14,"type":{"family":"BytesFamily","oid":17},"nullable":true}],"nextColumnId":15,"families":[{"name":"primary","columnNames":["tenant_id","instance_id","next_instance_id","last_update","ru_burst_limit","ru_refill_rate","ru_current","current_share_sum","total_consumption","instance_lease","instance_seq","instance_shares","current_rates","next_rates"],"columnIds":[1,2,3,4,5,6,7,8,9,10,11,12,13,14]}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["tenant_id","instance_id"],"keyColumnDirections":["ASC","ASC"],"storeColumnNames":["next_instance_id","last_update","ru_burst_limit","ru_refill_rate","ru_current","current_share_sum","total_consumption","instance_lease","instance_seq","instance_shares","current_rates","next_rates"],"keyColumnIds":[1,2],"storeColumnIds":[3,4,5,6,7,8,9,10,11,12,13,14],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"nextIndexId":2,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"excludeDataFromBackup":true,"nextConstraintId":2}} 
{"table":{"name":"web_sessions","id":19,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"id","id":1,"type":{"family":"IntFamily","width":64,"oid":20},"defaultExpr":"unique_rowid()"},{"name":"hashedSecret","id":2,"type":{"family":"BytesFamily","oid":17}},{"name":"username","id":3,"type":{"family":"StringFamily","oid":25}},{"name":"createdAt","id":4,"type":{"family":"TimestampFamily","oid":1114},"defaultExpr":"now():::TIMESTAMP"},{"name":"expiresAt","id":5,"type":{"family":"TimestampFamily","oid":1114}},{"name":"revokedAt","id":6,"type":{"family":"TimestampFamily","oid":1114},"nullable":true},{"name":"lastUsedAt","id":7,"type":{"family":"TimestampFamily","oid":1114},"defaultExpr":"now():::TIMESTAMP"},{"name":"auditInfo","id":8,"type":{"family":"StringFamily","oid":25},"nullable":true},{"name":"user_id","id":9,"type":{"family":"OidFamily","oid":26}}],"nextColumnId":10,"families":[{"name":"fam_0_id_hashedSecret_username_createdAt_expiresAt_revokedAt_lastUsedAt_auditInfo","columnNames":["id","hashedSecret","username","createdAt","expiresAt","revokedAt","lastUsedAt","auditInfo","user_id"],"columnIds":[1,2,3,4,5,6,7,8,9]}],"nextFamilyId":1,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["id"],"keyColumnDirections":["ASC"],"storeColumnNames":["hashedSecret","username","createdAt","expiresAt","revokedAt","lastUsedAt","auditInfo","user_id"],"keyColumnIds":[1],"storeColumnIds":[2,3,4,5,6,7,8,9],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"indexes":[{"name":"web_sessions_expiresAt_idx","id":2,"version":3,"keyColumnNames":["expiresAt"],"keyColumnDirections":["ASC"],"keyColumnIds":[5],"keySuffixColumnIds":[1],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{}},{"name":"web_sessions_createdAt_idx","id":3,"version":3,"keyColumnNames":["createdAt"],"keyColumnDirections":["ASC"],"keyColumnIds":[4],"keySuffixColumnIds":[1],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{}},{"name":"web_sessions_revokedAt_idx","id":4,"version":3,"keyColumnNames":["revokedAt"],"keyColumnDirections":["ASC"],"keyColumnIds":[6],"keySuffixColumnIds":[1],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{}},{"name":"web_sessions_lastUsedAt_idx","id":5,"version":3,"keyColumnNames":["lastUsedAt"],"keyColumnDirections":["ASC"],"keyColumnIds":[7],"keySuffixColumnIds":[1],"foreignKey":{},"interleave":{},"partitioning":{},"sharded":{},"geoConfig":{}}],"nextIndexId":6,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":2}} 
-{"table":{"name":"zones","id":5,"version":"1","modificationTime":{},"parentId":1,"unexposedParentSchemaId":29,"columns":[{"name":"id","id":1,"type":{"family":"IntFamily","width":64,"oid":20}},{"name":"config","id":2,"type":{"family":"BytesFamily","oid":17},"nullable":true}],"nextColumnId":3,"families":[{"name":"primary","columnNames":["id"],"columnIds":[1]},{"name":"fam_2_config","id":2,"columnNames":["config"],"columnIds":[2],"defaultColumnId":2}],"nextFamilyId":3,"primaryIndex":{"name":"primary","id":1,"unique":true,"version":4,"keyColumnNames":["id"],"keyColumnDirections":["ASC"],"storeColumnNames":["config"],"keyColumnIds":[1],"storeColumnIds":[2],"foreignKey":{},"interleave":{},"partitioning":{},"encodingType":1,"sharded":{},"geoConfig":{},"constraintId":1},"nextIndexId":2,"privileges":{"users":[{"userProto":"admin","privileges":"480","withGrantOption":"480"},{"userProto":"root","privileges":"480","withGrantOption":"480"}],"ownerProto":"node","version":3},"nextMutationId":1,"formatVersion":3,"replacementOf":{"time":{}},"createAsOfTime":{},"nextConstraintId":2}} -{"schema":{"name":"public","id":103,"modificationTime":{"wallTime":"0"},"version":"1","parentId":102,"privileges":{"users":[{"userProto":"admin","privileges":"2","withGrantOption":"2"},{"userProto":"public","privileges":"516"},{"userProto":"root","privileges":"2","withGrantOption":"2"}],"ownerProto":"root","version":3}}} diff --git a/pkg/sql/catalog/tabledesc/structured.go b/pkg/sql/catalog/tabledesc/structured.go index 0552ae55a371..68ae10af7702 100644 --- a/pkg/sql/catalog/tabledesc/structured.go +++ b/pkg/sql/catalog/tabledesc/structured.go @@ -2453,6 +2453,21 @@ func (desc *wrapper) GetStorageParams(spaceBetweenEqual bool) []string { appendStorageParam(catpb.AutoStatsFractionStaleTableSettingName, fmt.Sprintf("%g", value)) } + if settings.PartialEnabled != nil { + value := *settings.PartialEnabled + appendStorageParam(catpb.AutoPartialStatsEnabledTableSettingName, + fmt.Sprintf("%v", value)) + } + if settings.PartialMinStaleRows != nil { + value := *settings.PartialMinStaleRows + appendStorageParam(catpb.AutoPartialStatsMinStaleTableSettingName, + fmt.Sprintf("%d", value)) + } + if settings.PartialFractionStaleRows != nil { + value := *settings.PartialFractionStaleRows + appendStorageParam(catpb.AutoPartialStatsFractionStaleTableSettingName, + fmt.Sprintf("%g", value)) + } } if enabled, ok := desc.ForecastStatsEnabled(); ok { appendStorageParam(`sql_stats_forecasts_enabled`, strconv.FormatBool(enabled)) @@ -2500,6 +2515,13 @@ func (desc *wrapper) AutoStatsCollectionEnabled() catpb.AutoStatsCollectionStatu return desc.AutoStatsSettings.AutoStatsCollectionEnabled() } +func (desc *wrapper) AutoPartialStatsCollectionEnabled() catpb.AutoPartialStatsCollectionStatus { + if desc.AutoStatsSettings == nil { + return catpb.AutoPartialStatsCollectionNotSet + } + return desc.AutoStatsSettings.AutoPartialStatsCollectionEnabled() +} + // AutoStatsMinStaleRows implements the TableDescriptor interface. func (desc *wrapper) AutoStatsMinStaleRows() (minStaleRows int64, ok bool) { if desc.AutoStatsSettings == nil { diff --git a/pkg/sql/catalog/tabledesc/table_desc.go b/pkg/sql/catalog/tabledesc/table_desc.go index 522d2da48a76..f8a7d8190be4 100644 --- a/pkg/sql/catalog/tabledesc/table_desc.go +++ b/pkg/sql/catalog/tabledesc/table_desc.go @@ -292,6 +292,22 @@ func UpdateIndexPartitioning( return true } +// NewPartitioning creates a new catalog.paritioning from the given +// partitioning descriptor. 
If the partitioning descriptor is nil, the resulting +// partitioning will have the default value for each entry. +func NewPartitioning(partDesc *catpb.PartitioningDescriptor) catalog.Partitioning { + if partDesc != nil { + return &partitioning{desc: partDesc} + } + partDesc = &catpb.PartitioningDescriptor{ + NumColumns: 0, + NumImplicitColumns: 0, + List: nil, + Range: nil, + } + return partitioning{desc: partDesc} +} + // GetPrimaryIndex implements the TableDescriptor interface. func (desc *wrapper) GetPrimaryIndex() catalog.UniqueWithIndexConstraint { return desc.getExistingOrNewIndexCache().primary @@ -339,7 +355,7 @@ func (desc *wrapper) NonDropIndexes() []catalog.Index { return desc.getExistingOrNewIndexCache().nonDrop } -// NonDropIndexes returns a slice of all partial indexes in the underlying +// PartialIndexes returns a slice of all partial indexes in the underlying // proto, in their canonical order. This is equivalent to taking the slice // produced by AllIndexes and filtering indexes with non-empty expressions. func (desc *wrapper) PartialIndexes() []catalog.Index { diff --git a/pkg/sql/catalog/tabledesc/validate.go b/pkg/sql/catalog/tabledesc/validate.go index 4cc36d4f725e..c91c237cbae6 100644 --- a/pkg/sql/catalog/tabledesc/validate.go +++ b/pkg/sql/catalog/tabledesc/validate.go @@ -1920,9 +1920,14 @@ func (desc *wrapper) validateAutoStatsSettings(vea catalog.ValidationErrorAccumu if desc.AutoStatsSettings == nil { return } - desc.validateAutoStatsEnabled(vea, desc.AutoStatsSettings.Enabled) - desc.validateMinStaleRows(vea, desc.AutoStatsSettings.MinStaleRows) - desc.validateFractionStaleRows(vea, desc.AutoStatsSettings.FractionStaleRows) + desc.validateAutoStatsEnabled(vea, catpb.AutoStatsEnabledTableSettingName, desc.AutoStatsSettings.Enabled) + desc.validateAutoStatsEnabled(vea, catpb.AutoPartialStatsEnabledTableSettingName, desc.AutoStatsSettings.PartialEnabled) + + desc.validateMinStaleRows(vea, catpb.AutoStatsMinStaleTableSettingName, desc.AutoStatsSettings.MinStaleRows) + desc.validateMinStaleRows(vea, catpb.AutoPartialStatsMinStaleTableSettingName, desc.AutoStatsSettings.PartialMinStaleRows) + + desc.validateFractionStaleRows(vea, catpb.AutoStatsFractionStaleTableSettingName, desc.AutoStatsSettings.FractionStaleRows) + desc.validateFractionStaleRows(vea, catpb.AutoPartialStatsFractionStaleTableSettingName, desc.AutoStatsSettings.PartialFractionStaleRows) } func (desc *wrapper) verifyProperTableForStatsSetting( @@ -1936,15 +1941,18 @@ func (desc *wrapper) verifyProperTableForStatsSetting( } } -func (desc *wrapper) validateAutoStatsEnabled(vea catalog.ValidationErrorAccumulator, value *bool) { +func (desc *wrapper) validateAutoStatsEnabled( + vea catalog.ValidationErrorAccumulator, settingName string, value *bool, +) { if value != nil { - desc.verifyProperTableForStatsSetting(vea, catpb.AutoStatsEnabledTableSettingName) + desc.verifyProperTableForStatsSetting(vea, settingName) } } -func (desc *wrapper) validateMinStaleRows(vea catalog.ValidationErrorAccumulator, value *int64) { +func (desc *wrapper) validateMinStaleRows( + vea catalog.ValidationErrorAccumulator, settingName string, value *int64, +) { if value != nil { - settingName := catpb.AutoStatsMinStaleTableSettingName desc.verifyProperTableForStatsSetting(vea, settingName) if *value < 0 { vea.Report(errors.Newf("invalid integer value for %s: cannot be set to a negative value: %d", settingName, *value)) @@ -1953,10 +1961,9 @@ func (desc *wrapper) validateMinStaleRows(vea catalog.ValidationErrorAccumulator } func 
(desc *wrapper) validateFractionStaleRows( - vea catalog.ValidationErrorAccumulator, value *float64, + vea catalog.ValidationErrorAccumulator, settingName string, value *float64, ) { if value != nil { - settingName := catpb.AutoStatsFractionStaleTableSettingName desc.verifyProperTableForStatsSetting(vea, settingName) if *value < 0 { vea.Report(errors.Newf("invalid float value for %s: cannot set to a negative value: %f", settingName, *value)) diff --git a/pkg/sql/catalog/tabledesc/validate_test.go b/pkg/sql/catalog/tabledesc/validate_test.go index e75f2e01e06e..fbb294dfe035 100644 --- a/pkg/sql/catalog/tabledesc/validate_test.go +++ b/pkg/sql/catalog/tabledesc/validate_test.go @@ -300,9 +300,12 @@ var validationMap = []struct { { obj: catpb.AutoStatsSettings{}, fieldMap: map[string]validationStatusInfo{ - "Enabled": {status: iSolemnlySwearThisFieldIsValidated}, - "MinStaleRows": {status: iSolemnlySwearThisFieldIsValidated}, - "FractionStaleRows": {status: iSolemnlySwearThisFieldIsValidated}, + "Enabled": {status: iSolemnlySwearThisFieldIsValidated}, + "MinStaleRows": {status: iSolemnlySwearThisFieldIsValidated}, + "FractionStaleRows": {status: iSolemnlySwearThisFieldIsValidated}, + "PartialEnabled": {status: iSolemnlySwearThisFieldIsValidated}, + "PartialMinStaleRows": {status: iSolemnlySwearThisFieldIsValidated}, + "PartialFractionStaleRows": {status: iSolemnlySwearThisFieldIsValidated}, }, }, { @@ -330,6 +333,7 @@ var validationMap = []struct { "Version": {status: thisFieldReferencesNoObjects}, "DeclarativeSchemaChangerState": {status: thisFieldReferencesNoObjects}, "IsProcedure": {status: thisFieldReferencesNoObjects}, + "Security": {status: thisFieldReferencesNoObjects}, }, }, } @@ -2517,6 +2521,18 @@ func TestValidateTableDesc(t *testing.T) { NextColumnID: 2, AutoStatsSettings: &catpb.AutoStatsSettings{Enabled: &boolTrue}, }}, + {err: `Setting sql_stats_automatic_partial_collection_enabled may not be set on virtual table`, + desc: descpb.TableDescriptor{ + ID: catconstants.MinVirtualID, + ParentID: 1, + Name: "foo", + FormatVersion: descpb.InterleavedFormatVersion, + Columns: []descpb.ColumnDescriptor{ + {ID: 1, Name: "bar"}, + }, + NextColumnID: 2, + AutoStatsSettings: &catpb.AutoStatsSettings{PartialEnabled: &boolTrue}, + }}, {err: `Setting sql_stats_automatic_collection_enabled may not be set on a view or sequence`, desc: descpb.TableDescriptor{ Name: "bar", @@ -2582,6 +2598,18 @@ func TestValidateTableDesc(t *testing.T) { NextColumnID: 2, AutoStatsSettings: &catpb.AutoStatsSettings{MinStaleRows: &negativeOne}, }}, + {err: `invalid integer value for sql_stats_automatic_partial_collection_min_stale_rows: cannot be set to a negative value: -1`, + desc: descpb.TableDescriptor{ + ID: 2, + ParentID: 1, + Name: "foo", + FormatVersion: descpb.InterleavedFormatVersion, + Columns: []descpb.ColumnDescriptor{ + {ID: 1, Name: "bar"}, + }, + NextColumnID: 2, + AutoStatsSettings: &catpb.AutoStatsSettings{PartialMinStaleRows: &negativeOne}, + }}, {err: `invalid float value for sql_stats_automatic_collection_fraction_stale_rows: cannot set to a negative value: -1.000000`, desc: descpb.TableDescriptor{ ID: 2, @@ -2594,6 +2622,18 @@ func TestValidateTableDesc(t *testing.T) { NextColumnID: 2, AutoStatsSettings: &catpb.AutoStatsSettings{FractionStaleRows: &negativeOneFloat}, }}, + {err: `invalid float value for sql_stats_automatic_partial_collection_fraction_stale_rows: cannot set to a negative value: -1.000000`, + desc: descpb.TableDescriptor{ + ID: 2, + ParentID: 1, + Name: "foo", + FormatVersion: 
descpb.InterleavedFormatVersion, + Columns: []descpb.ColumnDescriptor{ + {ID: 1, Name: "bar"}, + }, + NextColumnID: 2, + AutoStatsSettings: &catpb.AutoStatsSettings{PartialFractionStaleRows: &negativeOneFloat}, + }}, {err: `row-level TTL expiration expression "missing_col" refers to unknown columns`, desc: descpb.TableDescriptor{ ID: 2, diff --git a/pkg/sql/colflow/colrpc/BUILD.bazel b/pkg/sql/colflow/colrpc/BUILD.bazel index 65df3cba4a72..0df705aac5a6 100644 --- a/pkg/sql/colflow/colrpc/BUILD.bazel +++ b/pkg/sql/colflow/colrpc/BUILD.bazel @@ -61,6 +61,7 @@ go_test( "//pkg/sql/colmem", "//pkg/sql/execinfra", "//pkg/sql/execinfrapb", + "//pkg/sql/flowinfra", "//pkg/sql/types", "//pkg/testutils", "//pkg/util/cancelchecker", diff --git a/pkg/sql/colflow/colrpc/colrpc_test.go b/pkg/sql/colflow/colrpc/colrpc_test.go index 302bec0fee51..2546d0ec08a1 100644 --- a/pkg/sql/colflow/colrpc/colrpc_test.go +++ b/pkg/sql/colflow/colrpc/colrpc_test.go @@ -31,6 +31,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/colmem" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" + "github.com/cockroachdb/cockroach/pkg/sql/flowinfra" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/util/cancelchecker" @@ -135,7 +136,7 @@ func TestOutboxInbox(t *testing.T) { defer stopper.Stop(ctx) clock := hlc.NewClockForTesting(nil) - _, mockServer, addr, err := execinfrapb.StartMockDistSQLServer(ctx, clock, stopper, execinfra.StaticSQLInstanceID) + _, mockServer, addr, err := flowinfra.StartMockDistSQLServer(ctx, clock, stopper, execinfra.StaticSQLInstanceID) require.NoError(t, err) // Generate a random cancellation scenario. 
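Reviewer note on the validate.go refactor above: threading the setting name through validateAutoStatsEnabled, validateMinStaleRows, and validateFractionStaleRows lets one helper serve both the full and the new partial auto-stats table settings instead of duplicating each check. A minimal sketch of the pattern, with simplified free functions returning errors (the real helpers are methods on *wrapper that report through a catalog.ValidationErrorAccumulator):

```go
package main

import "fmt"

// Table-setting names taken from the error messages in validate_test.go.
const (
	autoStatsMinStale        = "sql_stats_automatic_collection_min_stale_rows"
	autoPartialStatsMinStale = "sql_stats_automatic_partial_collection_min_stale_rows"
)

// validateMinStaleRows checks one optional integer setting. Because the
// setting name is a parameter, the same function validates both the full
// and the partial-stats variants.
func validateMinStaleRows(settingName string, value *int64) error {
	if value == nil {
		// Setting not present on the descriptor; nothing to validate.
		return nil
	}
	if *value < 0 {
		return fmt.Errorf(
			"invalid integer value for %s: cannot be set to a negative value: %d",
			settingName, *value)
	}
	return nil
}

func main() {
	neg := int64(-1)
	fmt.Println(validateMinStaleRows(autoStatsMinStale, nil))         // <nil>
	fmt.Println(validateMinStaleRows(autoPartialStatsMinStale, &neg)) // error
}
```

The settings themselves surface as table storage parameters (see the GetStorageParams hunk in structured.go), so they would presumably be set with something like `ALTER TABLE t SET (sql_stats_automatic_partial_collection_enabled = true)`.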
@@ -490,7 +491,7 @@ func TestInboxHostCtxCancellation(t *testing.T) { defer stopper.Stop(ctx) clock := hlc.NewClockForTesting(nil) - _, mockServer, addr, err := execinfrapb.StartMockDistSQLServer(ctx, clock, stopper, execinfra.StaticSQLInstanceID) + _, mockServer, addr, err := flowinfra.StartMockDistSQLServer(ctx, clock, stopper, execinfra.StaticSQLInstanceID) require.NoError(t, err) rng, _ := randutil.NewTestRand() @@ -578,7 +579,7 @@ func TestOutboxInboxMetadataPropagation(t *testing.T) { stopper := stop.NewStopper() defer stopper.Stop(ctx) - _, mockServer, addr, err := execinfrapb.StartMockDistSQLServer(ctx, + _, mockServer, addr, err := flowinfra.StartMockDistSQLServer(ctx, hlc.NewClockForTesting(nil), stopper, execinfra.StaticSQLInstanceID, ) require.NoError(t, err) @@ -773,7 +774,7 @@ func BenchmarkOutboxInbox(b *testing.B) { stopper := stop.NewStopper() defer stopper.Stop(ctx) - _, mockServer, addr, err := execinfrapb.StartMockDistSQLServer(ctx, + _, mockServer, addr, err := flowinfra.StartMockDistSQLServer(ctx, hlc.NewClockForTesting(nil), stopper, execinfra.StaticSQLInstanceID, ) require.NoError(b, err) @@ -848,11 +849,11 @@ func TestOutboxStreamIDPropagation(t *testing.T) { stopper := stop.NewStopper() defer stopper.Stop(ctx) - _, mockServer, addr, err := execinfrapb.StartMockDistSQLServer(ctx, + _, mockServer, addr, err := flowinfra.StartMockDistSQLServer(ctx, hlc.NewClockForTesting(nil), stopper, execinfra.StaticSQLInstanceID, ) require.NoError(t, err) - dialer := &execinfrapb.MockDialer{Addr: addr} + dialer := &flowinfra.MockDialer{Addr: addr} defer dialer.Close() typs := []*types.T{types.Int} diff --git a/pkg/sql/colflow/vectorized_flow_shutdown_test.go b/pkg/sql/colflow/vectorized_flow_shutdown_test.go index 286b99f45f07..bc38f55066f3 100644 --- a/pkg/sql/colflow/vectorized_flow_shutdown_test.go +++ b/pkg/sql/colflow/vectorized_flow_shutdown_test.go @@ -31,6 +31,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/colmem" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" + "github.com/cockroachdb/cockroach/pkg/sql/flowinfra" "github.com/cockroachdb/cockroach/pkg/sql/sem/eval" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/testutils" @@ -113,11 +114,11 @@ func TestVectorizedFlowShutdown(t *testing.T) { ctx := context.Background() stopper := stop.NewStopper() defer stopper.Stop(ctx) - _, mockServer, addr, err := execinfrapb.StartMockDistSQLServer(ctx, + _, mockServer, addr, err := flowinfra.StartMockDistSQLServer(ctx, hlc.NewClockForTesting(nil), stopper, execinfra.StaticSQLInstanceID, ) require.NoError(t, err) - dialer := &execinfrapb.MockDialer{Addr: addr} + dialer := &flowinfra.MockDialer{Addr: addr} defer dialer.Close() queueCfg, cleanup := colcontainerutils.NewTestingDiskQueueCfg(t, true /* inMem */) diff --git a/pkg/sql/conn_executor.go b/pkg/sql/conn_executor.go index f85d0316c3ce..4ced0cd218aa 100644 --- a/pkg/sql/conn_executor.go +++ b/pkg/sql/conn_executor.go @@ -415,7 +415,7 @@ type ServerMetrics struct { func NewServer(cfg *ExecutorConfig, pool *mon.BytesMonitor) *Server { metrics := makeMetrics(false /* internal */) serverMetrics := makeServerMetrics(cfg) - insightsProvider := insights.New(cfg.Settings, serverMetrics.InsightsMetrics) + insightsProvider := insights.New(cfg.Settings, serverMetrics.InsightsMetrics, cfg.InsightsTestingKnobs) // TODO(117690): Unify StmtStatsEnable and TxnStatsEnable into a single 
cluster setting. sqlstats.TxnStatsEnable.SetOnChange(&cfg.Settings.SV, func(_ context.Context) { if !sqlstats.TxnStatsEnable.Get(&cfg.Settings.SV) { @@ -645,7 +645,7 @@ func (s *Server) GetIndexUsageStatsController() *idxusage.Controller { return s.indexUsageStatsController } -// GetInsightsReader returns the insights.Reader for the current sql.Server's +// GetInsightsReader returns the insights store for the current sql.Server's // detected execution insights. func (s *Server) GetInsightsReader() *insights.LockingStore { return s.insights.Store() @@ -1123,8 +1123,8 @@ func (s *Server) newConnExecutor( displayLevel := upgradedFrom if upgradedFrom == tree.ReadUncommittedIsolation { displayLevel = tree.ReadCommittedIsolation - } else if upgradedFrom == tree.RepeatableReadIsolation { - displayLevel = tree.SnapshotIsolation + } else if upgradedFrom == tree.SnapshotIsolation { + displayLevel = tree.RepeatableReadIsolation } if logIsolationLevelLimiter.ShouldLog() { log.Warningf(ctx, msgFmt, displayLevel) @@ -1267,7 +1267,7 @@ func (ex *connExecutor) close(ctx context.Context, closeType closeType) { } // Free any memory used by the stats collector. - ex.statsCollector.Free(ctx) + ex.statsCollector.Close(ctx, ex.planner.extendedEvalCtx.SessionID) var payloadErr error if closeType == normalClose { @@ -3529,13 +3529,14 @@ var allowReadCommittedIsolation = settings.RegisterBoolSetting( settings.WithPublic, ) -var allowSnapshotIsolation = settings.RegisterBoolSetting( +var allowRepeatableReadIsolation = settings.RegisterBoolSetting( settings.ApplicationLevel, "sql.txn.snapshot_isolation.enabled", - "set to true to allow transactions to use the SNAPSHOT isolation level. At "+ - "the time of writing, this setting is intended only for usage by "+ - "CockroachDB developers.", + "set to true to allow transactions to use the REPEATABLE READ isolation "+ + "level if specified by BEGIN/SET commands", false, + settings.WithName("sql.txn.repeatable_read_isolation.enabled"), + settings.WithPublic, ) var logIsolationLevelLimiter = log.Every(10 * time.Second) @@ -3563,56 +3564,17 @@ func (ex *connExecutor) txnIsolationLevelToKV( if level == tree.UnspecifiedIsolation { level = tree.IsolationLevel(ex.sessionData().DefaultTxnIsolationLevel) } - upgraded := false - upgradedDueToLicense := false + originalLevel := level + allowReadCommitted := allowReadCommittedIsolation.Get(&ex.server.cfg.Settings.SV) + allowRepeatableRead := allowRepeatableReadIsolation.Get(&ex.server.cfg.Settings.SV) hasLicense := base.CCLDistributionAndEnterpriseEnabled(ex.server.cfg.Settings) - ret := isolation.Serializable - switch level { - case tree.ReadUncommittedIsolation: - // READ UNCOMMITTED is mapped to READ COMMITTED. PostgreSQL also does - // this: https://www.postgresql.org/docs/current/transaction-iso.html. - upgraded = true - fallthrough - case tree.ReadCommittedIsolation: - // READ COMMITTED is only allowed if the cluster setting is enabled and - // the cluster has a license. Otherwise it is mapped to SERIALIZABLE. - allowReadCommitted := allowReadCommittedIsolation.Get(&ex.server.cfg.Settings.SV) - if allowReadCommitted && hasLicense { - ret = isolation.ReadCommitted - } else { - upgraded = true - ret = isolation.Serializable - if allowReadCommitted && !hasLicense { - upgradedDueToLicense = true - } - } - case tree.RepeatableReadIsolation: - // REPEATABLE READ is mapped to SNAPSHOT. 
- upgraded = true - fallthrough - case tree.SnapshotIsolation: - // SNAPSHOT is only allowed if the cluster setting is enabled and the - // cluster has a license. Otherwise it is mapped to SERIALIZABLE. - allowSnapshot := allowSnapshotIsolation.Get(&ex.server.cfg.Settings.SV) - if allowSnapshot && hasLicense { - ret = isolation.Snapshot - } else { - upgraded = true - ret = isolation.Serializable - if allowSnapshot && !hasLicense { - upgradedDueToLicense = true - } - } - case tree.SerializableIsolation: - ret = isolation.Serializable - default: - log.Fatalf(context.Background(), "unknown isolation level: %s", level) - } - + level, upgraded, upgradedDueToLicense := level.UpgradeToEnabledLevel( + allowReadCommitted, allowRepeatableRead, hasLicense) if f := ex.dataMutatorIterator.upgradedIsolationLevel; upgraded && f != nil { - f(ctx, level, upgradedDueToLicense) + f(ctx, originalLevel, upgradedDueToLicense) } + ret := level.ToKVIsoLevel() if ret != isolation.Serializable { telemetry.Inc(sqltelemetry.IsolationLevelCounter(ctx, ret)) } @@ -4254,7 +4216,7 @@ func (ex *connExecutor) serialize() serverpb.Session { Priority: ex.state.mu.priority.String(), QualityOfService: sessiondatapb.ToQoSLevelString(txn.AdmissionHeader().Priority), LastAutoRetryReason: autoRetryReasonStr, - IsolationLevel: tree.IsolationLevelFromKVTxnIsolationLevel(ex.state.mu.isolationLevel).String(), + IsolationLevel: tree.FromKVIsoLevel(ex.state.mu.isolationLevel).String(), } } diff --git a/pkg/sql/conn_executor_test.go b/pkg/sql/conn_executor_test.go index bebdd71d1730..7037d1b4ad50 100644 --- a/pkg/sql/conn_executor_test.go +++ b/pkg/sql/conn_executor_test.go @@ -428,7 +428,7 @@ func TestHalloweenProblemAvoidance(t *testing.T) { defer s.Stopper().Stop(context.Background()) for _, s := range []string{ - `SET CLUSTER SETTING sql.txn.snapshot_isolation.enabled = true;`, + `SET CLUSTER SETTING sql.txn.repeatable_read_isolation.enabled = true;`, `CREATE DATABASE t;`, `CREATE TABLE t.test (x FLOAT);`, } { @@ -439,7 +439,7 @@ func TestHalloweenProblemAvoidance(t *testing.T) { for _, isoLevel := range []tree.IsolationLevel{ tree.ReadCommittedIsolation, - tree.SnapshotIsolation, + tree.RepeatableReadIsolation, tree.SerializableIsolation, } { t.Run(isoLevel.String(), func(t *testing.T) { diff --git a/pkg/sql/crdb_internal.go b/pkg/sql/crdb_internal.go index e02c73ac9c6c..c7dc2d443f62 100644 --- a/pkg/sql/crdb_internal.go +++ b/pkg/sql/crdb_internal.go @@ -228,6 +228,7 @@ var crdbInternal = virtualSchema{ catconstants.CrdbInternalPCRStreamSpansTableID: crdbInternalPCRStreamSpansTable, catconstants.CrdbInternalPCRStreamCheckpointsTableID: crdbInternalPCRStreamCheckpointsTable, catconstants.CrdbInternalLDRProcessorTableID: crdbInternalLDRProcessorTable, + catconstants.CrdbInternalFullyQualifiedNamesViewID: crdbInternalFullyQualifiedNamesView, }, validWithNoDatabaseContext: true, } @@ -756,7 +757,7 @@ CREATE TABLE crdb_internal.table_row_statistics ( table_name STRING NOT NULL, estimated_row_count INT )`, - populate: func(ctx context.Context, p *planner, db catalog.DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, dbContext catalog.DatabaseDescriptor, addRow func(...tree.Datum) error) error { // Collect the statistics for all tables AS OF 10 seconds ago to avoid // contention on the stats table. We pass a nil transaction so that the AS // OF clause can be independent of any outer query. 
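Reviewer note on the conn_executor.go hunks above: the hand-rolled switch mapping session isolation levels to KV levels is replaced by helpers on tree.IsolationLevel. A condensed sketch of the new flow, assuming UpgradeToEnabledLevel and ToKVIsoLevel keep the shapes used in this diff; the log call stands in for the session-data callback the real code invokes:

    originalLevel := level // what the user asked for via BEGIN/SET
    level, upgraded, upgradedDueToLicense := level.UpgradeToEnabledLevel(
        allowReadCommitted,  // cluster setting gating READ COMMITTED
        allowRepeatableRead, // sql.txn.repeatable_read_isolation.enabled
        hasLicense,          // enterprise license present
    )
    if upgraded {
        // Report the requested level, not the level it was upgraded to.
        log.Warningf(ctx, "upgraded isolation level %s (license-gated: %t)",
            originalLevel, upgradedDueToLicense)
    }
    ret := level.ToKVIsoLevel() // kv-side isolation.Level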
@@ -793,8 +794,10 @@ CREATE TABLE crdb_internal.table_row_statistics ( // Walk over all available tables and show row count for each of them // using collected statistics. - return forEachTableDescAll(ctx, p, db, virtualMany, - func(ctx context.Context, db catalog.DatabaseDescriptor, _ catalog.SchemaDescriptor, table catalog.TableDescriptor) error { + opts := forEachTableDescOptions{virtualOpts: virtualMany, allowAdding: true} + return forEachTableDesc(ctx, p, dbContext, opts, + func(ctx context.Context, descCtx tableDescContext) error { + table := descCtx.table tableID := tree.DInt(table.GetID()) rowCount := tree.DNull // For Virtual Tables report NULL row count. @@ -966,32 +969,14 @@ const ( // so we perform a LEFT JOIN to get a NULL value when no progress row is // found. systemJobsAndJobInfoBaseQuery = ` -WITH - latestpayload AS (SELECT job_id, value FROM system.job_info AS payload WHERE info_key = 'legacy_payload' ORDER BY written DESC), - latestprogress AS (SELECT job_id, value FROM system.job_info AS progress WHERE info_key = 'legacy_progress' ORDER BY written DESC) - SELECT - DISTINCT(id), status, created, payload.value AS payload, progress.value AS progress, - created_by_type, created_by_id, claim_session_id, claim_instance_id, num_runs, last_run, job_type - FROM system.jobs AS j - INNER JOIN latestpayload AS payload ON j.id = payload.job_id - LEFT JOIN latestprogress AS progress ON j.id = progress.job_id -` - - // systemJobsAndJobInfoBaseQueryWithIDPredicate is the same as - // systemJobsAndJobInfoBaseQuery but with a predicate on `job_id` in the CTE - // queries. - systemJobsAndJobInfoBaseQueryWithIDPredicate = ` -WITH - latestpayload AS (SELECT job_id, value FROM system.job_info AS payload WHERE info_key = 'legacy_payload' AND job_id = $1 ORDER BY written DESC LIMIT 1), - latestprogress AS (SELECT job_id, value FROM system.job_info AS progress WHERE info_key = 'legacy_progress' AND job_id = $1 ORDER BY written DESC LIMIT 1) - SELECT - id, status, created, payload.value AS payload, progress.value AS progress, - created_by_type, created_by_id, claim_session_id, claim_instance_id, num_runs, last_run, job_type - FROM system.jobs AS j - INNER JOIN latestpayload AS payload ON j.id = payload.job_id - LEFT JOIN latestprogress AS progress ON j.id = progress.job_id +SELECT +DISTINCT(id), status, created, payload.value AS payload, progress.value AS progress, +created_by_type, created_by_id, claim_session_id, claim_instance_id, num_runs, last_run, job_type +FROM +system.jobs AS j +LEFT JOIN system.job_info AS progress ON j.id = progress.job_id AND progress.info_key = 'legacy_progress' +INNER JOIN system.job_info AS payload ON j.id = payload.job_id AND payload.info_key = 'legacy_payload' ` - systemJobsIDPredicate = ` WHERE id = $1` systemJobsTypePredicate = ` WHERE job_type = $1` systemJobsStatusPredicate = ` WHERE status = $1` @@ -1011,7 +996,7 @@ func getInternalSystemJobsQuery(predicate systemJobsPredicate) string { case noPredicate: return systemJobsAndJobInfoBaseQuery case jobID: - return systemJobsAndJobInfoBaseQueryWithIDPredicate + systemJobsIDPredicate + return systemJobsAndJobInfoBaseQuery + systemJobsIDPredicate case jobType: return systemJobsAndJobInfoBaseQuery + systemJobsTypePredicate case jobStatus: @@ -1439,7 +1424,7 @@ const crdbInternalKVProtectedTSTableQuery = ` 'cockroach.protectedts.Target', target, false /* emit defaults */, - false /* include redaction marker */ + false /* include redaction marker */ /* NB: redactions in the debug zip are handled elsewhere by marking 
columns as sensitive */ ) as decoded_targets, crdb_internal_mvcc_timestamp @@ -1458,11 +1443,11 @@ CREATE TABLE crdb_internal.kv_protected_ts_records ( num_spans INT8 NOT NULL, spans BYTES NOT NULL, -- We do not decode this column since it is deprecated in 22.2+. verified BOOL NOT NULL, - target BYTES, + target BYTES, decoded_meta JSON, -- Decoded data from the meta column above. - -- This data can have different structures depending on the meta_type. + -- This data can have different structures depending on the meta_type. decoded_target JSON, -- Decoded data from the target column above. - internal_meta JSON, -- Additional metadata added by this virtual table (ex. job owner for job meta_type) + internal_meta JSON, -- Additional metadata added by this virtual table (ex. job owner for job meta_type) num_ranges INT, -- Number of ranges protected by this PTS record. last_updated DECIMAL -- crdb_internal_mvcc_timestamp of the row )`, @@ -2845,7 +2830,7 @@ func formatActiveQuery(query serverpb.ActiveQuery) string { ctx.Printf("$%d", p.Idx+1) return } - ctx.Printf(query.Placeholders[p.Idx]) + ctx.Printf("%s", query.Placeholders[p.Idx]) }), ) sb.WriteString(sql) @@ -3617,12 +3602,21 @@ CREATE TABLE crdb_internal.create_schema_statements ( ExplicitSchema: true, }, } + + createStatement := tree.AsString(node) + + comment, ok := p.Descriptors().GetSchemaComment(schemaDesc.GetID()) + if ok { + commentOnSchema := tree.CommentOnSchema{Comment: &comment, Name: tree.ObjectNamePrefix{SchemaName: tree.Name(schemaDesc.GetName()), ExplicitSchema: true}} + createStatement += ";\n" + tree.AsString(&commentOnSchema) + } + if err := addRow( tree.NewDInt(tree.DInt(db.GetID())), // database_id tree.NewDString(db.GetName()), // database_name tree.NewDString(schemaDesc.GetName()), // schema_name tree.NewDInt(tree.DInt(schemaDesc.GetID())), // descriptor_id (schema_id) - tree.NewDString(tree.AsString(node)), // create_statement + tree.NewDString(createStatement), // create_statement ); err != nil { return err } @@ -3949,8 +3943,10 @@ CREATE TABLE crdb_internal.table_columns ( const numDatums = 8 row := make(tree.Datums, numDatums) worker := func(ctx context.Context, pusher rowPusher) error { - return forEachTableDescAll(ctx, p, dbContext, hideVirtual, - func(ctx context.Context, db catalog.DatabaseDescriptor, _ catalog.SchemaDescriptor, table catalog.TableDescriptor) error { + opts := forEachTableDescOptions{virtualOpts: hideVirtual, allowAdding: true} + return forEachTableDesc(ctx, p, dbContext, opts, + func(ctx context.Context, descCtx tableDescContext) error { + table := descCtx.table tableID := tree.NewDInt(tree.DInt(table.GetID())) tableName := tree.NewDString(table.GetName()) columns := table.PublicColumns() @@ -4019,8 +4015,10 @@ CREATE TABLE crdb_internal.table_indexes ( row := make([]tree.Datum, numDatums) worker := func(ctx context.Context, pusher rowPusher) error { alloc := &tree.DatumAlloc{} - return forEachTableDescAll(ctx, p, dbContext, hideVirtual, - func(ctx context.Context, db catalog.DatabaseDescriptor, sc catalog.SchemaDescriptor, table catalog.TableDescriptor) error { + opts := forEachTableDescOptions{virtualOpts: hideVirtual, allowAdding: true} + return forEachTableDesc(ctx, p, dbContext, opts, + func(ctx context.Context, descCtx tableDescContext) error { + sc, table := descCtx.schema, descCtx.table tableID := tree.NewDInt(tree.DInt(table.GetID())) tableName := tree.NewDString(table.GetName()) // We report the primary index of non-physical tables here. 
These @@ -4134,8 +4132,10 @@ CREATE TABLE crdb_internal.index_columns ( catenumpb.IndexColumn_DESC: tree.NewDString(catenumpb.IndexColumn_DESC.String()), } - return forEachTableDescAll(ctx, p, dbContext, hideVirtual, - func(ctx context.Context, parent catalog.DatabaseDescriptor, _ catalog.SchemaDescriptor, table catalog.TableDescriptor) error { + opts := forEachTableDescOptions{virtualOpts: hideVirtual, allowAdding: true} + return forEachTableDesc(ctx, p, dbContext, opts, + func(ctx context.Context, descCtx tableDescContext) error { + parent, table := descCtx.database, descCtx.table tableID := tree.NewDInt(tree.DInt(table.GetID())) parentName := parent.GetName() tableName := tree.NewDString(table.GetName()) @@ -4250,9 +4250,10 @@ CREATE TABLE crdb_internal.backward_dependencies ( viewDep := tree.NewDString("view") sequenceDep := tree.NewDString("sequence") - return forEachTableDescAllWithTableLookup(ctx, p, dbContext, hideVirtual, func( - ctx context.Context, db catalog.DatabaseDescriptor, _ catalog.SchemaDescriptor, table catalog.TableDescriptor, tableLookup tableLookupFn, - ) error { + opts := forEachTableDescOptions{virtualOpts: hideVirtual, allowAdding: true} + return forEachTableDesc(ctx, p, dbContext, opts, func( + ctx context.Context, descCtx tableDescContext) error { + table, tableLookup := descCtx.table, descCtx.tableLookup tableID := tree.NewDInt(tree.DInt(table.GetID())) tableName := tree.NewDString(table.GetName()) @@ -4384,8 +4385,12 @@ CREATE TABLE crdb_internal.forward_dependencies ( fkDep := tree.NewDString("fk") viewDep := tree.NewDString("view") sequenceDep := tree.NewDString("sequence") - return forEachTableDescAll(ctx, p, dbContext, hideVirtual, /* virtual tables have no backward/forward dependencies*/ - func(ctx context.Context, db catalog.DatabaseDescriptor, _ catalog.SchemaDescriptor, table catalog.TableDescriptor) error { + opts := forEachTableDescOptions{ + virtualOpts: hideVirtual, /* virtual tables have no backward/forward dependencies*/ + allowAdding: true} + return forEachTableDesc(ctx, p, dbContext, opts, + func(ctx context.Context, descCtx tableDescContext) error { + table := descCtx.table tableID := tree.NewDInt(tree.DInt(table.GetID())) tableName := tree.NewDString(table.GetName()) for _, fk := range table.InboundForeignKeys() { @@ -5493,8 +5498,10 @@ CREATE TABLE crdb_internal.partitions ( dbName = dbContext.GetName() } worker := func(ctx context.Context, pusher rowPusher) error { - return forEachTableDescAll(ctx, p, dbContext, hideVirtual, /* virtual tables have no partitions*/ - func(ctx context.Context, db catalog.DatabaseDescriptor, _ catalog.SchemaDescriptor, table catalog.TableDescriptor) error { + opts := forEachTableDescOptions{virtualOpts: hideVirtual /* virtual tables have no partitions*/, allowAdding: true} + return forEachTableDesc(ctx, p, dbContext, opts, + func(ctx context.Context, descCtx tableDescContext) error { + table := descCtx.table return catalog.ForEachIndex(table, catalog.IndexOpts{ AddMutations: true, }, func(index catalog.Index) error { @@ -6221,10 +6228,11 @@ CREATE TABLE crdb_internal.invalid_objects ( // Validate table descriptors const allowAdding = true - if err := forEachTableDescWithTableLookupInternalFromDescriptors( - ctx, p, dbContext, hideVirtual, allowAdding, c, func( - ctx context.Context, _ catalog.DatabaseDescriptor, _ catalog.SchemaDescriptor, descriptor catalog.TableDescriptor, lCtx tableLookupFn, - ) error { + opts := forEachTableDescOptions{virtualOpts: hideVirtual, allowAdding: allowAdding} + if err := 
forEachTableDescFromDescriptors( + ctx, p, dbContext, c, opts, + func(ctx context.Context, descCtx tableDescContext) error { + descriptor, lCtx := descCtx.table, descCtx.tableLookup return doDescriptorValidationErrors(ctx, descriptor, lCtx) }); err != nil { return err @@ -6310,7 +6318,9 @@ CREATE TABLE crdb_internal.cluster_database_privileges ( if err != nil || dbDesc == nil { return false, err } - hasPriv, err := userCanSeeDescriptor(ctx, p, dbDesc, nil /* parentDBDesc */, false /* allowAdding */) + hasPriv, err := userCanSeeDescriptor( + ctx, p, dbDesc, nil /* parentDBDesc */, false /* allowAdding */, false /* includeDropped */) + if err != nil || !hasPriv { return false, err } @@ -6384,8 +6394,10 @@ CREATE TABLE crdb_internal.cross_db_references ( STRING NOT NULL );`, populate: func(ctx context.Context, p *planner, dbContext catalog.DatabaseDescriptor, addRow func(...tree.Datum) error) error { - return forEachTableDescAllWithTableLookup(ctx, p, dbContext, hideVirtual, - func(ctx context.Context, db catalog.DatabaseDescriptor, sc catalog.SchemaDescriptor, table catalog.TableDescriptor, lookupFn tableLookupFn) error { + opts := forEachTableDescOptions{virtualOpts: hideVirtual, allowAdding: true} + return forEachTableDesc(ctx, p, dbContext, opts, + func(ctx context.Context, descCtx tableDescContext) error { + sc, table, lookupFn := descCtx.schema, descCtx.table, descCtx.tableLookup // For tables detect if foreign key references point at a different // database. Additionally, check if any of the columns have sequence // references to a different database. @@ -6865,8 +6877,10 @@ CREATE TABLE crdb_internal.index_usage_statistics ( const numDatums = 4 row := make(tree.Datums, numDatums) worker := func(ctx context.Context, pusher rowPusher) error { - return forEachTableDescAll(ctx, p, dbContext, hideVirtual, - func(ctx context.Context, db catalog.DatabaseDescriptor, _ catalog.SchemaDescriptor, table catalog.TableDescriptor) error { + opts := forEachTableDescOptions{virtualOpts: hideVirtual, allowAdding: true} + return forEachTableDesc(ctx, p, dbContext, opts, + func(ctx context.Context, descCtx tableDescContext) error { + table := descCtx.table tableID := table.GetID() return catalog.ForEachIndex(table, catalog.IndexOpts{}, func(idx catalog.Index) error { indexID := idx.GetID() @@ -7151,7 +7165,7 @@ CREATE VIEW crdb_internal.statement_activity AS contention_time_avg_seconds, cpu_sql_avg_nanos, service_latency_avg_seconds, - service_latency_p99_seconds + service_latency_p99_seconds FROM system.statement_activity`, resultColumns: colinfo.ResultColumns{ @@ -7640,10 +7654,10 @@ CREATE TABLE crdb_internal.transaction_contention_events ( contention_duration INTERVAL NOT NULL, contending_key BYTES NOT NULL, contending_pretty_key STRING NOT NULL, - + waiting_stmt_id string NOT NULL, waiting_stmt_fingerprint_id BYTES NOT NULL, - + database_name STRING NOT NULL, schema_name STRING NOT NULL, table_name STRING NOT NULL, @@ -7776,9 +7790,11 @@ CREATE TABLE crdb_internal.index_spans ( }, }, }, - populate: func(ctx context.Context, p *planner, db catalog.DatabaseDescriptor, addRow func(...tree.Datum) error) error { - return forEachTableDescAll(ctx, p, db, hideVirtual, - func(ctx context.Context, _ catalog.DatabaseDescriptor, _ catalog.SchemaDescriptor, table catalog.TableDescriptor) error { + populate: func(ctx context.Context, p *planner, dbContext catalog.DatabaseDescriptor, addRow func(...tree.Datum) error) error { + opts := forEachTableDescOptions{virtualOpts: hideVirtual, allowAdding: true} + return 
forEachTableDesc(ctx, p, dbContext, opts, + func(ctx context.Context, descCtx tableDescContext) error { + table := descCtx.table return generateIndexSpans(ctx, p, table, addRow) }) }, @@ -7808,6 +7824,7 @@ CREATE TABLE crdb_internal.table_spans ( descriptor_id INT NOT NULL, start_key BYTES NOT NULL, end_key BYTES NOT NULL, + dropped BOOL NOT NULL, INDEX(descriptor_id) );`, indexes: []virtualIndex{ @@ -7828,9 +7845,15 @@ CREATE TABLE crdb_internal.table_spans ( }, }, }, - populate: func(ctx context.Context, p *planner, db catalog.DatabaseDescriptor, addRow func(...tree.Datum) error) error { - return forEachTableDescAll(ctx, p, db, hideVirtual, - func(ctx context.Context, _ catalog.DatabaseDescriptor, _ catalog.SchemaDescriptor, table catalog.TableDescriptor) error { + populate: func(ctx context.Context, p *planner, dbContext catalog.DatabaseDescriptor, addRow func(...tree.Datum) error) error { + opts := forEachTableDescOptions{ + virtualOpts: hideVirtual, + allowAdding: true, + includeDropped: true, + } + return forEachTableDesc(ctx, p, dbContext, opts, + func(ctx context.Context, descCtx tableDescContext) error { + table := descCtx.table return generateTableSpan(ctx, p, table, addRow) }) }, @@ -7846,6 +7869,7 @@ func generateTableSpan( tree.NewDInt(tree.DInt(tabID)), tree.NewDBytes(tree.DBytes(start)), tree.NewDBytes(tree.DBytes(end)), + tree.MakeDBool(tree.DBool(table.Dropped())), ) } @@ -8097,7 +8121,7 @@ func genClusterLocksGenerator( if curLock.LockHolder != nil { txnIDDatum = tree.NewDUuid(tree.DUuid{UUID: curLock.LockHolder.ID}) tsDatum = eval.TimestampToInexactDTimestamp(curLock.LockHolder.WriteTimestamp) - isolationLevelDatum = tree.NewDString(tree.IsolationLevelFromKVTxnIsolationLevel(curLock.LockHolder.IsoLevel).String()) + isolationLevelDatum = tree.NewDString(tree.FromKVIsoLevel(curLock.LockHolder.IsoLevel).String()) strengthDatum = tree.NewDString(curLock.LockStrength.String()) durationDatum = tree.NewDInterval( duration.MakeDuration(curLock.HoldDuration.Nanoseconds(), 0 /* days */, 0 /* months */), @@ -8110,7 +8134,7 @@ func genClusterLocksGenerator( if waiter.WaitingTxn != nil { txnIDDatum = tree.NewDUuid(tree.DUuid{UUID: waiter.WaitingTxn.ID}) tsDatum = eval.TimestampToInexactDTimestamp(waiter.WaitingTxn.WriteTimestamp) - isolationLevelDatum = tree.NewDString(tree.IsolationLevelFromKVTxnIsolationLevel(waiter.WaitingTxn.IsoLevel).String()) + isolationLevelDatum = tree.NewDString(tree.FromKVIsoLevel(waiter.WaitingTxn.IsoLevel).String()) } strengthDatum = tree.NewDString(waiter.Strength.String()) durationDatum = tree.NewDInterval( @@ -9015,7 +9039,7 @@ var crdbInternalClusterReplicationResolvedView = virtualSchemaView{ CREATE VIEW crdb_internal.cluster_replication_spans AS WITH spans AS ( SELECT j.id AS job_id, jsonb_array_elements(crdb_internal.pb_to_json('progress', i.value)->'streamIngest'->'checkpoint'->'resolvedSpans') AS s - FROM system.jobs j LEFT JOIN system.job_info i ON j.id = i.job_id AND i.info_key = 'legacy_progress' + FROM system.jobs j LEFT JOIN system.job_info i ON j.id = i.job_id AND i.info_key = 'legacy_progress' WHERE j.job_type = 'REPLICATION STREAM INGESTION' ) SELECT job_id, @@ -9048,7 +9072,7 @@ CREATE TABLE crdb_internal.cluster_replication_node_streams ( checkpoints INT, megabytes FLOAT, last_checkpoint INTERVAL, - + produce_wait INTERVAL, emit_wait INTERVAL, last_produce_wait INTERVAL, @@ -9059,7 +9083,7 @@ CREATE TABLE crdb_internal.cluster_replication_node_streams ( rf_last_advance INTERVAL, rf_resolved DECIMAL, - rf_resolved_age INTERVAL + 
rf_resolved_age INTERVAL );`, populate: func(ctx context.Context, p *planner, _ catalog.DatabaseDescriptor, addRow func(...tree.Datum) error) error { sm, err := p.EvalContext().StreamManagerFactory.GetReplicationStreamManager(ctx) @@ -9294,3 +9318,40 @@ CREATE TABLE crdb_internal.logical_replication_node_processors ( return nil }, } + +// crdbInternalFullyQualifiedNamesView is a view on system.namespace that +// provides fully qualified names for objects in the cluster. A row is only +// visible if the querying user has the CONNECT privilege on the database. +var crdbInternalFullyQualifiedNamesView = virtualSchemaView{ + schema: ` + CREATE VIEW crdb_internal.fully_qualified_names ( + object_id, + schema_id, + database_id, + object_name, + schema_name, + database_name, + fq_name + ) AS + SELECT + t.id, sc.id, db.id, + t.name, sc.name, db.name, + quote_ident(db.name) || '.' || quote_ident(sc.name) || '.' || quote_ident(t.name) + FROM system.namespace t + JOIN system.namespace sc ON t."parentSchemaID" = sc.id + JOIN system.namespace db on t."parentID" = db.id + -- Filter out the synthetic public schema for the system database. + WHERE db."parentID" = 0 + -- Filter rows that the user should not be able to see. This check matches + -- how metadata visibility works for pg_catalog tables. + AND pg_catalog.has_database_privilege(db.name, 'CONNECT')`, + resultColumns: colinfo.ResultColumns{ + {Name: "object_id", Typ: types.Int}, + {Name: "schema_id", Typ: types.Int}, + {Name: "database_id", Typ: types.Int}, + {Name: "object_name", Typ: types.String}, + {Name: "schema_name", Typ: types.String}, + {Name: "database_name", Typ: types.String}, + {Name: "fq_name", Typ: types.String}, + }, +} diff --git a/pkg/sql/create_function.go b/pkg/sql/create_function.go index a94012b510f0..033f0d843f55 100644 --- a/pkg/sql/create_function.go +++ b/pkg/sql/create_function.go @@ -575,6 +575,12 @@ func setFuncOptions( // Handle the body after the loop, since we don't yet know what language // it is. body = string(t) + case tree.RoutineSecurity: + sec, err := funcinfo.SecurityToProto(t) + if err != nil { + return err + } + udfDesc.SetSecurity(sec) default: return pgerror.Newf(pgcode.InvalidParameterValue, "Unknown function option %q", t) } diff --git a/pkg/sql/create_stats.go b/pkg/sql/create_stats.go index 4446a2d859b6..a1439abf22fe 100644 --- a/pkg/sql/create_stats.go +++ b/pkg/sql/create_stats.go @@ -135,15 +135,16 @@ func (n *createStatsNode) runJob(ctx context.Context) error { if err != nil { return err } + details := record.Details.(jobspb.CreateStatsDetails) - if n.Name != jobspb.AutoStatsName { + if n.Name != jobspb.AutoStatsName && n.Name != jobspb.AutoPartialStatsName { telemetry.Inc(sqltelemetry.CreateStatisticsUseCounter) } var job *jobs.StartableJob jobID := n.p.ExecCfg().JobRegistry.MakeJobID() if err := n.p.ExecCfg().InternalDB.Txn(ctx, func(ctx context.Context, txn isql.Txn) (err error) { - if n.Name == jobspb.AutoStatsName { + if n.Name == jobspb.AutoStatsName || n.Name == jobspb.AutoPartialStatsName { // Don't start the job if there is already a CREATE STATISTICS job running. 
// (To handle race conditions we check this again after the job starts, // but this check is used to prevent creating a large number of jobs that @@ -151,6 +152,13 @@ func (n *createStatsNode) runJob(ctx context.Context) error { if err := checkRunningJobsInTxn(ctx, jobspb.InvalidJobID, txn); err != nil { return err } + // Don't start auto partial stats jobs if there is another auto partial + // stats job running on the same table. + if n.Name == jobspb.AutoPartialStatsName { + if err := checkRunningAutoPartialJobsInTxn(ctx, jobspb.InvalidJobID, txn, n.p.ExecCfg().JobRegistry, details.Table.ID); err != nil { + return err + } + } } return n.p.ExecCfg().JobRegistry.CreateStartableJobWithTxn(ctx, &job, jobID, txn, *record) }); err != nil { @@ -686,10 +694,18 @@ func (r *createStatsResumer) Resume(ctx context.Context, execCtx interface{}) er // associated txn. jobsPlanner := execCtx.(JobExecContext) details := r.job.Details().(jobspb.CreateStatsDetails) - if details.Name == jobspb.AutoStatsName { + if details.Name == jobspb.AutoStatsName || details.Name == jobspb.AutoPartialStatsName { + jobRegistry := jobsPlanner.ExecCfg().JobRegistry // We want to make sure that an automatic CREATE STATISTICS job only runs if // there are no other CREATE STATISTICS jobs running, automatic or manual. - if err := checkRunningJobs(ctx, r.job, jobsPlanner); err != nil { + if err := checkRunningJobs( + ctx, + r.job, + jobsPlanner, + details.Name == jobspb.AutoPartialStatsName, + jobRegistry, + details.Table.ID, + ); err != nil { return err } } @@ -728,6 +744,9 @@ func (r *createStatsResumer) Resume(ctx context.Context, execCtx interface{}) er var err error if details.UsingExtremes { for i, colStat := range details.ColumnStats { + if ctx.Err() != nil { + return ctx.Err() + } // Plan and run partial stats on multiple columns separately since each // partial stat collection will use a different index and have different // plans. @@ -820,21 +839,37 @@ func (r *createStatsResumer) Resume(ctx context.Context, execCtx interface{}) er // pending, running, or paused status that started earlier than this one. If // there are, checkRunningJobs returns an error. If job is nil, checkRunningJobs // just checks if there are any pending, running, or paused CreateStats jobs. -func checkRunningJobs(ctx context.Context, job *jobs.Job, p JobExecContext) error { +// If autoPartial is true, checkRunningJobs also checks if there are any other +// AutoCreatePartialStats jobs in the pending, running, or paused status that +// started earlier than this one for the same table. +func checkRunningJobs( + ctx context.Context, + job *jobs.Job, + p JobExecContext, + autoPartial bool, + jobRegistry *jobs.Registry, + tableID descpb.ID, +) error { jobID := jobspb.InvalidJobID if job != nil { jobID = job.ID() } return p.ExecCfg().InternalDB.Txn(ctx, func(ctx context.Context, txn isql.Txn) (err error) { - return checkRunningJobsInTxn(ctx, jobID, txn) + if err = checkRunningJobsInTxn(ctx, jobID, txn); err != nil { + return err + } + if autoPartial { + return checkRunningAutoPartialJobsInTxn(ctx, jobID, txn, jobRegistry, tableID) + } + return nil }) } -// checkRunningJobsInTxn checks whether there are any other CreateStats jobs in -// the pending, running, or paused status that started earlier than this one. If -// there are, checkRunningJobsInTxn returns an error. If jobID is -// jobspb.InvalidJobID, checkRunningJobsInTxn just checks if there are any pending, -// running, or paused CreateStats jobs. 
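Reviewer note: auto partial stats jobs gain a second, per-table gate on top of the existing cluster-wide one. A sketch of how the two checks compose inside the job-creation transaction, using the helpers added in this file (identifiers as in the surrounding hunks):

    // Inside the isql.Txn that creates the startable job:
    if name == jobspb.AutoStatsName || name == jobspb.AutoPartialStatsName {
        // Gate 1: no other CreateStats/AutoCreateStats job may be running
        // cluster-wide; auto partial jobs are excluded from this check.
        if err := checkRunningJobsInTxn(ctx, jobspb.InvalidJobID, txn); err != nil {
            return err
        }
        if name == jobspb.AutoPartialStatsName {
            // Gate 2: no other AutoCreatePartialStats job may be running
            // against the same table.
            if err := checkRunningAutoPartialJobsInTxn(
                ctx, jobspb.InvalidJobID, txn, jobRegistry, tableID,
            ); err != nil {
                return err
            }
        }
    }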
+// checkRunningJobsInTxn checks whether there are any other CreateStats jobs +// (excluding auto partial stats jobs) in the pending, running, or paused status +// that started earlier than this one. If there are, checkRunningJobsInTxn +// returns an error. If jobID is jobspb.InvalidJobID, checkRunningJobsInTxn just +// checks if there are any pending, running, or paused CreateStats jobs. func checkRunningJobsInTxn(ctx context.Context, jobID jobspb.JobID, txn isql.Txn) error { exists, err := jobs.RunningJobExists(ctx, jobID, txn, jobspb.TypeCreateStats, jobspb.TypeAutoCreateStats, @@ -850,6 +885,40 @@ func checkRunningJobsInTxn(ctx context.Context, jobID jobspb.JobID, txn isql.Txn return nil } +// checkRunningAutoPartialJobsInTxn checks whether there are any other +// AutoCreatePartialStats jobs in the pending, running, or paused status that +// started earlier than this one for the same table. If there are, an error is +// returned. If jobID is jobspb.InvalidJobID, checkRunningAutoPartialJobsInTxn +// just checks if there are any pending, running, or paused +// AutoCreatePartialStats jobs for the same table. +func checkRunningAutoPartialJobsInTxn( + ctx context.Context, + jobID jobspb.JobID, + txn isql.Txn, + jobRegistry *jobs.Registry, + tableID descpb.ID, +) error { + autoPartialStatJobIDs, err := jobs.RunningJobs(ctx, jobID, txn, + jobspb.TypeAutoCreatePartialStats, + ) + if err != nil { + return err + } + + for _, id := range autoPartialStatJobIDs { + job, err := jobRegistry.LoadJobWithTxn(ctx, id, txn) + if err != nil { + return err + } + jobDetails := job.Details().(jobspb.CreateStatsDetails) + if jobDetails.Table.ID == tableID { + return stats.ConcurrentCreateStatsError + } + } + + return nil +} + // OnFailOrCancel is part of the jobs.Resumer interface. func (r *createStatsResumer) OnFailOrCancel(context.Context, interface{}, error) error { return nil } @@ -862,4 +931,5 @@ func init() { } jobs.RegisterConstructor(jobspb.TypeCreateStats, createResumerFn, jobs.UsesTenantCostControl) jobs.RegisterConstructor(jobspb.TypeAutoCreateStats, createResumerFn, jobs.UsesTenantCostControl) + jobs.RegisterConstructor(jobspb.TypeAutoCreatePartialStats, createResumerFn, jobs.UsesTenantCostControl) } diff --git a/pkg/sql/create_table.go b/pkg/sql/create_table.go index b368ae99169d..a3c52f8a1165 100644 --- a/pkg/sql/create_table.go +++ b/pkg/sql/create_table.go @@ -883,7 +883,7 @@ func ResolveFK( } if target.ParentID != tbl.ParentID { if !allowCrossDatabaseFKs.Get(&evalCtx.Settings.SV) { - return errors.WithHintf( + return errors.WithHint( pgerror.Newf(pgcode.InvalidForeignKey, "foreign references between databases are not allowed (see the '%s' cluster setting)", allowCrossDatabaseFKsSetting), @@ -2058,13 +2058,6 @@ func NewTableDesc( } } - // If explicit primary keys are required, error out since a primary key was not supplied. - if desc.GetPrimaryIndex().NumKeyColumns() == 0 && desc.IsPhysicalTable() && evalCtx != nil && - evalCtx.SessionData() != nil && evalCtx.SessionData().RequireExplicitPrimaryKeys { - return nil, errors.Errorf( - "no primary key specified for table %s (require_explicit_primary_keys = true)", desc.Name) - } - for i := range desc.Columns { if _, ok := primaryIndexColumnSet[desc.Columns[i].Name]; ok { desc.Columns[i].Nullable = false @@ -2086,6 +2079,16 @@ func NewTableDesc( return nil, err } + // If explicit primary keys are required, error out if a primary key was not + // supplied. 
+ if desc.IsPhysicalTable() && + evalCtx != nil && evalCtx.SessionData() != nil && + evalCtx.SessionData().RequireExplicitPrimaryKeys && + desc.IsPrimaryIndexDefaultRowID() { + return nil, errors.Errorf( + "no primary key specified for table %s (require_explicit_primary_keys = true)", desc.Name) + } + for _, idx := range desc.PublicNonPrimaryIndexes() { // Increment the counter if this index could be storing data across multiple column families. if idx.NumSecondaryStoredColumns() > 1 && len(desc.Families) > 1 { diff --git a/pkg/sql/create_tenant.go b/pkg/sql/create_tenant.go index eac03eb01e3e..63d8cd4d5ae4 100644 --- a/pkg/sql/create_tenant.go +++ b/pkg/sql/create_tenant.go @@ -13,15 +13,12 @@ package sql import ( "context" - "github.com/cockroachdb/cockroach/pkg/multitenant/mtinfopb" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/errors" ) type createTenantNode struct { - ifNotExists bool - tenantSpec tenantSpec - likeTenantSpec tenantSpec + ifNotExists bool + tenantSpec tenantSpec } func (p *planner) CreateTenantNode(ctx context.Context, n *tree.CreateTenant) (planNode, error) { @@ -29,17 +26,9 @@ func (p *planner) CreateTenantNode(ctx context.Context, n *tree.CreateTenant) (p if err != nil { return nil, err } - var likeTenantSpec tenantSpec - if n.Like.OtherTenant != nil { - likeTenantSpec, err = p.planTenantSpec(ctx, n.Like.OtherTenant, "CREATE VIRTUAL CLUSTER LIKE") - if err != nil { - return nil, err - } - } return &createTenantNode{ - ifNotExists: n.IfNotExists, - tenantSpec: tspec, - likeTenantSpec: likeTenantSpec, + ifNotExists: n.IfNotExists, + tenantSpec: tspec, }, nil } @@ -49,20 +38,6 @@ func (n *createTenantNode) startExec(params runParams) error { return err } - var tmplInfo *mtinfopb.TenantInfo - if n.likeTenantSpec != nil { - tmplInfo, err = n.likeTenantSpec.getTenantInfo(params.ctx, params.p) - if err != nil { - return errors.Wrap(err, "retrieving record for LIKE configuration template") - } - } - configTemplate, err := GetTenantTemplate(params.ctx, - params.p.ExecCfg().Settings, params.p.InternalSQLTxn(), - tmplInfo, 0, "") - if err != nil { - return err - } - var ctcfg createTenantConfig if tenantName != "" { ctcfg.Name = (*string)(&tenantName) @@ -72,7 +47,7 @@ func (n *createTenantNode) startExec(params runParams) error { ctcfg.ID = &tenantID } ctcfg.IfNotExists = n.ifNotExists - _, err = params.p.createTenantInternal(params.ctx, ctcfg, configTemplate) + _, err = params.p.createTenantInternal(params.ctx, ctcfg) return err } diff --git a/pkg/sql/create_view.go b/pkg/sql/create_view.go index 4e03920f7638..4bd1a4523c9a 100644 --- a/pkg/sql/create_view.go +++ b/pkg/sql/create_view.go @@ -88,7 +88,7 @@ func (n *createViewNode) startExec(params runParams) error { if !allowCrossDatabaseViews.Get(¶ms.p.execCfg.Settings.SV) { for _, dep := range n.planDeps { if dbID := dep.desc.GetParentID(); dbID != n.dbDesc.GetID() && dbID != keys.SystemDatabaseID { - return errors.WithHintf( + return errors.WithHint( pgerror.Newf(pgcode.FeatureNotSupported, "the view cannot refer to other databases; (see the '%s' cluster setting)", allowCrossDatabaseViewsSetting), diff --git a/pkg/sql/delegate/BUILD.bazel b/pkg/sql/delegate/BUILD.bazel index b382264c2bd8..83a1a9fe3331 100644 --- a/pkg/sql/delegate/BUILD.bazel +++ b/pkg/sql/delegate/BUILD.bazel @@ -17,6 +17,7 @@ go_library( "show_functions.go", "show_grants.go", "show_jobs.go", + "show_logical_replication_jobs.go", "show_partitions.go", "show_queries.go", "show_range_for_row.go", diff 
--git a/pkg/sql/delegate/delegate.go b/pkg/sql/delegate/delegate.go index d034f7a4e9d6..ddf4606a4ae2 100644 --- a/pkg/sql/delegate/delegate.go +++ b/pkg/sql/delegate/delegate.go @@ -96,6 +96,9 @@ func TryDelegate( case *tree.ShowJobs: return d.delegateShowJobs(t) + case *tree.ShowLogicalReplicationJobs: + return d.delegateShowLogicalReplicationJobs(t) + case *tree.ShowChangefeedJobs: return d.delegateShowChangefeedJobs(t) diff --git a/pkg/sql/delegate/show_logical_replication_jobs.go b/pkg/sql/delegate/show_logical_replication_jobs.go new file mode 100644 index 000000000000..6e7c421b7811 --- /dev/null +++ b/pkg/sql/delegate/show_logical_replication_jobs.go @@ -0,0 +1,75 @@ +// Copyright 2024 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package delegate + +import ( + "fmt" + + "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" + "github.com/cockroachdb/cockroach/pkg/sql/sqltelemetry" +) + +const ( + baseSelectClause = ` +WITH table_names AS ( + SELECT + t.job_id, + array_agg(t.table_name) AS targets + FROM ( + SELECT + id AS job_id, + crdb_internal.get_fully_qualified_table_name(jsonb_array_elements(crdb_internal.pb_to_json( + 'cockroach.sql.jobs.jobspb.Payload', + payload)->'logicalReplicationDetails'->'replicationPairs')['dstDescriptorId']::INT) AS table_name + FROM crdb_internal.system_jobs + WHERE job_type = 'LOGICAL REPLICATION' + ) AS t + GROUP BY t.job_id +) + +SELECT + job_info.id AS job_id, + job_info.status, + table_names.targets AS targets, + hlc_to_timestamp((crdb_internal.pb_to_json( + 'cockroach.sql.jobs.jobspb.Progress', + job_info.progress)->'LogicalReplication'->'replicatedTime'->>'wallTime')::DECIMAL) AS replicated_time%s +FROM crdb_internal.system_jobs AS job_info +LEFT JOIN table_names +ON job_info.id = table_names.job_id +WHERE job_type = 'LOGICAL REPLICATION' +` + + withDetailsClause = ` + , + hlc_to_timestamp((crdb_internal.pb_to_json( + 'cockroach.sql.jobs.jobspb.Payload', + payload)->'logicalReplicationDetails'->'replicationStartTime'->>'wallTime')::DECIMAL) AS replication_start_time, + IFNULL(crdb_internal.pb_to_json( + 'cockroach.sql.jobs.jobspb.Payload', + payload)->'logicalReplicationDetails'->'defaultConflictResolution'->>'conflictResolutionType', 'LWW') AS conflict_resolution_type, + crdb_internal.pb_to_json( + 'cockroach.sql.jobs.jobspb.Payload', + payload)->>'description' AS description` +) + +func (d *delegator) delegateShowLogicalReplicationJobs( + n *tree.ShowLogicalReplicationJobs, +) (tree.Statement, error) { + sqltelemetry.IncrementShowCounter(sqltelemetry.LogicalReplicationJobs) + var sqlStmt string + if n.WithDetails { + sqlStmt = fmt.Sprintf(baseSelectClause, withDetailsClause) + } else { + sqlStmt = fmt.Sprintf(baseSelectClause, "") + } + return d.parse(sqlStmt) +} diff --git a/pkg/sql/delegate/show_ranges.go b/pkg/sql/delegate/show_ranges.go index bfed87cf9588..72a3e1749bd8 100644 --- a/pkg/sql/delegate/show_ranges.go +++ b/pkg/sql/delegate/show_ranges.go @@ -287,6 +287,12 @@ AND s.end_key > r.start_key`) fmt.Fprintf(&buf, " AND s.index_id = %d", idx.ID()) } } + + // Exclude dropped tables from .crdb_internal.table_spans + if n.Source != tree.ShowRangesIndex && n.Options.Mode != tree.ExpandIndexes { + 
buf.WriteString(" AND s.dropped = false") + } + buf.WriteString("\n)") // end of ranges CTE. // Now, enhance the result set so far with additional table/index diff --git a/pkg/sql/distsql_physical_planner.go b/pkg/sql/distsql_physical_planner.go index 573181e0d5c7..7404e048d09e 100644 --- a/pkg/sql/distsql_physical_planner.go +++ b/pkg/sql/distsql_physical_planner.go @@ -818,6 +818,9 @@ const ( // NodeUnhealthy means that the node should be avoided because // it's not healthy. NodeUnhealthy + // NodeDraining means that the node should be avoided because + // it's draining. + NodeDraining ) // spanPartitionState captures information about the current state of the @@ -1567,6 +1570,7 @@ func (dsp *DistSQLPlanner) deprecatedHealthySQLInstanceIDForKVNodeIDSystem( func (dsp *DistSQLPlanner) checkInstanceHealth( instanceID base.SQLInstanceID, instanceRPCAddr string, + isDraining bool, nodeStatusesCache map[base.SQLInstanceID]NodeStatus, ) NodeStatus { if nodeStatusesCache != nil { @@ -1575,7 +1579,9 @@ func (dsp *DistSQLPlanner) checkInstanceHealth( } } status := NodeOK - if err := dsp.nodeHealth.connHealthInstance(instanceID, instanceRPCAddr); err != nil { + if isDraining { + status = NodeDraining + } else if err := dsp.nodeHealth.connHealthInstance(instanceID, instanceRPCAddr); err != nil { if errors.Is(err, rpc.ErrNotHeartbeated) { // Consider ErrNotHeartbeated as a temporary error (see its description) and // avoid caching its result, as it can resolve to a more accurate result soon. @@ -1624,7 +1630,7 @@ func (dsp *DistSQLPlanner) healthySQLInstanceIDForKVNodeHostedInstanceResolver( sqlInstance := base.SQLInstanceID(nodeID) if n, ok := instances[sqlInstance]; ok { if status := dsp.checkInstanceHealth( - sqlInstance, n.InstanceRPCAddr, planCtx.nodeStatuses); status == NodeOK { + sqlInstance, n.InstanceRPCAddr, n.IsDraining, planCtx.nodeStatuses); status == NodeOK { return sqlInstance, SpanPartitionReason_TARGET_HEALTHY } } @@ -1688,7 +1694,8 @@ func (dsp *DistSQLPlanner) filterUnhealthyInstances( for _, n := range instances { // Gateway is always considered healthy if n.InstanceID == dsp.gatewaySQLInstanceID || - dsp.checkInstanceHealth(n.InstanceID, n.InstanceRPCAddr, nodeStatusesCache) == NodeOK { + dsp.checkInstanceHealth(n.InstanceID, n.InstanceRPCAddr, + n.IsDraining, nodeStatusesCache) == NodeOK { instances[j] = n j++ } else { diff --git a/pkg/sql/distsql_physical_planner_test.go b/pkg/sql/distsql_physical_planner_test.go index 357ce6b1d4df..f2fd5c0af9e0 100644 --- a/pkg/sql/distsql_physical_planner_test.go +++ b/pkg/sql/distsql_physical_planner_test.go @@ -14,6 +14,7 @@ import ( "context" gosql "database/sql" "fmt" + "io" "reflect" "strconv" "strings" @@ -29,6 +30,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv/kvclient/rangecache" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/rpc" + "github.com/cockroachdb/cockroach/pkg/server/serverpb" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" @@ -389,9 +391,13 @@ func TestDistSQLRangeCachesIntegrationTest(t *testing.T) { func TestDistSQLUnavailableHosts(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) + + skip.UnderDuress(t, "takes 1m") + ctx := context.Background() const numNodes = 5 + const lastServerIdx = numNodes - 1 const n = 100 @@ -403,7 +409,7 @@ func TestDistSQLUnavailableHosts(t *testing.T) { ) 
startAndSetupCluster := func(t *testing.T, mode tenantMode) ( - serverutils.TestClusterInterface, *sqlutils.SQLRunner) { + serverutils.TestClusterInterface, serverutils.ApplicationLayerInterface, *sqlutils.SQLRunner) { tc := serverutils.StartCluster(t, numNodes, base.TestClusterArgs{ ReplicationMode: base.ReplicationManual, ServerArgs: base.TestServerArgs{ @@ -413,17 +419,22 @@ func TestDistSQLUnavailableHosts(t *testing.T) { }) var db *gosql.DB + var lastServer serverutils.ApplicationLayerInterface switch mode { case singleTenant: db = tc.ServerConn(0) + lastServer = tc.Server(lastServerIdx) case sharedProcess: for i := range tc.NumServers() { - _, tenantDB, err := tc.Server(i).TenantController().StartSharedProcessTenant( + tenant, tenantDB, err := tc.Server(i).TenantController().StartSharedProcessTenant( ctx, base.TestSharedProcessTenantArgs{TenantName: "app"}) require.NoError(t, err) if db == nil { db = tenantDB } + if i == lastServerIdx { + lastServer = tenant + } } case externalProcess: for i := range tc.NumServers() { @@ -437,6 +448,9 @@ func TestDistSQLUnavailableHosts(t *testing.T) { if db == nil { db = tenant.SQLConn(t) } + if i == lastServerIdx { + lastServer = tenant + } } // Grant capability to run RELOCATE to secondary (test) tenant. @@ -467,7 +481,7 @@ func TestDistSQLUnavailableHosts(t *testing.T) { } r.Exec(t, fmt.Sprintf("INSERT INTO t SELECT i, i*i FROM generate_series(1, %d) AS g(i)", n)) - return tc, r + return tc, lastServer, r } // Use a query that uses the entire table and is easy to verify. @@ -499,9 +513,7 @@ func TestDistSQLUnavailableHosts(t *testing.T) { } t.Run("unhealthy-nodes-in-single-tenant-mode", func(t *testing.T) { - skip.UnderDuress(t, "takes 20s") - - tc, r := startAndSetupCluster(t, singleTenant) + tc, _, r := startAndSetupCluster(t, singleTenant) defer tc.Stopper().Stop(context.Background()) r.CheckQueryResults(t, @@ -555,9 +567,7 @@ func TestDistSQLUnavailableHosts(t *testing.T) { }) t.Run("unhealthy-nodes-in-shared-mode", func(t *testing.T) { - skip.UnderDuress(t, "takes 20s") - - tc, r := startAndSetupCluster(t, sharedProcess) + tc, _, r := startAndSetupCluster(t, sharedProcess) defer tc.Stopper().Stop(context.Background()) r.CheckQueryResults(t, @@ -605,9 +615,7 @@ func TestDistSQLUnavailableHosts(t *testing.T) { }) t.Run("unhealthy-nodes-in-external-mode", func(t *testing.T) { - skip.UnderDuress(t, "takes 20s") - - tc, r := startAndSetupCluster(t, externalProcess) + tc, _, r := startAndSetupCluster(t, externalProcess) defer tc.Stopper().Stop(context.Background()) r.CheckQueryResults(t, @@ -664,88 +672,162 @@ func TestDistSQLUnavailableHosts(t *testing.T) { t.Error(err) } }) -} -func TestDistSQLDrainingHosts(t *testing.T) { - defer leaktest.AfterTest(t)() - defer log.Scope(t).Close(t) + drainServer := func(t *testing.T, server serverutils.ApplicationLayerInterface, idx int) { + c := server.GetAdminClient(t) + req := &serverpb.DrainRequest{DoDrain: true} + stream, err := c.Drain(ctx, req) + require.NoError(t, err) + _, err = stream.Recv() + require.NoError(t, err) + unexpected, err := stream.Recv() + if err != io.EOF { + if unexpected != nil { + t.Fatalf("unexpected additional response: %v // %v", unexpected, err) + } + if err == nil { + err = errors.New("unexpected response") + } + t.Fatal(err) + } + } - const numNodes = 2 - tc := serverutils.StartCluster( - t, - numNodes, - base.TestClusterArgs{ - ReplicationMode: base.ReplicationManual, - ServerArgs: base.TestServerArgs{Knobs: base.TestingKnobs{DistSQL: &execinfra.TestingKnobs{DrainFast: 
true}}, UseDatabase: "test"}, - }, - ) - ctx := context.Background() - defer tc.Stopper().Stop(ctx) + t.Run("draining-nodes-in-single-tenant-mode", func(t *testing.T) { + tc, lastServer, r := startAndSetupCluster(t, singleTenant) + defer tc.Stopper().Stop(context.Background()) - conn := tc.ServerConn(0) - sqlutils.CreateTable( - t, - conn, - "nums", - "num INT", - numNodes, /* numRows */ - sqlutils.ToRowFn(sqlutils.RowIdxFn), - ) + r.CheckQueryResults(t, + `SELECT IF(substring(start_key for 1)='…',start_key,NULL), + IF(substring(end_key for 1)='…',end_key,NULL), + lease_holder, replicas FROM [SHOW RANGES FROM TABLE t WITH DETAILS]`, + [][]string{ + {"NULL", "…/1/0", "1", "{1}"}, + {"…/1/0", "…/1/20", "1", "{1,2,3}"}, + {"…/1/20", "…/1/40", "2", "{2,3,4}"}, + {"…/1/40", "…/1/60", "3", "{1,3,4}"}, + {"…/1/60", "…/1/80", "4", "{1,2,4}"}, + {"…/1/80", "NULL", "5", "{2,3,5}"}, + }, + ) + + require.NoError(t, runQuery(t, r)) + + // Verify the plan should include all 5 nodes. + require.NoError(t, checkQueryPlan(t, r, + []string{"Diagram: https://cockroachdb.github.io/distsqlplan/decode.html#eJyklV9ro0wUxu_fTyEHXtrCiI4aY7xqaVwaNv2zMcsulFBm46krNU46M9KWku--GNutcRtR40XAceZ5fvOcmZNXkI8p-BAG0-B8riXZPde-zK4vtdvg5830bHKlHY8n4Tz8NiVaeHF2E5xob1Nlvjp-lo85ExidlGvUQvtxEcyCUmY6-RpoR-OExYKt_j8CAhmP8IqtUIJ_CxQIWEDABgIOEBjAgsBa8CVKyUUx5XW7YBI9g28SSLJ1rorhBYElFwj-K6hEpQg-zNmvFGfIIhSGCQQiVCxJtzbqVN2tH_AFCJzzNF9l0tfesYFAuGbFiG5YJiw2BHiuPmykYjGCTytckzH45oa0RzuLY4ExU1wYg12y8Pvl8Sk92Wtr1WwHe20_3PKMiwjLrX1YLTbNYNTsRmbXyOhuIrR9sWifYhmWqRtO-3rRLnSVWNzD6uXu2FrtQ7F6heKYuuG2D8XqQlcJZXhYKMMdW7t9KHavUFxTN7z2odhd6CqheIeF4u3YOu1DcXqF4pl660ScLmiVREaHJTLq0mJnKNc8k1jreZ87mTUnnRbNEaMYy04qeS6WeCP4cju3fL3eCm0HIpSq_ErLl0n2_kkqgWz19x-iqkQblawdJVpVGtSVrGamLlB2o5SzX4nWlZy-23PrSoNGJXc_k1VXcvsyDetKw0Ylbz-TXVfy-jJ5daVR8zEw90M5_5zN5mPeQDUqrs59yp_ukgh8MN8e_ZOf9weKBSyWxf0Nf_Onrez8ZV3cvnuWSiRwyR5wjArFKskSqZIl-ErkuNn89ycAAP__hKt0rw=="})) + + // Drain the fourth node. 
+ drainServer(t, lastServer, 4 /* idx */) + + plans := []string{ + // 5th range is planned on 1st node (gateway) + "Diagram: https://cockroachdb.github.io/distsqlplan/decode.html#eJyklF9vmzAUxd_3KawrTW0lR2AgJOOpVcPUaOmfhUybVEWVF24pKsGpbdRWVb77BLQrQQ0CygMStvmd43Pt-wLqIQEPAn_mny5InN4K8n1-eU6u_T9Xs5PpBTmcTINF8HNGSXB2cuUfkdelKlsfPqmHjEsMj8p_9JL8PvPnfomZTX_45GAS80jy9dcDoJCKEC_4GhV418CAggUUbKDgwJLCRooVKiVkPv1SLJ6GT-CZFOJ0k-l8eElhJSSC9wI61gmCBwv-N8E58hClYQKFEDWPk0JCH-ubzT0-A4VTkWTrVHnkzTJQCDY8HxkYlkl4GhJGhL5DCcstBZHpd0mleYTgsYrH6QQ8c0vb2zyJIokR10Iazq7L4Nf54TE72itr1WSdvbLvalkqZIjlNt-llttmY-NuxuyasfGOMda-bKxP2QzLHBiO2bparIu7SijDz1VruCNrtQ_F6hWKYw4Mt30oVhd3lVDcz4Xi7sja7UOxe4XimgNj3D4Uu4u7Siijz4Uy6tJO5qg2IlVYu98fK5k1pQHLGwGGEZZdQ4lMrvBKilWxtvy8LEDFQIhKl7Os_Jimb1NKS-Tr_92wSmKNJGuHxKokp06yGknfOniyG0nOfhKrk5y-uxvWScNGkrvfk1UnuX09uXXSqJE03u_JrpPGfT2N8jN6m4jHmzgED8zXZ_DB6-2B_AceqfyiBHfiscAunjf5Mb_liUIK5_weJ6hRruM0Vjpegadlhtvtl38BAAD__7Hc7LA=", + // 5th range is planned on 2nd node + "Diagram: https://cockroachdb.github.io/distsqlplan/decode.html#eJyklF9vmzAUxd_3KawrTW0lR2AgJOOpVcPUaOmfhUybVEWVF24pKsGpbdRWVb77BLQrQQ0CygMStvmd43Pt-wLqIQEPAn_mny5InN4K8n1-eU6u_T9Xs5PpBTmcTINF8HNGSXB2cuUfkdelKlsfPqmHjEsMj8p_9JL8PvPnfomZTX_45GAS80jy9dcDoJCKEC_4GhV418CAggUUbKDgwJLCRooVKiVkPv1SLJ6GT-CZFOJ0k-l8eElhJSSC9wI61gmCBwv-N8E58hClYQKFEDWPk0JCH-ubzT0-A4VTkWTrVHnkzTJQCDY8HxkYlgnLLQWR6XcZpXmE4LGKr-kEPHNL21s7iSKJEddCGs6us-DX-eExO9ora9Vknb2y72pZKmSI5dbepZbbZmPjbsbsmrHxjjHWvlSsT6kMyxwYjkl4GhJGhL5D2bpyrIvTSkDDz1VuuCNrtQ_I6hWQYw4Mt_1xtrq4q4Tifi4Ud0fWbh-K3SsU1xwY4_ah2F3cVUIZfS6UUZfWMke1EanC2l3_WMmsKQ1Y3hQwjLDsIEpkcoVXUqyKteXnZQEqBkJUupxl5cc0fZtSWiJf_--MVRJrJFk7JFYlOXWS1Uj61sGT3Uhy9pNYneT03d2wTho2ktz9nqw6ye3rya2TRo2k8X5Pdp007utplJ_R20Q83sQheGC-PoMPXm8P5D_wSOUXJbgTjwV28bzJj_ktTxRSOOf3OEGNch2nsdLxCjwtM9xuv_wLAAD__7Co7LA=", + // 5th range is planned on 3rd node + "Diagram: https://cockroachdb.github.io/distsqlplan/decode.html#eJyklF9vmzAUxd_3KawrTW0lR2AgJOOpVcPUaOmfhUybVEWVF24pKsGpbdRWVb77BLQrQQ0CygMStvmd43Pt-wLqIQEPAn_mny5InN4K8n1-eU6u_T9Xs5PpBTmcTINF8HNGSXB2cuUfkdelKlsfPqmHjEsMj8p_9JL8PvPnfomZTX_45GAS80jy9dcDoJCKEC_4GhV418CAggUUbKDgwJLCRooVKiVkPv1SLJ6GT-CZFOJ0k-l8eElhJSSC9wI61gmCBwv-N8E58hClYQKFEDWPk0JCH-ubzT0-A4VTkWTrVHnkzTJQCDY8HxkYlgnLLQWR6XcZpXmE4LGKr-kEPHNL21s7iSKJEddCGs6us-DX-eExO9ora9Vknb2y72pZKmSI5dbepZbbZmPjbsbsmrHxjjHWvlSsT6kMyxwYTvtqsS7uKqEMP1et4Y6s1T4Uq1cojjkwXJPwNCSMCH2HsnVAVhenlYDczwXk7sja7QOyewXkmgNj3P7U2F3cVUIZfS6UUZfWMke1EanC2l3_WMmsKQ1Y3hQwjLDsIEpkcoVXUqyKteXnZQEqBkJUupxl5cc0fZtSWiJf_--MVRJrJFk7JFYlOXWS1Uj61sGT3Uhy9pNYneT03d2wTho2ktz9nqw6ye3rya2TRo2k8X5Pdp007utplJ_R20Q83sQheGC-PoMPXm8P5D_wSOUXJbgTjwV28bzJj_ktTxRSOOf3OEGNch2nsdLxCjwtM9xuv_wLAAD__6oi7LA=", + } - db := tc.ServerConn(0) - db.SetMaxOpenConns(1) - r := sqlutils.MakeSQLRunner(db) - - // Force the query to be distributed. - r.Exec(t, "SET DISTSQL = ON") - - // Shortly after starting a cluster, the first server's StorePool may not be - // fully initialized and ready to do rebalancing yet, so wrap this in a - // SucceedsSoon. - testutils.SucceedsSoon(t, func() error { - _, err := db.Exec( - fmt.Sprintf(`ALTER TABLE nums SPLIT AT VALUES (1); - ALTER TABLE nums EXPERIMENTAL_RELOCATE VALUES (ARRAY[%d], 1);`, - tc.Server(1).GetFirstStoreID(), - ), + // It can be either planned on gateway if lease for range 5 haven't moved or + // node 2/3 if it did. 
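Reviewer note: draining now goes through the admin Drain RPC rather than calling the distsql server's Drain directly, so the test exercises the same path a production drain takes. A trimmed sketch of the drainServer helper introduced above (same RPC sequence; error plumbing shortened):

    package sql_test

    import (
        "context"
        "io"
        "testing"

        "github.com/cockroachdb/cockroach/pkg/server/serverpb"
        "github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
        "github.com/stretchr/testify/require"
    )

    func drainServer(t *testing.T, server serverutils.ApplicationLayerInterface) {
        ctx := context.Background()
        c := server.GetAdminClient(t)
        // DoDrain=true performs the drain instead of only reporting status.
        stream, err := c.Drain(ctx, &serverpb.DrainRequest{DoDrain: true})
        require.NoError(t, err)
        _, err = stream.Recv() // first message: the drain progress report
        require.NoError(t, err)
        // The stream must then close cleanly.
        if _, err := stream.Recv(); err != io.EOF {
            t.Fatalf("expected EOF after drain, got %v", err)
        }
    }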
+ testutils.SucceedsWithin(t, func() error { + return checkQueryPlan(t, r, plans) + }, 2*time.Second) + + // Now run the query to ensure the correct result + require.NoError(t, runQuery(t, r)) + }) + + t.Run("draining-nodes-in-shared-mode", func(t *testing.T) { + skip.UnderShort(t, "takes 20s") + + tc, lastServer, r := startAndSetupCluster(t, sharedProcess) + defer tc.Stopper().Stop(context.Background()) + + r.CheckQueryResults(t, + `SELECT IF(substring(start_key for 1)='…',start_key,NULL), + IF(substring(end_key for 1)='…',end_key,NULL), + lease_holder, replicas FROM [SHOW RANGES FROM TABLE t WITH DETAILS]`, + [][]string{ + {"NULL", "…/Table/106/1/0", "1", "{1}"}, + {"…/Table/106/1/0", "…/Table/106/1/20", "1", "{1,2,3}"}, + {"…/Table/106/1/20", "…/Table/106/1/40", "2", "{2,3,4}"}, + {"…/Table/106/1/40", "…/Table/106/1/60", "3", "{1,3,4}"}, + {"…/Table/106/1/60", "…/Table/106/1/80", "4", "{1,2,4}"}, + {"…/Table/106/1/80", "NULL", "5", "{2,3,5}"}, + }, ) - return err + + require.NoError(t, runQuery(t, r)) + + // Verify the plan should include all 5 nodes. + require.NoError(t, checkQueryPlan(t, r, + []string{"Diagram: https://cockroachdb.github.io/distsqlplan/decode.html#eJyklV9vmzAUxd_3KdCVpraSq2AghPDUqmFqtPTPQqZNqqLKC7cMleDUNmqrKt99IjQNYQVB4CERxj7n8Lv25Q3kUwwu-N7Eu5hpUfLAtW_Tmyvtzvt9OzkfX2vHo7E_839MiOZfnt96J9r7VJkuj1_kU8oEBif5GjXXfl16Uy-XmYy_e9rRKGKhYMuvR0Ag4QFesyVKcO-AAgEDCJhAwAICfZgTWAm-QCm5yKa8bRaMgxdwdQJRskpVNjwnsOACwX0DFakYwYUZ-xPjFFmAoqcDgQAVi-KNjTpT96tHfAUCFzxOl4l0tW1sIOCvWDbSo7rdo6f5X8_QYb4mwFO1s5SKhQguLWQcj8DV16R5zPMwFBgyxUWvv5_S_3l1fEZPKm2Nkm2_0nbnliZcBJi_5s5qvq4PRvV2ycxSMrpPhDYvHD28cD1D39bOal472iZpAZHdrXb2nq3RHJDRAZD1AchuDshok7QAaNAN0GDP1mwOyOwAyP4A5DQHZLZJWgDkdAPk7NlazQFZHQA574CMxnSsNjELdIbd6AzbtOQpyhVPJJZ65OdOesnplGbNFIMQ884reSoWeCv4YjM3v73ZCG0GApQqf0rzm3GyfSSVQLb8-KIUlWitkrGnRItK_bKSUZ-pTSizVsqqVqJlJevQ17PLSv1aJbs6k1FWsg_NNCgrDWqVnOpMZlnJOTSTU1Ya1m8DvTqU9d_erN_mNamG2dF5iPnzfRSAC_r7dfrJz_aCbAELZXZ-_b_8eSM7e11lp--BxRIJXLFHHKFCsYySSKpoAa4SKa7XX_4FAAD__5H-gCw="})) + + // Drain the fourth node. + drainServer(t, lastServer, 4 /* idx */) + + // Range for 5th node should be planned on the gateway + testutils.SucceedsWithin(t, func() error { + return checkQueryPlan(t, r, + []string{"Diagram: https://cockroachdb.github.io/distsqlplan/decode.html#eJyklF1ro04Uxu__n2I48KctTImjxmS9amlcGjZ92ZhlF0oos_HUSo1jZ0baUvLdF7VpjDSiiRcJzsvveeY543kH9RyDC7438S5mJEoeBPk-vbkid96f28n5-Jocj8b-zP85ocS_PL_1TsjHUpUtj1_Vc8YlBiflHj0nvy-9qVdiJuMfHjkaRTyUfPn_EVBIRIDXfIkK3DtgQMEEChZQsGFOIZVigUoJmU-_F4vHwSu4BoUoSTOdD88pLIREcN9BRzpGcGHG_8Y4RR6g7BlAIUDNo7iQ0Gf6Pn3CN6BwIeJsmSiXrC0DBT_l-UiPGU6PnZZ_PdMgPAkII0I_ooT5ioLI9EZeaR4iuKzidzwC11jR9pbPw1BiyLWQPXvbsf_r6viMneyUNWuy9k7ZjVqWCBlgeeSN1HzVbGzYzZhVMzbcMsbal5DtX8KeaayraButK8e6OK0E1D-scv0tWbN9QOYBAdmfATntAzK7OK0E5BwWkLMla7UPyDogIOczoGH7gKwuTisBDQ4LaNCl5UxRpSJRWOsBXysZNaVTljcLDEIsO4sSmVzgrRSLYm35elOAioEAlS5nWfkyTtZTSkvky8-OWSWxRpK5RWJVkl0nmY2kbx08WY0kezeJ1Un2vqfr10n9RpKz25NZJzn7enLqpEEjabjbk1UnDff1NMjv6EMsXu6jAFwwPp7TL37WD-QbeKjyD8V_FC8FdvaW5tf8gccKKVzxJxyhRrmMkkjpaAGulhmuVv_9CwAA__-0ofXg"}) + }, time.Second) + + // Now run the query to ensure the correct result + require.NoError(t, runQuery(t, r)) }) - // Ensure that the range cache is populated (see #31235). 
- r.Exec(t, "SHOW RANGES FROM TABLE nums") - - const query = "SELECT count(*) FROM NUMS" - expectPlan := func(expectedPlan [][]string) { - t.Helper() - planQuery := fmt.Sprintf(`SELECT info FROM [EXPLAIN (DISTSQL, SHAPE) %s] WHERE info LIKE 'Diagram%%'`, query) - testutils.SucceedsSoon(t, func() error { - resultPlan := r.QueryStr(t, planQuery) - if !reflect.DeepEqual(resultPlan, expectedPlan) { - return errors.Errorf("\nexpected:%v\ngot:%v", expectedPlan, resultPlan) - } - return nil - }) - } + t.Run("draining-nodes-in-external-mode", func(t *testing.T) { + tc, lastServer, r := startAndSetupCluster(t, externalProcess) + defer tc.Stopper().Stop(context.Background()) + + r.CheckQueryResults(t, + `SELECT IF(substring(start_key for 1)='…',start_key,NULL), + IF(substring(end_key for 1)='…',end_key,NULL), + lease_holder, replicas FROM [SHOW RANGES FROM TABLE t WITH DETAILS]`, + [][]string{ + {"NULL", "…/Table/106/1/0", "1", "{1}"}, + {"…/Table/106/1/0", "…/Table/106/1/20", "1", "{1,2,3}"}, + {"…/Table/106/1/20", "…/Table/106/1/40", "2", "{2,3,4}"}, + {"…/Table/106/1/40", "…/Table/106/1/60", "3", "{1,3,4}"}, + {"…/Table/106/1/60", "…/Table/106/1/80", "4", "{1,2,4}"}, + {"…/Table/106/1/80", "NULL", "5", "{2,3,5}"}, + }, + ) - // Verify distribution. - expectPlan([][]string{{"Diagram: https://cockroachdb.github.io/distsqlplan/decode.html#eJyskl9r2zAUxd_3KcyF0XQoJHKyFz01JB41y7_FLh0UEzT7xjO1JU-S6Urwdx-22zU2TUjG9CDQ1fXvHOvcPehfKTDwnLkz9a1E7KT1ZbNaWA_O9_V84i6t3sz1fO_bnFje7WTtXFsvraEshOl9um7aRZHpwLq_dTZOA5m7Xx3rapbwWPHs4xUQEDLCJc9QA3sACgRsCAjkSoaotVRVeV83udFvYEMCicgLU5UDAqFUCGwPJjEpAgOf_0hxgzxCNRgCgQgNT9IaXVm5qbZt_ojPQGAq0yITmgEBL-dCM6s_oBCUBGRh3iS04TECowee3BmwYUnOtzWJY4UxN1IN7Lar6epu6W83q3uvd31U2-5o20e13yQLIVWECqOWXlCedjduu_PuFlt36fdu6HFzo465ccscPT8venFeA9o_Oy96ia2DFxn9h7xGl8zKBnUuhcZObu8rDTtKfVoFjFGMzTRoWagQ10qGdW9zXNWguhChNs0tbQ6ueL3SRiHP_o76IYmeJNktEj0k2V2SfZL0-QJPo5Ok8XES7ZLG__p3o-rtd6l82iYRMBi-rP472-uC6gMe62oAvJ_yqcb6z3kV346nGgks-CPO0KDKEpFok4TAjCqwLD_8CQAA__-76dKf"}}) + require.NoError(t, runQuery(t, r)) - // Drain the second node and expect the query to be planned on only the - // first node. - distServer := tc.Server(1).DistSQLServer().(*distsql.ServerImpl) - distServer.Drain(ctx, 0 /* flowDrainWait */, nil /* reporter */) + // Verify the plan should include all 5 nodes. 
+ require.NoError(t, checkQueryPlan(t, r,
+ []string{"Diagram: https://cockroachdb.github.io/distsqlplan/decode.html#eJyklV9vmzAUxd_3KdCVpraSq2AghPDUqmFqtPTPQqZNqqLKC7cMleDUNmqrKt99IjQNYQVB4CERxj7n8Lv25Q3kUwwu-N7Eu5hpUfLAtW_Tmyvtzvt9OzkfX2vHo7E_839MiOZfnt96J9r7VJkuj1_kU8oEBif5GjXXfl16Uy-XmYy_e9rRKGKhYMuvR0Ag4QFesyVKcO-AAgEDCJhAwAICfZgTWAm-QCm5yKa8bRaMgxdwdQJRskpVNjwnsOACwX0DFakYwYUZ-xPjFFmAoqcDgQAVi-KNjTpT96tHfAUCFzxOl4l0tW1sIOCvWDbSo7rdo6f5X8_QYb4mwFO1s5SKhQguLWQcj8DV16R5zPMwFBgyxUWvv5_S_3l1fEZPKm2Nkm2_0nbnliZcBJi_5s5qvq4PRvV2ycxSMrpPhDYvHD28cD1D39bOal472iZpAZHdrXb2nq3RHJDRAZD1AchuDshok7QAaNAN0GDP1mwOyOwAyP4A5DQHZLZJWgDkdAPk7NlazQFZHQA574CMxnSsNjELdIbd6AzbtOQpyhVPJJZ65OdOesnplGbNFIMQ884reSoWeCv4YjM3v73ZCG0GApQqf0rzm3GyfSSVQLb8-KIUlWitkrGnRItK_bKSUZ-pTSizVsqqVqJlJevQ17PLSv1aJbs6k1FWsg_NNCgrDWqVnOpMZlnJOTSTU1Ya1m8DvTqU9d_erN_mNamG2dF5iPnzfRSAC_r7dfrJz_aCbAELZXZ-_b_8eSM7e11lp--BxRIJXLFHHKFCsYySSKpoAa4SKa7XX_4FAAD__5H-gCw="}))
- expectPlan([][]string{{"Diagram: https://cockroachdb.github.io/distsqlplan/decode.html#eJyUkVFr2z4Uxd__n0Jc-NNkKNTuo54WEo-apUlme3RQTNDsG0_UljxJpivB333Ybre2rGXTg-Dee3x-x1cnuO81BNJoE60ypvTRsA_J7ordRF_2m2W8ZbN1nGbppw1n6eVyH83Zg7Qwnfazd_NJrrvG5ez6MkqiyWQTf4zY2VrJysrm_zNwaFPSVjbkIG4QIudorSnIOWOH1mkUxOUPiIBD6bbzQzvnKIwliBO88jVBIJNfa0pIlmTPA3CU5KWqR9shxvvhOrS3dA-Olam7RjsBjrSV2gm2QN5zmM7_BjgvK4IInySK1xBBz_8-1LKqLFXSG3sePs-02n3eZodkd53O5q-yL16ww39hJ-Raox09475GCl6QFmGfc1BZ0fQKznS2oL01xaidyt1oNDZKcn6ahlMR68eR85Zk82t1T53CN50u3nLKOY61uTuoEgLBw1n84Xo8GD6QlRtWlH4zd6Ntdt8OP3iUtSOOK3lLa_JkG6WV86qA8Lajvv_vZwAAAP__7rz6og=="}})
+
+ // Drain the fourth node.
+ drainServer(t, lastServer, 4 /* idx */)
- // Verify correctness.
- var res int
- if err := db.QueryRow(query).Scan(&res); err != nil {
- t.Fatal(err)
- }
- if res != numNodes {
- t.Fatalf("expected %d rows but got %d", numNodes, res)
- }
+
+ // Since we only stopped the server, the storage node would still be up. So
+ // our range for the 5th storage node could be randomly planned on any of the
+ // remaining nodes, as all of them are equally "close" here.
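// drainServer (called above) presumably wraps the DistSQL drain hook that the
// deleted code invoked directly; a hedged sketch of such a wrapper, reusing
// the flowDrainWait of 0 and nil reporter from the old code this patch removes:
drain := func(ctx context.Context, s serverutils.TestServerInterface) {
	s.DistSQLServer().(*distsql.ServerImpl).Drain(ctx, 0 /* flowDrainWait */, nil /* reporter */)
}
_ = drain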
+ expectedPlans := []string{
+ // Planned on 1st node
+ "Diagram: https://cockroachdb.github.io/distsqlplan/decode.html#eJyklF1ro04Uxu__n2I48KctTImjxmS9amlcGjZ92ZhlF0oos_HUSo1jZ0baUvLdF7VpjDSiiRcJzsvveeY543kH9RyDC7438S5mJEoeBPk-vbkid96f28n5-Jocj8b-zP85ocS_PL_1TsjHUpUtj1_Vc8YlBiflHj0nvy-9qVdiJuMfHjkaRTyUfPn_EVBIRIDXfIkK3DtgQMEEChZQsGFOIZVigUoJmU-_F4vHwSu4BoUoSTOdD88pLIREcN9BRzpGcGHG_8Y4RR6g7BlAIUDNo7iQ0Gf6Pn3CN6BwIeJsmSiXrC0DBT_l-UiPGU6PnZZ_PdMgPAkII0I_ooT5ioLI9EZeaR4iuKzidzwC11jR9pbPw1BiyLWQPXvbsf_r6viMneyUNWuy9k7ZjVqWCBlgeeSN1HzVbGzYzZhVMzbcMsbal5DtX8KeaayraButK8e6OK0E1D-scv0tWbN9QOYBAdmfATntAzK7OK0E5BwWkLMla7UPyDogIOczoGH7gKwuTisBDQ4LaNCl5UxRpSJRWOsBXysZNaVTljcLDEIsO4sSmVzgrRSLYm35elOAioEAlS5nWfkyTtZTSkvky8-OWSWxRpK5RWJVkl0nmY2kbx08WY0kezeJ1Un2vqfr10n9RpKz25NZJzn7enLqpEEjabjbk1UnDff1NMjv6EMsXu6jAFwwPp7TL37WD-QbeKjyD8V_FC8FdvaW5tf8gccKKVzxJxyhRrmMkkjpaAGulhmuVv_9CwAA__-0ofXg",
+
+ // Planned on 2nd node
+ "Diagram: https://cockroachdb.github.io/distsqlplan/decode.html#eJyklF1ro04Uxu__n2I48KctTImjxmS9amlcGjZ92ZhlF0oos_HUSo1jZ0baUvLdF7VpjDSiiRcJzsvveeY543kH9RyDC7438S5mJEoeBPk-vbkid96f28n5-Jocj8b-zP85ocS_PL_1TsjHUpUtj1_Vc8YlBiflHj0nvy-9qVdiJuMfHjkaRTyUfPn_EVBIRIDXfIkK3DtgQMEEChZQsGFOIZVigUoJmU-_F4vHwSu4BoUoSTOdD88pLIREcN9BRzpGcGHG_8Y4RR6g7BlAIUDNo7iQ0Gf6Pn3CN6BwIeJsmSiXrC0DBT_l-UiPGU6PnZZ_PdOA-YqCyPRGUmkeIris4nE8AtdY0fY2z8NQYsi1kD1726X_6-r4jJ3slDVrsvZO2Y1alggZYHnMjdR81Wxs2M2YVTM23DLG2peN7V-2nmmsK2cbhCcBYUToR5Stq8i6uK6E1T-siv0tWbN9WOYBYdmfYTntr7nZxWklIOewgJwtWat9QNYBATmfAQ3bB2R1cVoJaHBYQIMu7WeKKhWJwlo_-FrJqCmdsrxxYBBi2WWUyOQCb6VYFGvL15sCVAwEqHQ5y8qXcbKeUloiX352zyqJNZLMLRKrkuw6yWwkfevgyWok2btJrE6y9z1dv07qN5Kc3Z7MOsnZ15NTJw0aScPdnqw6abivp0F-Rx9i8XIfBeCC8fGcfvGzfiDfwEOVfyj-o3gpsLO3NL_mDzxWSOGKP-EINcpllERKRwtwtcxwtfrvXwAAAP__oDX14A==",
+
+ // Planned on 3rd node
+ "Diagram: https://cockroachdb.github.io/distsqlplan/decode.html#eJyklF1ro04Uxu__n2I48KctTImjxmS9amlcGjZ92ZhlF0oos_HUSo1jZ0baUvLdF7VpjDSiiRcJzsvveeY543kH9RyDC7438S5mJEoeBPk-vbkid96f28n5-Jocj8b-zP85ocS_PL_1TsjHUpUtj1_Vc8YlBiflHj0nvy-9qVdiJuMfHjkaRTyUfPn_EVBIRIDXfIkK3DtgQMEEChZQsGFOIZVigUoJmU-_F4vHwSu4BoUoSTOdD88pLIREcN9BRzpGcGHG_8Y4RR6g7BlAIUDNo7iQ0Gf6Pn3CN6BwIeJsmSiXrC0DBT_l-UiPGU6PnZZ_PdOA-YqCyPRGUmkeIris4nE8AtdY0fY2z8NQYsi1kD1726X_6-r4jJ3slDVrsvZO2Y1alggZYHnMjdR81Wxs2M2YVTM23DLG2peN7V-2nmmsK2e3rxzr4rQSUP-wyvW3ZM32AZkHBGR_BuQYhCcBYUToR5StwzK7uK6E5RwWlrMla7UPyzogLOczrGH722R1cVoJaHBYQIMu7WeKKhWJwlo_-FrJqCmdsrxxYBBi2WWUyOQCb6VYFGvL15sCVAwEqHQ5y8qXcbKeUloiX352zyqJNZLMLRKrkuw6yWwkfevgyWok2btJrE6y9z1dv07qN5Kc3Z7MOsnZ15NTJw0aScPdnqw6abivp0F-Rx9i8XIfBeCC8fGcfvGzfiDfwEOVfyj-o3gpsLO3NL_mDzxWSOGKP-EINcpllERKRwtwtcxwtfrvXwAAAP__hnf14A==",
+
+ // Planned on 4th node
+ "Diagram: https://cockroachdb.github.io/distsqlplan/decode.html#eJyklF9ro0AUxd_3U8iFpS1MiaPGZH1qaVwqm_7ZmGUXSiiz8daVqmNnRtpS8t0XtWmMNKKJDwnOn985c-5430A-xeCA707di7kWpQ9c-z67udLu3D-303PvWjueeP7c_zklmn95fuueaO9LZZ4cv8innAkMTqo9aqH9vnRnboWZej9c7WgSsVCw5OsREEh5gNcsQQnOHVAgYAABEwhYsCCQCb5EKbkopt_KxV7wAo5OIEqzXBXDCwJLLhCcN1CRihEcmLO_Mc6QBSgGOhAIULEoLiXUmbrPHvEVCFzwOE9S6Whry0DAz1gxMqC6PaCn1d_A0GGxIsBztZGUioUIDq159Cbg6CvS3eZ5GAoMmeJiYG279H9dHZ_Rk52yRkPW2im7UctTLgKsjrmRWqzajY37GTMbxsZbxmj3stH9yzYw9HXlrO6Vo32c1gIaHla54Zas0T0g44CArI-A7O4BGX2c1gKyDwvI3pI1uwdkHhCQ_R6Q0Tkds4_NWjqjw9IZ9ek3M5QZTyU2GsDnSnpD6ZQWnQKDEKu2Inkulngr-LJcW73elKByIECpqllavXjpekoqgSz5aJd1Em0lGVskWidZTZLRSvrWw5PZSrJ2k2iTZO17umGTNGwl2bs9GU2Sva8nu0katZLGuz2ZTdJ4X0-j4o4-xPz5PgrAAf39Of3kZ_1AsYGFsvhQ_H_8ucTOX7Pimj-wWCKBK_aIE1QokiiNpIqW4CiR42r15X8AAAD__-4H8WQ=",
+ }
+ // The range on the 5th node should now be planned on one of the
+ // remaining nodes.
+ 
testutils.SucceedsWithin(t, func() error { + return checkQueryPlan(t, r, expectedPlans) + }, 4*time.Second) + + // Now run the query to ensure the correct result + require.NoError(t, runQuery(t, r)) + }) } // testSpanResolverRange describes a range in a test. The ranges are specified diff --git a/pkg/sql/doctor/doctor.go b/pkg/sql/doctor/doctor.go index 7655ab488a2a..4f04b55c80c8 100644 --- a/pkg/sql/doctor/doctor.go +++ b/pkg/sql/doctor/doctor.go @@ -214,7 +214,7 @@ func ExamineDescriptors( err := cb.ValidateNamespaceEntry(row) if err != nil { problemsFound = true - nsReport(stdout, row, err.Error()) + nsReport(stdout, row, "%s", err) } else if verbose { nsReport(stdout, row, "processed") } diff --git a/pkg/sql/drop_role.go b/pkg/sql/drop_role.go index ac7e93eada31..6bf78b957fca 100644 --- a/pkg/sql/drop_role.go +++ b/pkg/sql/drop_role.go @@ -163,7 +163,7 @@ func (n *DropRoleNode) startExec(params runParams) error { // privileges are added. for _, tbID := range lCtx.tbIDs { tableDescriptor := lCtx.tbDescs[tbID] - if !descriptorIsVisible(tableDescriptor, true /*allowAdding*/) { + if !descriptorIsVisible(tableDescriptor, true /*allowAdding*/, false /* includeDropped */) { continue } if _, ok := userNames[tableDescriptor.GetPrivileges().Owner()]; ok { @@ -192,7 +192,7 @@ func (n *DropRoleNode) startExec(params runParams) error { } } for _, schemaDesc := range lCtx.schemaDescs { - if !descriptorIsVisible(schemaDesc, true /* allowAdding */) { + if !descriptorIsVisible(schemaDesc, true /* allowAdding */, false /* includeDropped */) { continue } if _, ok := userNames[schemaDesc.GetPrivileges().Owner()]; ok { @@ -235,7 +235,7 @@ func (n *DropRoleNode) startExec(params runParams) error { } for _, typDesc := range lCtx.typDescs { if _, ok := userNames[typDesc.GetPrivileges().Owner()]; ok { - if !descriptorIsVisible(typDesc, true /* allowAdding */) { + if !descriptorIsVisible(typDesc, true /* allowAdding */, false /* includeDropped */) { continue } tn, err := getTypeNameFromTypeDescriptor(lCtx, typDesc) @@ -265,7 +265,7 @@ func (n *DropRoleNode) startExec(params runParams) error { } for _, fnDesc := range lCtx.fnDescs { if _, ok := userNames[fnDesc.GetPrivileges().Owner()]; ok { - if !descriptorIsVisible(fnDesc, true /* allowAdding */) { + if !descriptorIsVisible(fnDesc, true /* allowAdding */, false /* includeDropped */) { continue } name, err := getFunctionNameFromFunctionDescriptor(lCtx, fnDesc) diff --git a/pkg/sql/exec_util.go b/pkg/sql/exec_util.go index 2bd764c07b64..a1b90e201594 100644 --- a/pkg/sql/exec_util.go +++ b/pkg/sql/exec_util.go @@ -54,6 +54,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/rpc" "github.com/cockroachdb/cockroach/pkg/security/username" + "github.com/cockroachdb/cockroach/pkg/server/license" "github.com/cockroachdb/cockroach/pkg/server/pgurl" "github.com/cockroachdb/cockroach/pkg/server/serverpb" "github.com/cockroachdb/cockroach/pkg/server/status/statuspb" @@ -103,6 +104,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sessionphase" "github.com/cockroachdb/cockroach/pkg/sql/sqlliveness" "github.com/cockroachdb/cockroach/pkg/sql/sqlstats" + "github.com/cockroachdb/cockroach/pkg/sql/sqlstats/insights" "github.com/cockroachdb/cockroach/pkg/sql/stats" "github.com/cockroachdb/cockroach/pkg/sql/stmtdiagnostics" "github.com/cockroachdb/cockroach/pkg/sql/syntheticprivilegecache" @@ -1303,6 +1305,7 @@ type 
ExecutorConfig struct { UnusedIndexRecommendationsKnobs *idxusage.UnusedIndexRecommendationTestingKnobs ExternalConnectionTestingKnobs *externalconn.TestingKnobs EventLogTestingKnobs *EventLogTestingKnobs + InsightsTestingKnobs *insights.TestingKnobs // HistogramWindowInterval is (server.Config).HistogramWindowInterval. HistogramWindowInterval time.Duration @@ -1465,6 +1468,9 @@ type ExecutorConfig struct { // CidrLookup is used to look up the tag name for a given IP address. CidrLookup *cidr.Lookup + + // LicenseEnforcer is used to enforce the license profiles. + LicenseEnforcer *license.Enforcer } // UpdateVersionSystemSettingHook provides a callback that allows us @@ -1818,7 +1824,7 @@ type StreamingTestingKnobs struct { // BeforeClientSubscribe allows observation of parameters about to be passed // to a streaming client - BeforeClientSubscribe func(addr string, token string, frontier span.Frontier) + BeforeClientSubscribe func(addr string, token string, frontier span.Frontier, filterRangefeed bool) // BeforeIngestionStart allows blocking the stream ingestion job // before a stream ingestion happens. @@ -1859,6 +1865,8 @@ type StreamingTestingKnobs struct { SkipSpanConfigReplication bool SpanConfigRangefeedCacheKnobs *rangefeedcache.TestingKnobs + + FailureRate uint32 } var _ base.ModuleTestingKnobs = &StreamingTestingKnobs{} @@ -3860,6 +3868,10 @@ func (m *sessionDataMutator) SetOptimizerUseConditionalHoistFix(val bool) { m.data.OptimizerUseConditionalHoistFix = val } +func (m *sessionDataMutator) SetOptimizerPushLimitIntoProjectFilteredScan(val bool) { + m.data.OptimizerPushLimitIntoProjectFilteredScan = val +} + // Utility functions related to scrubbing sensitive information on SQL Stats. // quantizeCounts ensures that the Count field in the diff --git a/pkg/sql/execinfrapb/BUILD.bazel b/pkg/sql/execinfrapb/BUILD.bazel index dc0a8257584f..8c5d9fc1f983 100644 --- a/pkg/sql/execinfrapb/BUILD.bazel +++ b/pkg/sql/execinfrapb/BUILD.bazel @@ -13,7 +13,6 @@ go_library( "expr.go", "flow_diagram.go", "processors.go", - "testutils.go", ], embed = [":execinfrapb_go_proto"], importpath = "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb", @@ -21,9 +20,7 @@ go_library( deps = [ "//pkg/base", "//pkg/roachpb", - "//pkg/rpc", "//pkg/security/username", - "//pkg/settings/cluster", "//pkg/sql/catalog", "//pkg/sql/catalog/catalogkeys", "//pkg/sql/catalog/catenumpb", @@ -45,13 +42,9 @@ go_library( "//pkg/util/buildutil", "//pkg/util/duration", "//pkg/util/encoding", - "//pkg/util/hlc", "//pkg/util/humanizeutil", - "//pkg/util/netutil", "//pkg/util/optional", "//pkg/util/protoutil", - "//pkg/util/stop", - "//pkg/util/syncutil", "//pkg/util/tracing/tracingpb", "//pkg/util/uuid", "@com_github_cockroachdb_errors//:errors", @@ -59,7 +52,6 @@ go_library( "@com_github_cockroachdb_redact//:redact", "@com_github_dustin_go_humanize//:go-humanize", "@com_github_gogo_protobuf//types", - "@org_golang_google_grpc//:go_default_library", ], ) diff --git a/pkg/sql/execinfrapb/processors_bulk_io.proto b/pkg/sql/execinfrapb/processors_bulk_io.proto index ca72b7dcea8f..74e51516e69b 100644 --- a/pkg/sql/execinfrapb/processors_bulk_io.proto +++ b/pkg/sql/execinfrapb/processors_bulk_io.proto @@ -464,7 +464,7 @@ message GenerativeSplitAndScatterSpec { // MaxFileCount is the max number of files in an extending restore span entry. optional int64 max_file_count = 23[(gogoproto.nullable) = false]; // SQLInstanceIDs is a slice of SQL instance IDs available for dist restore. 
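// With (gogoproto.customname) = "SQLInstanceIDs" on the field changed just
// below, the generated Go field is addressable as SQLInstanceIDs instead of
// the default SqlInstanceIds; a rough sketch of the generated struct (field
// tag abbreviated and illustrative, not the verbatim generator output):
type GenerativeSplitAndScatterSpec struct {
	// ...other fields elided...
	SQLInstanceIDs []int32 `protobuf:"varint,24,rep,packed,name=sql_instance_ids"`
}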
- repeated int32 sql_instance_ids = 24[(gogoproto.nullable) = false, (gogoproto.customname) = "SQLInstanceIDs"];
+ repeated int32 sql_instance_ids = 24[(gogoproto.customname) = "SQLInstanceIDs"];
 reserved 19;
}
@@ -530,4 +530,12 @@ message LogicalReplicationWriterSpec {
 // TableReplicationMetadata is a map from destination table IDs to metadata
 // containing source table descriptors and fully qualified destination table names.
 map table_metadata = 8 [(gogoproto.nullable) = false];
+
+ // IgnoreCDCIgnoredTTLDeletes controls whether to filter out 'OmitinRangefeed' events
+ // when processing changes in LDR.
+ optional bool ignore_cdc_ignored_ttl_deletes = 9 [(gogoproto.nullable) = false, (gogoproto.customname) = "IgnoreCDCIgnoredTTLDeletes"];
+
+ optional jobs.jobspb.LogicalReplicationDetails.ApplyMode mode = 10 [(gogoproto.nullable) = false];
+
+ // Next ID: 11.
}
diff --git a/pkg/sql/explain_bundle.go b/pkg/sql/explain_bundle.go
index a868c382b74d..192ed3f588f8 100644
--- a/pkg/sql/explain_bundle.go
+++ b/pkg/sql/explain_bundle.go
@@ -533,7 +533,7 @@ func (b *stmtBundleBuilder) addInFlightTrace(c inFlightTraceCollector) {
 // as well as accumulates the string into b.errorStrings. The method should only
 // be used for non-critical errors.
 func (b *stmtBundleBuilder) printError(errString string, buf *bytes.Buffer) {
- fmt.Fprintf(buf, errString+"\n")
+ fmt.Fprintln(buf, errString)
 b.errorStrings = append(b.errorStrings, errString)
}
diff --git a/pkg/sql/explain_test.go b/pkg/sql/explain_test.go
index 698f84e6aedd..651b198873a7 100644
--- a/pkg/sql/explain_test.go
+++ b/pkg/sql/explain_test.go
@@ -84,7 +84,6 @@ func TestStatementReuses(t *testing.T) {
 `ALTER TABLE a SCATTER`,
 `ALTER INDEX a@woo RENAME TO waa`,
- `ALTER INDEX a@woo CONFIGURE ZONE USING DEFAULT`,
 `ALTER INDEX a@woo SPLIT AT VALUES(1)`,
 `ALTER INDEX a@woo SCATTER`,
diff --git a/pkg/sql/flowinfra/BUILD.bazel b/pkg/sql/flowinfra/BUILD.bazel
index e140daded014..4b35f4e6dfd7 100644
--- a/pkg/sql/flowinfra/BUILD.bazel
+++ b/pkg/sql/flowinfra/BUILD.bazel
@@ -12,6 +12,7 @@ go_library(
 "stream_decoder.go",
 "stream_encoder.go",
 "testing_knobs.go",
+ "testutils.go",
 ],
 importpath = "github.com/cockroachdb/cockroach/pkg/sql/flowinfra",
 visibility = ["//visibility:public"],
@@ -19,7 +20,9 @@ go_library(
 "//pkg/base",
 "//pkg/kv",
 "//pkg/roachpb",
+ "//pkg/rpc",
 "//pkg/settings",
+ "//pkg/settings/cluster",
 "//pkg/sql/catalog/catenumpb",
 "//pkg/sql/catalog/colinfo",
 "//pkg/sql/execinfra",
@@ -32,23 +35,29 @@ go_library(
 "//pkg/sql/rowenc",
 "//pkg/sql/sem/tree",
 "//pkg/sql/types",
+ "//pkg/util",
 "//pkg/util/admission",
 "//pkg/util/admission/admissionpb",
 "//pkg/util/buildutil",
 "//pkg/util/cancelchecker",
 "//pkg/util/ctxlog",
+ "//pkg/util/hlc",
 "//pkg/util/log",
 "//pkg/util/mon",
+ "//pkg/util/netutil",
 "//pkg/util/optional",
 "//pkg/util/stop",
 "//pkg/util/syncutil",
 "//pkg/util/timeutil",
 "//pkg/util/tracing",
 "//pkg/util/tracing/tracingpb",
+ "//pkg/util/uuid",
 "@com_github_cockroachdb_errors//:errors",
+ "@com_github_cockroachdb_logtags//:logtags",
 "@com_github_cockroachdb_redact//:redact",
 "@com_github_gogo_protobuf//proto",
 "@io_opentelemetry_go_otel//attribute",
+ "@org_golang_google_grpc//:go_default_library",
 ],
)
diff --git a/pkg/sql/flowinfra/outbox_test.go b/pkg/sql/flowinfra/outbox_test.go
index 7fc72e965111..7e55a7ee5d96 100644
--- a/pkg/sql/flowinfra/outbox_test.go
+++ b/pkg/sql/flowinfra/outbox_test.go
@@ -59,7 +59,7 @@ func TestOutbox(t *testing.T) {
 stopper := stop.NewStopper()
 defer stopper.Stop(ctx)
 clock :=
hlc.NewClockForTesting(nil) - clusterID, mockServer, addr, err := execinfrapb.StartMockDistSQLServer(ctx, clock, stopper, execinfra.StaticSQLInstanceID) + clusterID, mockServer, addr, err := flowinfra.StartMockDistSQLServer(ctx, clock, stopper, execinfra.StaticSQLInstanceID) if err != nil { t.Fatal(err) } @@ -224,7 +224,7 @@ func TestOutboxInitializesStreamBeforeReceivingAnyRows(t *testing.T) { stopper := stop.NewStopper() defer stopper.Stop(ctx) clock := hlc.NewClockForTesting(nil) - clusterID, mockServer, addr, err := execinfrapb.StartMockDistSQLServer(ctx, clock, stopper, execinfra.StaticSQLInstanceID) + clusterID, mockServer, addr, err := flowinfra.StartMockDistSQLServer(ctx, clock, stopper, execinfra.StaticSQLInstanceID) if err != nil { t.Fatal(err) } @@ -297,7 +297,7 @@ func TestOutboxClosesWhenConsumerCloses(t *testing.T) { stopper := stop.NewStopper() defer stopper.Stop(ctx) clock := hlc.NewClockForTesting(nil) - clusterID, mockServer, addr, err := execinfrapb.StartMockDistSQLServer(ctx, clock, stopper, execinfra.StaticSQLInstanceID) + clusterID, mockServer, addr, err := flowinfra.StartMockDistSQLServer(ctx, clock, stopper, execinfra.StaticSQLInstanceID) if err != nil { t.Fatal(err) } @@ -375,7 +375,7 @@ func TestOutboxCancelsFlowOnError(t *testing.T) { stopper := stop.NewStopper() defer stopper.Stop(ctx) clock := hlc.NewClockForTesting(nil) - clusterID, mockServer, addr, err := execinfrapb.StartMockDistSQLServer(ctx, clock, stopper, execinfra.StaticSQLInstanceID) + clusterID, mockServer, addr, err := flowinfra.StartMockDistSQLServer(ctx, clock, stopper, execinfra.StaticSQLInstanceID) if err != nil { t.Fatal(err) } @@ -501,7 +501,7 @@ func BenchmarkOutbox(b *testing.B) { stopper := stop.NewStopper() defer stopper.Stop(bgCtx) clock := hlc.NewClockForTesting(nil) - clusterID, mockServer, addr, err := execinfrapb.StartMockDistSQLServer(bgCtx, clock, stopper, execinfra.StaticSQLInstanceID) + clusterID, mockServer, addr, err := flowinfra.StartMockDistSQLServer(bgCtx, clock, stopper, execinfra.StaticSQLInstanceID) if err != nil { b.Fatal(err) } diff --git a/pkg/sql/execinfrapb/testutils.go b/pkg/sql/flowinfra/testutils.go similarity index 87% rename from pkg/sql/execinfrapb/testutils.go rename to pkg/sql/flowinfra/testutils.go index 402a4863a709..27c00bf7ff5f 100644 --- a/pkg/sql/execinfrapb/testutils.go +++ b/pkg/sql/flowinfra/testutils.go @@ -8,7 +8,7 @@ // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. -package execinfrapb +package flowinfra import ( "context" @@ -18,6 +18,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/rpc" "github.com/cockroachdb/cockroach/pkg/settings/cluster" + "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/netutil" @@ -52,7 +53,7 @@ func StartMockDistSQLServer( return uuid.Nil, nil, nil, err } mock := newMockDistSQLServer() - RegisterDistSQLServer(server, mock) + execinfrapb.RegisterDistSQLServer(server, mock) ln, err := netutil.ListenAndServeGRPC(stopper, server, util.IsolatedTestAddr) if err != nil { return uuid.Nil, nil, nil, err @@ -70,12 +71,12 @@ type MockDistSQLServer struct { // that a new gRPC call has arrived and thus a stream has arrived. The rpc // handler is blocked until Donec is signaled. 
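// A self-contained sketch of that notify-then-block handshake (the names here
// are illustrative, not the mock server's actual API): the handler publishes
// a completion channel along with the event, then parks until it is signaled.
type streamNotification struct {
	donec chan<- error // the handler blocks until the consumer sends here
}

func flowStreamSketch(notifications chan<- streamNotification) error {
	donec := make(chan error)
	notifications <- streamNotification{donec: donec}
	return <-donec // parked until the consumer decides the handler's result
}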
type InboundStreamNotification struct { - Stream DistSQL_FlowStreamServer + Stream execinfrapb.DistSQL_FlowStreamServer Donec chan<- error } // MockDistSQLServer implements the DistSQLServer interface. -var _ DistSQLServer = &MockDistSQLServer{} +var _ execinfrapb.DistSQLServer = &MockDistSQLServer{} func newMockDistSQLServer() *MockDistSQLServer { return &MockDistSQLServer{ @@ -85,20 +86,20 @@ func newMockDistSQLServer() *MockDistSQLServer { // SetupFlow is part of the DistSQLServer interface. func (ds *MockDistSQLServer) SetupFlow( - _ context.Context, req *SetupFlowRequest, -) (*SimpleResponse, error) { + _ context.Context, req *execinfrapb.SetupFlowRequest, +) (*execinfrapb.SimpleResponse, error) { return nil, nil } // CancelDeadFlows is part of the DistSQLServer interface. func (ds *MockDistSQLServer) CancelDeadFlows( - _ context.Context, req *CancelDeadFlowsRequest, -) (*SimpleResponse, error) { + _ context.Context, req *execinfrapb.CancelDeadFlowsRequest, +) (*execinfrapb.SimpleResponse, error) { return nil, nil } // FlowStream is part of the DistSQLServer interface. -func (ds *MockDistSQLServer) FlowStream(stream DistSQL_FlowStreamServer) error { +func (ds *MockDistSQLServer) FlowStream(stream execinfrapb.DistSQL_FlowStreamServer) error { donec := make(chan error) ds.InboundStreams <- InboundStreamNotification{Stream: stream, Donec: donec} return <-donec @@ -107,7 +108,7 @@ func (ds *MockDistSQLServer) FlowStream(stream DistSQL_FlowStreamServer) error { // MockDialer is a mocked implementation of the Outbox's `Dialer` interface. // Used to create a connection with a client stream. type MockDialer struct { - // Addr is assumed to be obtained from execinfrapb.StartMockDistSQLServer. + // Addr is assumed to be obtained from flowinfra.StartMockDistSQLServer. 
Addr net.Addr mu struct { syncutil.Mutex diff --git a/pkg/sql/flowinfra/utils_test.go b/pkg/sql/flowinfra/utils_test.go index 59579b294198..35455b1a6b71 100644 --- a/pkg/sql/flowinfra/utils_test.go +++ b/pkg/sql/flowinfra/utils_test.go @@ -43,7 +43,7 @@ func createDummyStream( stopper := stop.NewStopper() ctx := context.Background() clock := hlc.NewClockForTesting(nil) - storageClusterID, mockServer, addr, err := execinfrapb.StartMockDistSQLServer(ctx, clock, stopper, execinfra.StaticSQLInstanceID) + storageClusterID, mockServer, addr, err := StartMockDistSQLServer(ctx, clock, stopper, execinfra.StaticSQLInstanceID) if err != nil { t.Fatal(err) } diff --git a/pkg/sql/information_schema.go b/pkg/sql/information_schema.go index 95e80cc30eab..3cf97efbea09 100644 --- a/pkg/sql/information_schema.go +++ b/pkg/sql/information_schema.go @@ -345,13 +345,10 @@ var informationSchemaCheckConstraints = virtualSchemaTable{ https://www.postgresql.org/docs/9.5/infoschema-check-constraints.html`, schema: vtable.InformationSchemaCheckConstraints, populate: func(ctx context.Context, p *planner, dbContext catalog.DatabaseDescriptor, addRow func(...tree.Datum) error) error { - return forEachTableDescWithTableLookup(ctx, p, dbContext, hideVirtual /* no constraints in virtual tables */, func( - ctx context.Context, - db catalog.DatabaseDescriptor, - sc catalog.SchemaDescriptor, - table catalog.TableDescriptor, - tableLookup tableLookupFn, - ) error { + opts := forEachTableDescOptions{virtualOpts: hideVirtual} /* no constraints in virtual tables */ + return forEachTableDesc(ctx, p, dbContext, opts, func( + ctx context.Context, descCtx tableDescContext) error { + db, sc, table := descCtx.database, descCtx.schema, descCtx.table dbNameStr := tree.NewDString(db.GetName()) scNameStr := tree.NewDString(sc.GetName()) for _, ck := range table.EnforcedCheckConstraints() { @@ -408,9 +405,10 @@ var informationSchemaColumnPrivileges = virtualSchemaTable{ https://www.postgresql.org/docs/9.5/infoschema-column-privileges.html`, schema: vtable.InformationSchemaColumnPrivileges, populate: func(ctx context.Context, p *planner, dbContext catalog.DatabaseDescriptor, addRow func(...tree.Datum) error) error { - return forEachTableDesc(ctx, p, dbContext, virtualMany, func( - ctx context.Context, db catalog.DatabaseDescriptor, sc catalog.SchemaDescriptor, table catalog.TableDescriptor, - ) error { + opts := forEachTableDescOptions{virtualOpts: virtualMany} + return forEachTableDesc(ctx, p, dbContext, opts, func( + ctx context.Context, descCtx tableDescContext) error { + db, sc, table := descCtx.database, descCtx.schema, descCtx.table dbNameStr := tree.NewDString(db.GetName()) scNameStr := tree.NewDString(sc.GetName()) columndata := privilege.List{privilege.SELECT, privilege.INSERT, privilege.UPDATE} // privileges for column level granularity @@ -453,10 +451,11 @@ https://www.postgresql.org/docs/9.5/infoschema-columns.html`, if err != nil { return err } - // Get the collations for all comments of current database. - return forEachTableDesc(ctx, p, dbContext, virtualMany, func( - ctx context.Context, db catalog.DatabaseDescriptor, sc catalog.SchemaDescriptor, table catalog.TableDescriptor, - ) error { + opts := forEachTableDescOptions{virtualOpts: virtualMany} + return forEachTableDesc(ctx, p, dbContext, opts, func( + ctx context.Context, descCtx tableDescContext) error { + db, sc, table := descCtx.database, descCtx.schema, descCtx.table + // Get the collations for all comments of current database. 
if table.IsSequence() { return nil } @@ -619,8 +618,10 @@ var informationSchemaColumnUDTUsage = virtualSchemaTable{ https://www.postgresql.org/docs/current/infoschema-column-udt-usage.html`, schema: vtable.InformationSchemaColumnUDTUsage, populate: func(ctx context.Context, p *planner, dbContext catalog.DatabaseDescriptor, addRow func(...tree.Datum) error) error { - return forEachTableDesc(ctx, p, dbContext, hideVirtual, - func(ctx context.Context, db catalog.DatabaseDescriptor, sc catalog.SchemaDescriptor, table catalog.TableDescriptor) error { + opts := forEachTableDescOptions{virtualOpts: hideVirtual} + return forEachTableDesc(ctx, p, dbContext, opts, + func(ctx context.Context, descCtx tableDescContext) error { + db, sc, table := descCtx.database, descCtx.schema, descCtx.table dbNameStr := tree.NewDString(db.GetName()) scNameStr := tree.NewDString(sc.GetName()) tbNameStr := tree.NewDString(table.GetName()) @@ -792,13 +793,10 @@ var informationSchemaConstraintColumnUsageTable = virtualSchemaTable{ https://www.postgresql.org/docs/9.5/infoschema-constraint-column-usage.html`, schema: vtable.InformationSchemaConstraintColumnUsage, populate: func(ctx context.Context, p *planner, dbContext catalog.DatabaseDescriptor, addRow func(...tree.Datum) error) error { - return forEachTableDescWithTableLookup(ctx, p, dbContext, hideVirtual /* no constraints in virtual tables */, func( - ctx context.Context, - db catalog.DatabaseDescriptor, - sc catalog.SchemaDescriptor, - table catalog.TableDescriptor, - tableLookup tableLookupFn, - ) error { + opts := forEachTableDescOptions{virtualOpts: hideVirtual} /* no constraints in virtual tables */ + return forEachTableDesc(ctx, p, dbContext, opts, func( + ctx context.Context, descCtx tableDescContext) error { + db, sc, table, tableLookup := descCtx.database, descCtx.schema, descCtx.table, descCtx.tableLookup dbNameStr := tree.NewDString(db.GetName()) scNameStr := tree.NewDString(sc.GetName()) for _, c := range table.AllConstraints() { @@ -854,13 +852,10 @@ var informationSchemaKeyColumnUsageTable = virtualSchemaTable{ https://www.postgresql.org/docs/9.5/infoschema-key-column-usage.html`, schema: vtable.InformationSchemaKeyColumnUsage, populate: func(ctx context.Context, p *planner, dbContext catalog.DatabaseDescriptor, addRow func(...tree.Datum) error) error { - return forEachTableDescWithTableLookup(ctx, p, dbContext, hideVirtual /* no constraints in virtual tables */, func( - ctx context.Context, - db catalog.DatabaseDescriptor, - sc catalog.SchemaDescriptor, - table catalog.TableDescriptor, - tableLookup tableLookupFn, - ) error { + opts := forEachTableDescOptions{virtualOpts: hideVirtual} /* no constraints in virtual tables */ + return forEachTableDesc(ctx, p, dbContext, opts, func( + ctx context.Context, descCtx tableDescContext) error { + db, sc, table := descCtx.database, descCtx.schema, descCtx.table dbNameStr := tree.NewDString(db.GetName()) scNameStr := tree.NewDString(sc.GetName()) tbNameStr := tree.NewDString(table.GetName()) @@ -984,18 +979,15 @@ var informationSchemaReferentialConstraintsTable = virtualSchemaTable{ https://www.postgresql.org/docs/9.5/infoschema-referential-constraints.html`, schema: vtable.InformationSchemaReferentialConstraints, populate: func(ctx context.Context, p *planner, dbContext catalog.DatabaseDescriptor, addRow func(...tree.Datum) error) error { - return forEachTableDescWithTableLookup(ctx, p, dbContext, hideVirtual /* no constraints in virtual tables */, func( - ctx context.Context, - db catalog.DatabaseDescriptor, - 
sc catalog.SchemaDescriptor, - table catalog.TableDescriptor, - tableLookup tableLookupFn, - ) error { + opts := forEachTableDescOptions{virtualOpts: hideVirtual} /* no constraints in virtual tables */ + return forEachTableDesc(ctx, p, dbContext, opts, func( + ctx context.Context, descCtx tableDescContext) error { + db, sc, table := descCtx.database, descCtx.schema, descCtx.table dbNameStr := tree.NewDString(db.GetName()) scNameStr := tree.NewDString(sc.GetName()) tbNameStr := tree.NewDString(table.GetName()) for _, fk := range table.OutboundForeignKeys() { - refTable, err := tableLookup.getTableByID(fk.GetReferencedTableID()) + refTable, err := descCtx.tableLookup.getTableByID(fk.GetReferencedTableID()) if err != nil { return err } @@ -1308,8 +1300,10 @@ var informationSchemaSequences = virtualSchemaTable{ https://www.postgresql.org/docs/9.5/infoschema-sequences.html`, schema: vtable.InformationSchemaSequences, populate: func(ctx context.Context, p *planner, dbContext catalog.DatabaseDescriptor, addRow func(...tree.Datum) error) error { - return forEachTableDesc(ctx, p, dbContext, hideVirtual, /* no sequences in virtual schemas */ - func(ctx context.Context, db catalog.DatabaseDescriptor, sc catalog.SchemaDescriptor, table catalog.TableDescriptor) error { + return forEachTableDesc(ctx, p, dbContext, + forEachTableDescOptions{virtualOpts: hideVirtual}, /* no sequences in virtual schemas */ + func(ctx context.Context, descCtx tableDescContext) error { + db, sc, table := descCtx.database, descCtx.schema, descCtx.table if !table.IsSequence() { return nil } @@ -1348,8 +1342,10 @@ var informationSchemaStatisticsTable = virtualSchemaTable{ ` + docs.URL("information-schema.html#statistics"), schema: vtable.InformationSchemaStatistics, populate: func(ctx context.Context, p *planner, dbContext catalog.DatabaseDescriptor, addRow func(...tree.Datum) error) error { - return forEachTableDesc(ctx, p, dbContext, hideVirtual, /* virtual tables have no indexes */ - func(ctx context.Context, db catalog.DatabaseDescriptor, sc catalog.SchemaDescriptor, table catalog.TableDescriptor) error { + return forEachTableDesc(ctx, p, dbContext, + forEachTableDescOptions{virtualOpts: hideVirtual}, /* virtual tables have no indexes */ + func(ctx context.Context, descCtx tableDescContext) error { + db, sc, table := descCtx.database, descCtx.schema, descCtx.table dbNameStr := tree.NewDString(db.GetName()) scNameStr := tree.NewDString(sc.GetName()) tbNameStr := tree.NewDString(table.GetName()) @@ -1458,14 +1454,10 @@ var informationSchemaTableConstraintTable = virtualSchemaTable{ https://www.postgresql.org/docs/9.5/infoschema-table-constraints.html`, schema: vtable.InformationSchemaTableConstraint, populate: func(ctx context.Context, p *planner, dbContext catalog.DatabaseDescriptor, addRow func(...tree.Datum) error) error { - return forEachTableDescWithTableLookup(ctx, p, dbContext, hideVirtual, /* virtual tables have no constraints */ - func( - ctx context.Context, - db catalog.DatabaseDescriptor, - sc catalog.SchemaDescriptor, - table catalog.TableDescriptor, - tableLookup tableLookupFn, - ) error { + opts := forEachTableDescOptions{virtualOpts: hideVirtual} /* virtual tables have no constraints */ + return forEachTableDesc(ctx, p, dbContext, opts, + func(ctx context.Context, descCtx tableDescContext) error { + db, sc, table := descCtx.database, descCtx.schema, descCtx.table dbNameStr := tree.NewDString(db.GetName()) scNameStr := tree.NewDString(sc.GetName()) tbNameStr := tree.NewDString(table.GetName()) @@ -1612,8 
+1604,10 @@ func populateTablePrivileges( dbContext catalog.DatabaseDescriptor, addRow func(...tree.Datum) error, ) error { - return forEachTableDesc(ctx, p, dbContext, virtualMany, - func(ctx context.Context, db catalog.DatabaseDescriptor, sc catalog.SchemaDescriptor, table catalog.TableDescriptor) error { + opts := forEachTableDescOptions{virtualOpts: virtualMany} + return forEachTableDesc(ctx, p, dbContext, opts, + func(ctx context.Context, descCtx tableDescContext) error { + db, sc, table := descCtx.database, descCtx.schema, descCtx.table dbNameStr := tree.NewDString(db.GetName()) scNameStr := tree.NewDString(sc.GetName()) tbNameStr := tree.NewDString(table.GetName()) @@ -1674,47 +1668,40 @@ var informationSchemaTablesTable = virtualSchemaTable{ https://www.postgresql.org/docs/9.5/infoschema-tables.html`, schema: vtable.InformationSchemaTables, populate: func(ctx context.Context, p *planner, dbContext catalog.DatabaseDescriptor, addRow func(...tree.Datum) error) error { - return forEachTableDesc(ctx, p, dbContext, virtualMany, addTablesTableRow(addRow)) + opts := forEachTableDescOptions{virtualOpts: virtualMany} + return forEachTableDesc(ctx, p, dbContext, opts, + func( + ctx context.Context, descCtx tableDescContext) error { + db, sc, table := descCtx.database, descCtx.schema, descCtx.table + if table.IsSequence() { + return nil + } + tableType := tableTypeBaseTable + insertable := yesString + if table.IsVirtualTable() { + tableType = tableTypeSystemView + insertable = noString + } else if table.IsView() { + tableType = tableTypeView + insertable = noString + } else if table.IsTemporary() { + tableType = tableTypeTemporary + } + dbNameStr := tree.NewDString(db.GetName()) + scNameStr := tree.NewDString(sc.GetName()) + tbNameStr := tree.NewDString(table.GetName()) + return addRow( + dbNameStr, // table_catalog + scNameStr, // table_schema + tbNameStr, // table_name + tableType, // table_type + insertable, // is_insertable_into + tree.NewDInt(tree.DInt(table.GetVersion())), // version + ) + }) }, } -func addTablesTableRow( - addRow func(...tree.Datum) error, -) func( - ctx context.Context, - db catalog.DatabaseDescriptor, - sc catalog.SchemaDescriptor, - table catalog.TableDescriptor, -) error { - return func(ctx context.Context, db catalog.DatabaseDescriptor, sc catalog.SchemaDescriptor, table catalog.TableDescriptor) error { - if table.IsSequence() { - return nil - } - tableType := tableTypeBaseTable - insertable := yesString - if table.IsVirtualTable() { - tableType = tableTypeSystemView - insertable = noString - } else if table.IsView() { - tableType = tableTypeView - insertable = noString - } else if table.IsTemporary() { - tableType = tableTypeTemporary - } - dbNameStr := tree.NewDString(db.GetName()) - scNameStr := tree.NewDString(sc.GetName()) - tbNameStr := tree.NewDString(table.GetName()) - return addRow( - dbNameStr, // table_catalog - scNameStr, // table_schema - tbNameStr, // table_name - tableType, // table_type - insertable, // is_insertable_into - tree.NewDInt(tree.DInt(table.GetVersion())), // version - ) - } -} - // Postgres: https://www.postgresql.org/docs/9.6/static/infoschema-views.html // MySQL: https://dev.mysql.com/doc/refman/5.7/en/views-table.html var informationSchemaViewsTable = virtualSchemaTable{ @@ -1723,8 +1710,10 @@ var informationSchemaViewsTable = virtualSchemaTable{ https://www.postgresql.org/docs/9.5/infoschema-views.html`, schema: vtable.InformationSchemaViews, populate: func(ctx context.Context, p *planner, dbContext catalog.DatabaseDescriptor, 
addRow func(...tree.Datum) error) error { - return forEachTableDesc(ctx, p, dbContext, hideVirtual, /* virtual schemas have no views */ - func(ctx context.Context, db catalog.DatabaseDescriptor, sc catalog.SchemaDescriptor, table catalog.TableDescriptor) error { + opts := forEachTableDescOptions{virtualOpts: hideVirtual} /* virtual schemas have no views */ + return forEachTableDesc(ctx, p, dbContext, opts, + func(ctx context.Context, descCtx tableDescContext) error { + db, sc, table := descCtx.database, descCtx.schema, descCtx.table if !table.IsView() { return nil } @@ -1781,7 +1770,7 @@ https://www.postgresql.org/docs/current/infoschema-collations.html`, // Postgres: https://www.postgresql.org/docs/current/infoschema-collation-character-set-applicab.html // MySQL: https://dev.mysql.com/doc/refman/8.0/en/information-schema-collation-character-set-applicability-table.html var informationSchemaCollationCharacterSetApplicability = virtualSchemaTable{ - comment: `identifies which character set the available collations are + comment: `identifies which character set the available collations are applicable to. As UTF-8 is the only available encoding this table does not provide much useful information. https://www.postgresql.org/docs/current/infoschema-collation-character-set-applicab.html`, @@ -1914,7 +1903,8 @@ var informationSchemaRoleRoutineGrantsTable = virtualSchemaTable{ if err != nil { return err } - canSeeDescriptor, err := userCanSeeDescriptor(ctx, p, fn, db, false /* allowAdding */) + canSeeDescriptor, err := userCanSeeDescriptor( + ctx, p, fn, db, false /* allowAdding */, false /* includeDropped */) if err != nil { return err } @@ -2478,7 +2468,8 @@ func forEachSchema( var schemas []catalog.SchemaDescriptor if err := c.ForEachDescriptor(func(desc catalog.Descriptor) error { if requiresPrivileges { - canSeeDescriptor, err := userCanSeeDescriptor(ctx, p, desc, db, false /* allowAdding */) + canSeeDescriptor, err := userCanSeeDescriptor( + ctx, p, desc, db, false /* allowAdding */, false /* includeDropped */) if err != nil { return err } @@ -2550,7 +2541,7 @@ func forEachDatabaseDesc( for _, dbDesc := range dbDescs { canSeeDescriptor := !requiresPrivileges if requiresPrivileges { - hasPriv, err := userCanSeeDescriptor(ctx, p, dbDesc, nil /* parentDBDesc */, false /* allowAdding */) + hasPriv, err := userCanSeeDescriptor(ctx, p, dbDesc, nil /* parentDBDesc */, false /* allowAdding */, false /* includeDropped */) if err != nil { return err } @@ -2596,7 +2587,8 @@ func forEachTypeDesc( if err != nil { return err } - canSeeDescriptor, err := userCanSeeDescriptor(ctx, p, typ, dbDesc, false /* allowAdding */) + canSeeDescriptor, err := userCanSeeDescriptor( + ctx, p, typ, dbDesc, false /* allowAdding */, false /* includeDropped */) if err != nil { return err } @@ -2610,36 +2602,6 @@ func forEachTypeDesc( return nil } -// forEachTableDesc retrieves all table descriptors from the current -// database and all system databases and iterates through them. For -// each table, the function will call fn with its respective database -// and table descriptor. -// -// The dbContext argument specifies in which database context we are -// requesting the descriptors. In context nil all descriptors are -// visible, in non-empty contexts only the descriptors of that -// database are visible. -// -// The virtualOpts argument specifies how virtual tables are made -// visible. 
-func forEachTableDesc( - ctx context.Context, - p *planner, - dbContext catalog.DatabaseDescriptor, - virtualOpts virtualOpts, - fn func(context.Context, catalog.DatabaseDescriptor, catalog.SchemaDescriptor, catalog.TableDescriptor) error, -) error { - return forEachTableDescWithTableLookup(ctx, p, dbContext, virtualOpts, func( - ctx context.Context, - db catalog.DatabaseDescriptor, - sc catalog.SchemaDescriptor, - table catalog.TableDescriptor, - _ tableLookupFn, - ) error { - return fn(ctx, db, sc, table) - }) -} - type virtualOpts int const ( @@ -2651,77 +2613,41 @@ const ( hideVirtual ) -// forEachTableDescAll does the same as forEachTableDesc but also -// includes newly added non-public descriptors. -func forEachTableDescAll( - ctx context.Context, - p *planner, - dbContext catalog.DatabaseDescriptor, - virtualOpts virtualOpts, - fn func(context.Context, catalog.DatabaseDescriptor, catalog.SchemaDescriptor, catalog.TableDescriptor) error, -) error { - return forEachTableDescAllWithTableLookup(ctx, p, dbContext, virtualOpts, func( - ctx context.Context, - db catalog.DatabaseDescriptor, - sc catalog.SchemaDescriptor, - table catalog.TableDescriptor, - _ tableLookupFn, - ) error { - return fn(ctx, db, sc, table) - }) +type forEachTableDescOptions struct { + virtualOpts virtualOpts + allowAdding bool + // Include dropped tables (but not garbage collected) when run at the cluster + // level (i.e., when database = ""). This only works when the scope is at the + // cluster level because `GetAllDescriptorsForDatabase` doesn't include such + // tables. + includeDropped bool } -// forEachTableDescAllWithTableLookup is like forEachTableDescAll, but it also -// provides a tableLookupFn like forEachTableDescWithTableLookup. If validate is -// set to false descriptors will not be validated for existence or consistency -// hence fn should be able to handle nil-s. -func forEachTableDescAllWithTableLookup( - ctx context.Context, - p *planner, - dbContext catalog.DatabaseDescriptor, - virtualOpts virtualOpts, - fn func(context.Context, catalog.DatabaseDescriptor, catalog.SchemaDescriptor, catalog.TableDescriptor, tableLookupFn) error, -) error { - return forEachTableDescWithTableLookupInternal( - ctx, p, dbContext, virtualOpts, true /* allowAdding */, fn, - ) +type tableDescContext struct { + database catalog.DatabaseDescriptor + schema catalog.SchemaDescriptor + table catalog.TableDescriptor + tableLookup tableLookupFn } -// forEachTableDescWithTableLookup acts like forEachTableDesc, except it also provides a -// tableLookupFn when calling fn to allow callers to lookup fetched table descriptors -// on demand. This is important for callers dealing with objects like foreign keys, where -// the metadata for each object must be augmented by looking at the referenced table. +// forEachTableDesc retrieves all table descriptors from the current +// database and all system databases and iterates through them. For +// each table, the function will call fn with its respective database +// and table descriptor. // // The dbContext argument specifies in which database context we are -// requesting the descriptors. In context "" all descriptors are +// requesting the descriptors. In context nil all descriptors are // visible, in non-empty contexts only the descriptors of that // database are visible. 
-func forEachTableDescWithTableLookup( - ctx context.Context, - p *planner, - dbContext catalog.DatabaseDescriptor, - virtualOpts virtualOpts, - fn func(context.Context, catalog.DatabaseDescriptor, catalog.SchemaDescriptor, catalog.TableDescriptor, tableLookupFn) error, -) error { - return forEachTableDescWithTableLookupInternal( - ctx, p, dbContext, virtualOpts, false /* allowAdding */, fn, - ) -} - -// forEachTableDescWithTableLookupInternal is the logic that supports -// forEachTableDescWithTableLookup. // -// The allowAdding argument if true includes newly added tables that -// are not yet public. -// The validate argument if false turns off checking if the descriptor ids exist -// and if they are valid. -func forEachTableDescWithTableLookupInternal( +// The virtualOpts argument specifies how virtual tables are made +// visible. +func forEachTableDesc( ctx context.Context, p *planner, dbContext catalog.DatabaseDescriptor, - virtualOpts virtualOpts, - allowAdding bool, - fn func(context.Context, catalog.DatabaseDescriptor, catalog.SchemaDescriptor, catalog.TableDescriptor, tableLookupFn) error, + opts forEachTableDescOptions, + fn func(context.Context, tableDescContext) error, ) (err error) { var all nstree.Catalog if dbContext != nil && useIndexLookupForDescriptorsInDatabase.Get(&p.EvalContext().Settings.SV) { @@ -2732,59 +2658,22 @@ func forEachTableDescWithTableLookupInternal( if err != nil { return err } - return forEachTableDescWithTableLookupInternalFromDescriptors( - ctx, p, dbContext, virtualOpts, allowAdding, all, fn) + return forEachTableDescFromDescriptors( + ctx, p, dbContext, all, opts, fn) } -func forEachTypeDescWithTableLookupInternalFromDescriptors( +func forEachTableDescFromDescriptors( ctx context.Context, p *planner, dbContext catalog.DatabaseDescriptor, - allowAdding bool, c nstree.Catalog, - fn func(context.Context, catalog.DatabaseDescriptor, catalog.SchemaDescriptor, catalog.TypeDescriptor, tableLookupFn) error, + opts forEachTableDescOptions, + fn func(context.Context, tableDescContext) error, ) error { lCtx := newInternalLookupCtx(c.OrderedDescriptors(), dbContext) - for _, typID := range lCtx.typIDs { - typDesc := lCtx.typDescs[typID] - if typDesc.Dropped() { - continue - } - dbDesc, err := lCtx.getDatabaseByID(typDesc.GetParentID()) - if err != nil { - return err - } - canSeeDescriptor, err := userCanSeeDescriptor(ctx, p, typDesc, dbDesc, allowAdding) - if err != nil { - return err - } - if !canSeeDescriptor { - continue - } - sc, err := lCtx.getSchemaByID(typDesc.GetParentSchemaID()) - if err != nil { - return err - } - if err := fn(ctx, dbDesc, sc, typDesc, lCtx); err != nil { - return err - } - } - return nil -} - -func forEachTableDescWithTableLookupInternalFromDescriptors( - ctx context.Context, - p *planner, - dbContext catalog.DatabaseDescriptor, - virtualOpts virtualOpts, - allowAdding bool, - c nstree.Catalog, - fn func(context.Context, catalog.DatabaseDescriptor, catalog.SchemaDescriptor, catalog.TableDescriptor, tableLookupFn) error, -) error { - lCtx := newInternalLookupCtx(c.OrderedDescriptors(), dbContext) - - if virtualOpts == virtualMany || virtualOpts == virtualCurrentDB { + vOpts := opts.virtualOpts + if vOpts == virtualMany || vOpts == virtualCurrentDB { // Virtual descriptors first. 
vt := p.getVirtualTabler() vEntries := vt.getSchemas() @@ -2794,7 +2683,8 @@ func forEachTableDescWithTableLookupInternalFromDescriptors( virtSchemaEntry := vEntries[virtSchemaName] for _, tName := range virtSchemaEntry.orderedDefNames { te := virtSchemaEntry.defs[tName] - if err := fn(ctx, dbDesc, virtSchemaEntry.desc, te.desc, lCtx); err != nil { + if err := fn(ctx, tableDescContext{ + dbDesc, virtSchemaEntry.desc, te.desc, lCtx}); err != nil { return err } } @@ -2802,7 +2692,7 @@ func forEachTableDescWithTableLookupInternalFromDescriptors( return nil } - switch virtualOpts { + switch vOpts { case virtualCurrentDB: if err := iterate(dbContext); err != nil { return err @@ -2821,11 +2711,12 @@ func forEachTableDescWithTableLookupInternalFromDescriptors( for _, tbID := range lCtx.tbIDs { table := lCtx.tbDescs[tbID] dbDesc, parentExists := lCtx.dbDescs[table.GetParentID()] - canSeeDescriptor, err := userCanSeeDescriptor(ctx, p, table, dbDesc, allowAdding) + canSeeDescriptor, err := userCanSeeDescriptor( + ctx, p, table, dbDesc, opts.allowAdding, opts.includeDropped) if err != nil { return err } - if table.Dropped() || !canSeeDescriptor { + if !canSeeDescriptor { continue } var sc catalog.SchemaDescriptor @@ -2859,7 +2750,45 @@ func forEachTableDescWithTableLookupInternalFromDescriptors( } } } - if err := fn(ctx, dbDesc, sc, table, lCtx); err != nil { + if err := fn(ctx, tableDescContext{dbDesc, sc, table, lCtx}); err != nil { + return err + } + } + return nil +} + +func forEachTypeDescWithTableLookupInternalFromDescriptors( + ctx context.Context, + p *planner, + dbContext catalog.DatabaseDescriptor, + allowAdding bool, + c nstree.Catalog, + fn func(context.Context, catalog.DatabaseDescriptor, catalog.SchemaDescriptor, catalog.TypeDescriptor, tableLookupFn) error, +) error { + lCtx := newInternalLookupCtx(c.OrderedDescriptors(), dbContext) + + for _, typID := range lCtx.typIDs { + typDesc := lCtx.typDescs[typID] + if typDesc.Dropped() { + continue + } + dbDesc, err := lCtx.getDatabaseByID(typDesc.GetParentID()) + if err != nil { + return err + } + canSeeDescriptor, err := userCanSeeDescriptor( + ctx, p, typDesc, dbDesc, allowAdding, false /* includeDropped */) + if err != nil { + return err + } + if !canSeeDescriptor { + continue + } + sc, err := lCtx.getSchemaByID(typDesc.GetParentSchemaID()) + if err != nil { + return err + } + if err := fn(ctx, dbDesc, sc, typDesc, lCtx); err != nil { return err } } @@ -2924,7 +2853,7 @@ FROM system.users AS u LEFT JOIN system.role_options AS ro ON ro.username = u.username - LEFT JOIN system.database_role_settings AS drs ON + LEFT JOIN system.database_role_settings AS drs ON drs.role_name = u.username AND drs.database_id = 0 GROUP BY u.username, "isRole", drs.settings; @@ -3022,9 +2951,13 @@ func forEachRoleMembership( } func userCanSeeDescriptor( - ctx context.Context, p *planner, desc, parentDBDesc catalog.Descriptor, allowAdding bool, + ctx context.Context, + p *planner, + desc, parentDBDesc catalog.Descriptor, + allowAdding bool, + includeDropped bool, ) (bool, error) { - if !descriptorIsVisible(desc, allowAdding) { + if !descriptorIsVisible(desc, allowAdding, includeDropped) { return false, nil } @@ -3056,8 +2989,8 @@ func userCanSeeDescriptor( return false, nil } -func descriptorIsVisible(desc catalog.Descriptor, allowAdding bool) bool { - return desc.Public() || (allowAdding && desc.Adding()) +func descriptorIsVisible(desc catalog.Descriptor, allowAdding bool, includeDropped bool) bool { + return desc.Public() || (allowAdding && desc.Adding()) || 
(includeDropped && desc.Dropped()) } // nameConcatOid is a Go version of the nameconcatoid builtin function. The diff --git a/pkg/sql/insert.go b/pkg/sql/insert.go index 4e1e14407b25..f146c1170f0f 100644 --- a/pkg/sql/insert.go +++ b/pkg/sql/insert.go @@ -196,7 +196,7 @@ func (r *insertRun) initRowContainer(params runParams, columns colinfo.ResultCol // processSourceRow processes one row from the source for insertion and, if // result rows are needed, saves it in the result row container. func (r *insertRun) processSourceRow(params runParams, rowVals tree.Datums) error { - if err := enforceLocalColumnConstraints(rowVals, r.insertCols); err != nil { + if err := enforceNotNullConstraints(rowVals, r.insertCols); err != nil { return err } diff --git a/pkg/sql/internal.go b/pkg/sql/internal.go index f59d40f25356..6c3336b22724 100644 --- a/pkg/sql/internal.go +++ b/pkg/sql/internal.go @@ -96,6 +96,7 @@ func NewInternalSessionData( sd.SearchPath = sessiondata.DefaultSearchPathForUser(username.NodeUserName()) sd.SequenceState = sessiondata.NewSequenceState() sd.Location = time.UTC + sd.StmtTimeout = 0 return sd } diff --git a/pkg/sql/logictest/REPOSITORIES.bzl b/pkg/sql/logictest/REPOSITORIES.bzl index cfd746d0cc44..0f5131c74fc0 100644 --- a/pkg/sql/logictest/REPOSITORIES.bzl +++ b/pkg/sql/logictest/REPOSITORIES.bzl @@ -7,17 +7,17 @@ CONFIG_DARWIN_AMD64 = "darwin-10.9-amd64" CONFIG_DARWIN_ARM64 = "darwin-11.0-arm64" _CONFIGS = [ - ("24.1.3", [ - (CONFIG_DARWIN_AMD64, "781239aa438ac93954e4e0156741ada3e066b70da47861ec28713f5c4918cca9"), - (CONFIG_DARWIN_ARM64, "d7736e0ca6a06fe4ac228785c014f594505b0301c836660efc7a9aaebc7cac2f"), - (CONFIG_LINUX_AMD64, "495d3fb2a417d180901e045a6faae324f5ea1bac27613e3f5ea5aa1def52f7f6"), - (CONFIG_LINUX_ARM64, "a98446801d8325cbb65a647a6d16fdc811e6bf52ffed792f954e8ece82bee859"), + ("24.1.4", [ + (CONFIG_DARWIN_AMD64, "f115b6e5824c741c7696478dcc7526552eff0f76f4fef78db331ceac570458ec"), + (CONFIG_DARWIN_ARM64, "5fe3352f1307931577cf4620f1a10f1e8cb7d2a5e1a038e737b7b9eb5cf84a18"), + (CONFIG_LINUX_AMD64, "3d63f699f413bd5e6a1ae63bfca9ba589faabbc1ea0c2716d7c20575d2399b28"), + (CONFIG_LINUX_ARM64, "0409a2a18eaedb6f2184ab01d5b65659023242152c110e4cf5a81db5cdcb67c9"), ]), - ("24.2.0-rc.1", [ - (CONFIG_DARWIN_AMD64, "72af70b866c541bef20fae74bb0177ba233c66e6a0311eee9e044d34cf43552e"), - (CONFIG_DARWIN_ARM64, "983ce231edaeeb0a076a95ae04f603cbe93dfb57e170666fff2d201e77903d2c"), - (CONFIG_LINUX_AMD64, "e8ef2e78f34736d094dc773d0c180dd876de5c4e6d863e0848c20b7a4805dcec"), - (CONFIG_LINUX_ARM64, "e789219222250a47c293ffbc97c644be2faf6ce86f2514369023d9cbb19ac9cd"), + ("24.2.0", [ + (CONFIG_DARWIN_AMD64, "37f0c320809a6f3212e9909da6bcbff38739db5a8e0cec9af95d9973c325c167"), + (CONFIG_DARWIN_ARM64, "1d100bfdda5a8e8604603445db50a3b8379f2ef1153c9d969b6ec3759befc9a8"), + (CONFIG_LINUX_AMD64, "59e29c52eb36748a475ffb355a074741592e9d330acfb484e6416ff70f658126"), + (CONFIG_LINUX_ARM64, "b2ea285ee9d79e2cd8c317c519387897d94e2107e3cf24b83d503738ee590ff4"), ]), ] diff --git a/pkg/sql/logictest/logic.go b/pkg/sql/logictest/logic.go index 3ab7ea227536..494c1048096b 100644 --- a/pkg/sql/logictest/logic.go +++ b/pkg/sql/logictest/logic.go @@ -18,6 +18,7 @@ import ( gosql "database/sql" "flag" "fmt" + "io/fs" "math/rand" "net" "net/url" @@ -1013,6 +1014,9 @@ type logicTest struct { cluster serverutils.TestClusterInterface // testserverCluster is the testserver cluster. This uses real binaries. 
testserverCluster testserver.TestServer + // logsDir is the directory where logs are located when using a + // testserverCluster. + logsDir string // sharedIODir is the ExternalIO directory that is shared between all clusters // created in the same logicTest. It is populated during setup() of the logic // test. @@ -1264,8 +1268,8 @@ func (t *logicTest) getOrOpenClient(user string, nodeIdx int, newSession bool) * if _, err := db.Exec("SET index_recommendations_enabled = false"); err != nil { t.Fatal(err) } - if t.cfg.EnableDefaultReadCommitted { - if _, err := db.Exec("SET default_transaction_isolation = 'READ COMMITTED'"); err != nil { + if iso := t.cfg.EnableDefaultIsolationLevel; iso != 0 { + if _, err := db.Exec(fmt.Sprintf("SET default_transaction_isolation = '%s'", iso)); err != nil { t.Fatal(err) } } @@ -1322,6 +1326,7 @@ func (t *logicTest) newTestServerCluster(bootstrapBinaryPath, upgradeBinaryPath _ = os.RemoveAll(logsDir) } } + t.logsDir = logsDir var envVars []string if strings.Contains(upgradeBinaryPath, "cockroach-short") { @@ -1350,16 +1355,9 @@ func (t *logicTest) newTestServerCluster(bootstrapBinaryPath, upgradeBinaryPath if err != nil { t.Fatal(err) } - for i := 0; i < t.cfg.NumNodes; i++ { - // Wait for each node to be reachable. - if err := ts.WaitForInitFinishForNode(i); err != nil { - t.Fatal(err) - } - } - t.testserverCluster = ts t.clusterCleanupFuncs = append(t.clusterCleanupFuncs, ts.Stop, cleanupLogsDir) - + t.waitForAllNodes() t.setSessionUser(username.RootUser, 0 /* nodeIdx */, false /* newSession */) // These tests involve stopping and starting nodes, so to reduce flakiness, @@ -1371,6 +1369,60 @@ func (t *logicTest) newTestServerCluster(bootstrapBinaryPath, upgradeBinaryPath } } +// waitForAllNodes waits for each node to initialize when under +// cockroach-go-testserver logic test configurations. +func (t *logicTest) waitForAllNodes() { + if !t.cfg.UseCockroachGoTestserver { + return + } + for i := 0; i < t.cfg.NumNodes; i++ { + // Wait for each node to be reachable. + if err := t.testserverCluster.WaitForInitFinishForNode(i); err != nil { + if testutils.IsError(err, "init did not finish for node") { + // Check for `Can't find decompressor for snappy` error in the logs. + // This error appears to be some sort of infra issue where CRDB is + // unable to connect to another node, possibly because there is + // another non-CRDB server listening on that port. Since this is a rare + // issue, and we haven't been able to investigate it effectively, we + // will ignore this error. + // See https://github.com/cockroachdb/cockroach/issues/128759. + foundSnappyErr := false + walkErr := filepath.WalkDir(t.logsDir, func(path string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + if d.IsDir() { + return nil + } + file, err := os.Open(path) + if err != nil { + return err + } + defer file.Close() + + scanner := bufio.NewScanner(file) + for scanner.Scan() { + if strings.Contains(scanner.Text(), "Can't find decompressor for snappy") { + foundSnappyErr = true + return filepath.SkipAll + } + } + if err := scanner.Err(); err != nil { + return err + } + return nil + }) + if walkErr != nil { + t.t().Logf("error while walking logs directory: %v", walkErr) + } else if foundSnappyErr { + t.t().Skip("ignoring init did not finish for node error due to snappy error") + } + } + t.Fatal(err) + } + } +} + // newCluster creates a new cluster. It should be called after the logic tests's // server args are configured. 
That is, either during setup() when creating the // initial cluster to be used in a test, or when creating additional test @@ -1713,6 +1765,12 @@ func (t *logicTest) newCluster( } } + if cfg.EnableDefaultIsolationLevel == tree.RepeatableReadIsolation { + if _, err := conn.Exec("SET CLUSTER SETTING sql.txn.repeatable_read_isolation.enabled = true"); err != nil { + t.Fatal(err) + } + } + if cfg.DisableLocalityOptimizedSearch { if _, err := conn.Exec( "SET CLUSTER SETTING sql.defaults.locality_optimized_partitioned_index_scan.enabled = false", @@ -3228,14 +3286,10 @@ func (t *logicTest) processSubtest( if err := t.testserverCluster.UpgradeNode(nodeIdx); err != nil { t.Fatal(err) } - for i := 0; i < t.cfg.NumNodes; i++ { - // Wait for each node to be reachable, since UpgradeNode uses `kill` - // to terminate nodes, and may introduce temporary unavailability in - // the system range. - if err := t.testserverCluster.WaitForInitFinishForNode(i); err != nil { - t.Fatal(err) - } - } + // Wait for each node to be reachable, since UpgradeNode uses `kill` + // to terminate nodes, and may introduce temporary unavailability in + // the system range. + t.waitForAllNodes() // The port may have changed, so we must remove all the cached connections // to this node. for _, m := range t.clients { @@ -3607,11 +3661,12 @@ func (t *logicTest) finishExecQuery(query logicQuery, rows *gosql.Rows, err erro continue } valT := reflect.TypeOf(val).Kind() + // Column numbers in error messages are 1-based. + colPos := i + 1 switch colT { case 'T': if valT != reflect.String && valT != reflect.Slice && valT != reflect.Struct { return fmt.Errorf("%s: expected text value for column %d, but found %T: %#v", - query.pos, i, val, val, + query.pos, colPos, val, val, ) } case 'I': @@ -3623,7 +3678,7 @@ func (t *logicTest) finishExecQuery(query logicQuery, rows *gosql.Rows, err erro return nil } return fmt.Errorf("%s: expected int value for column %d, but found %T: %#v", - query.pos, i, val, val, + query.pos, colPos, val, val, ) } case 'F', 'R': @@ -3635,19 +3690,19 @@ func (t *logicTest) finishExecQuery(query logicQuery, rows *gosql.Rows, err erro return nil } return fmt.Errorf("%s: expected float/decimal value for column %d, but found %T: %#v", - query.pos, i, val, val, + query.pos, colPos, val, val, ) } case 'B': if valT != reflect.Bool { return fmt.Errorf("%s: expected boolean value for column %d, but found %T: %#v", - query.pos, i, val, val, + query.pos, colPos, val, val, ) } case 'O': if valT != reflect.Slice { return fmt.Errorf("%s: expected oid value for column %d, but found %T: %#v", - query.pos, i, val, val, + query.pos, colPos, val, val, ) } default: diff --git a/pkg/sql/logictest/logictestbase/BUILD.bazel b/pkg/sql/logictest/logictestbase/BUILD.bazel index 36bd1ab1823d..4994a2286dd2 100644 --- a/pkg/sql/logictest/logictestbase/BUILD.bazel +++ b/pkg/sql/logictest/logictestbase/BUILD.bazel @@ -10,6 +10,7 @@ go_library( "//pkg/build", "//pkg/clusterversion", "//pkg/roachpb", + "//pkg/sql/sem/tree", ], ) diff --git a/pkg/sql/logictest/logictestbase/logictestbase.go b/pkg/sql/logictest/logictestbase/logictestbase.go index c75ba1932f8a..2e1626b2fc42 100644 --- a/pkg/sql/logictest/logictestbase/logictestbase.go +++ b/pkg/sql/logictest/logictestbase/logictestbase.go @@ -24,6 +24,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/build" "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" ) var ( @@ -82,9 +83,9 @@ type TestClusterConfig struct {
// disableLocalityOptimizedSearch disables the cluster setting // locality_optimized_partitioned_index_scan, which is enabled by default. DisableLocalityOptimizedSearch bool - // EnableDefaultReadCommitted uses READ COMMITTED for all transactions - // by default. - EnableDefaultReadCommitted bool + // EnableDefaultIsolationLevel, if set, makes the specified isolation level + // the default for all transactions. + EnableDefaultIsolationLevel tree.IsolationLevel // DeclarativeCorpusCollection enables support for collecting corpuses // for the declarative schema changer. DeclarativeCorpusCollection bool @@ -302,11 +303,18 @@ var LogicTestConfigs = []TestClusterConfig{ OverrideVectorize: "off", }, { - Name: "local-read-committed", - NumNodes: 1, - OverrideDistSQLMode: "off", - IsCCLConfig: true, - EnableDefaultReadCommitted: true, + Name: "local-read-committed", + NumNodes: 1, + OverrideDistSQLMode: "off", + IsCCLConfig: true, + EnableDefaultIsolationLevel: tree.ReadCommittedIsolation, + }, + { + Name: "local-repeatable-read", + NumNodes: 1, + OverrideDistSQLMode: "off", + IsCCLConfig: true, + EnableDefaultIsolationLevel: tree.RepeatableReadIsolation, }, { Name: "fakedist", @@ -581,6 +589,7 @@ var ( "local-legacy-schema-changer", "local-vec-off", "local-read-committed", + "local-repeatable-read", "fakedist", "fakedist-vec-off", "fakedist-disk", @@ -609,6 +618,16 @@ var ( "3node-tenant", "3node-tenant-multiregion", "local-read-committed", + "local-repeatable-read", + } + // WeakIsoLevelConfigName is a special alias for all configs that default to + // a weak transaction isolation level. + WeakIsoLevelConfigName = "weak-iso-level-configs" + // WeakIsoLevelConfigNames is the list of all weak transaction isolation level + // configs. + WeakIsoLevelConfigNames = []string{ + "local-read-committed", + "local-repeatable-read", } // DefaultConfig is the default test configuration. DefaultConfig = parseTestConfig(DefaultConfigNames) @@ -618,6 +637,8 @@ var ( ThreeNodeTenantDefaultConfig = parseTestConfig(ThreeNodeTenantDefaultConfigNames) // EnterpriseConfig is the enterprise test configuration. EnterpriseConfig = parseTestConfig(EnterpriseConfigNames) + // WeakIsoLevelConfig is the weak transaction isolation level test configuration. + WeakIsoLevelConfig = parseTestConfig(WeakIsoLevelConfigNames) ) // logger is an interface implemented by testing.TB as well as stdlogger below. @@ -807,6 +828,8 @@ func processConfigs( configs = append(configs, applyBlocklistToConfigs(ThreeNodeTenantDefaultConfig, blocklist)...) case EnterpriseConfigName: configs = append(configs, applyBlocklistToConfigs(EnterpriseConfig, blocklist)...) + case WeakIsoLevelConfigName: + configs = append(configs, applyBlocklistToConfigs(WeakIsoLevelConfig, blocklist)...) default: t.Fatalf("%s: unknown config name %s", path, configName) } @@ -882,6 +905,8 @@ func getDefaultConfigListNames(name string) []string { return ThreeNodeTenantDefaultConfigNames case EnterpriseConfigName: return EnterpriseConfigNames + case WeakIsoLevelConfigName: + return WeakIsoLevelConfigNames } return []string{} } diff --git a/pkg/sql/logictest/testdata/logic_test/alter_table b/pkg/sql/logictest/testdata/logic_test/alter_table index 193303208540..49985febb498 100644 --- a/pkg/sql/logictest/testdata/logic_test/alter_table +++ b/pkg/sql/logictest/testdata/logic_test/alter_table @@ -391,12 +391,12 @@ ALTER TABLE t DROP COLUMN IF EXISTS e # Negative test to assert that NOTICEs are NOT emitted if there are no indexes # being dropped.
-# Read committed emits extra notices, so skip it. -skipif config local-read-committed +# Weak isolation levels emit extra notices, so skip them. +skipif config weak-iso-level-configs statement notice ^$ ALTER TABLE t DROP COLUMN IF EXISTS e CASCADE -onlyif config local-read-committed +onlyif config weak-iso-level-configs statement ok ALTER TABLE t DROP COLUMN IF EXISTS e CASCADE @@ -613,7 +613,7 @@ statement ok ALTER TABLE add_default ADD COLUMN d TIMESTAMP DEFAULT transaction_timestamp() query II rowsort -SELECT a,b FROM add_default WHERE d > c AND d - c < interval '10s' +SELECT a,b FROM add_default WHERE d > c AND d - c < interval '20s' ---- 2 42 3 10 @@ -1584,19 +1584,19 @@ statement error column "g" does not exist ALTER TABLE unique_without_index ADD CONSTRAINT bad_partial_unique UNIQUE WITHOUT INDEX (f) WHERE g > 0 # The unique constraint prevents new duplicate values. -skipif config local-read-committed 110873 +skipif config weak-iso-level-configs 110873 statement error pgcode 23505 pq: duplicate key value violates unique constraint "my_unique_f"\nDETAIL: Key \(f\)=\(1\) already exists\. INSERT INTO unique_without_index (f) VALUES (1), (1) # There is no unique constraint on e, yet, so this insert succeeds. -skipif config local-read-committed 110873 +skipif config weak-iso-level-configs 110873 statement ok INSERT INTO unique_without_index (e, f) VALUES (1, 1), (1, 2) # But trying to add a unique constraint now fails. # Note that we omit the constraint name in the expected error message because if the declarative schema changer is used, # the constraint name, at the time of validation failure, is still a place-holder name. -skipif config local-read-committed 110873 +skipif config weak-iso-level-configs 110873 statement error pgcode 23505 pq: could not create unique constraint ".*"\nDETAIL: Key \(e\)=\(1\) is duplicated\. ALTER TABLE unique_without_index ADD CONSTRAINT my_unique_e UNIQUE WITHOUT INDEX (e) @@ -1606,7 +1606,7 @@ ALTER TABLE unique_without_index ADD CONSTRAINT my_unique_e UNIQUE WITHOUT INDEX ALTER TABLE unique_without_index ADD CONSTRAINT my_unique_e2 UNIQUE WITHOUT INDEX (e) NOT VALID # Trying to validate one of the constraints will fail. -skipif config local-read-committed 110873 +skipif config weak-iso-level-configs 110873 statement error pgcode 23505 pq: could not create unique constraint ".*"\nDETAIL: Key \(e\)=\(1\) is duplicated\. ALTER TABLE unique_without_index VALIDATE CONSTRAINT my_unique_e @@ -1618,7 +1618,7 @@ statement ok ALTER TABLE unique_without_index VALIDATE CONSTRAINT my_unique_e # All these constraints are already valid, so validation should succeed. -skipif config local-read-committed 110873 +skipif config weak-iso-level-configs 110873 statement ok ALTER TABLE unique_without_index VALIDATE CONSTRAINT unique_b; ALTER TABLE unique_without_index VALIDATE CONSTRAINT unique_a_b; @@ -1955,8 +1955,8 @@ subtest if_table_exists_already statement ok CREATE TABLE new_table() -# Read committed emits extra notices, so skip it. -skipif config local-read-committed +# Weak isolation levels emit extra notices, so skip them. +skipif config weak-iso-level-configs query T noticetrace CREATE TABLE IF NOT EXISTS new_table(); ---- @@ -2359,8 +2359,8 @@ ALTER TABLE storage_param_table SET (fillfactor=true) statement error unimplemented: storage parameter "toast_tuple_target" ALTER TABLE storage_param_table SET (toast_tuple_target=100) -# Read committed emits extra notices, so skip it. 
-skipif config local-read-committed +# Weak isolation levels emit extra notices, so skip them. +skipif config weak-iso-level-configs query T noticetrace ALTER TABLE storage_param_table SET (fillfactor=99.9, autovacuum_enabled = off) ---- @@ -3967,3 +3967,37 @@ statement error pgcode 42601 variable sub-expressions are not allowed in EXPRESS ALTER TABLE t_124546 ADD CONSTRAINT ident UNIQUE ( ( EXISTS ( TABLE error FOR READ ONLY ) ) DESC ) STORING ( ident , ident ); subtest end + +subtest alter_table_add_column_serial + +statement ok +create table roach (id int); +insert into roach DEFAULT VALUES; +insert into roach DEFAULT VALUES; +SET serial_normalization = rowid + +statement ok +alter table roach add column serial_id SERIAL; + +query TTBTTTB colnames,rowsort +show columns from roach; +---- +column_name data_type is_nullable column_default generation_expression indices is_hidden +id INT8 true NULL · {roach_pkey} false +rowid INT8 false unique_rowid() · {roach_pkey} true +serial_id INT8 false unique_rowid() · {roach_pkey} false + +subtest end + +subtest unimplemented_for_non_rowid_in_DSC + +statement ok +SET serial_normalization = sql_sequence + +statement ok +SET use_declarative_schema_changer = unsafe_always + +statement error pq: \*tree.ColumnTableDef not implemented in the new schema changer: contains serial data type in unsupported mode +alter table roach add column serial_id2 SERIAL + +subtest end diff --git a/pkg/sql/logictest/testdata/logic_test/alter_type b/pkg/sql/logictest/testdata/logic_test/alter_type index fa973ccc6d03..518c1fde0a23 100644 --- a/pkg/sql/logictest/testdata/logic_test/alter_type +++ b/pkg/sql/logictest/testdata/logic_test/alter_type @@ -564,8 +564,8 @@ subtest if_not_exists statement ok CREATE TYPE ifNotExists AS ENUM() -# Read committed emits extra notices, so skip it. -skipif config local-read-committed +# Weak isolation levels emit extra notices, so skip them. +skipif config weak-iso-level-configs query T noticetrace CREATE TYPE IF NOT EXISTS ifNotExists AS ENUM(); ---- diff --git a/pkg/sql/logictest/testdata/logic_test/as_of b/pkg/sql/logictest/testdata/logic_test/as_of index 5df4cdc84a61..3c32506e279b 100644 --- a/pkg/sql/logictest/testdata/logic_test/as_of +++ b/pkg/sql/logictest/testdata/logic_test/as_of @@ -37,7 +37,7 @@ SELECT pg_sleep(5) -- we need to sleep so that the 4.8s elapses and the SELECT * # Notices print twice -- once during planning and once during execution. # There's no nice way of reducing this to once without some hacks -- so left as is. 
-skipif config 3node-tenant-default-configs +skipif config enterprise-configs query T noticetrace,nosort SELECT * FROM t AS OF SYSTEM TIME follower_read_timestamp() ---- @@ -97,27 +97,27 @@ skipif config 3node-tenant-default-configs statement error pq: no value provided for placeholder: \$1 SELECT * FROM t AS OF SYSTEM TIME $1 -skipif config 3node-tenant-default-configs +skipif config enterprise-configs statement error pgcode XXC01 with_min_timestamp can only be used with a CCL distribution SELECT with_min_timestamp('2020-01-15 15:16:17') -skipif config 3node-tenant-default-configs +skipif config enterprise-configs statement error pgcode XXC01 with_min_timestamp can only be used with a CCL distribution SELECT with_min_timestamp(statement_timestamp()) -skipif config 3node-tenant-default-configs +skipif config enterprise-configs statement error pgcode XXC01 with_max_staleness can only be used with a CCL distribution SELECT with_max_staleness('1s') -skipif config 3node-tenant-default-configs +skipif config enterprise-configs statement error pgcode XXC01 with_min_timestamp can only be used with a CCL distribution SELECT * FROM t AS OF SYSTEM TIME with_min_timestamp('2020-01-15 15:16:17') -skipif config 3node-tenant-default-configs +skipif config enterprise-configs statement error pgcode XXC01 with_min_timestamp can only be used with a CCL distribution SELECT * FROM t AS OF SYSTEM TIME with_min_timestamp(statement_timestamp()) -skipif config 3node-tenant-default-configs +skipif config enterprise-configs statement error pgcode XXC01 with_max_staleness can only be used with a CCL distribution SELECT * FROM t AS OF SYSTEM TIME with_max_staleness('1s'::interval) diff --git a/pkg/sql/logictest/testdata/logic_test/builtin_function b/pkg/sql/logictest/testdata/logic_test/builtin_function index a7a8eae0bcca..36929fa34d49 100644 --- a/pkg/sql/logictest/testdata/logic_test/builtin_function +++ b/pkg/sql/logictest/testdata/logic_test/builtin_function @@ -3864,6 +3864,72 @@ select crdb_internal.job_payload_type('invalid'::BYTES); query error pgcode 22023 pq: crdb_internal.job_payload_type\(\): invalid type in job payload protocol message: Payload.Type called on a payload with an unknown details type: select crdb_internal.job_payload_type(''); +subtest crdb_internal.get_fully_qualified_table_name +query T +SELECT crdb_internal.get_fully_qualified_table_name(NULL) +---- +NULL + +query T +SELECT crdb_internal.get_fully_qualified_table_name(9999999) +---- +NULL + +statement ok +CREATE DATABASE "testDatabase" + +query T +SELECT crdb_internal.get_fully_qualified_table_name((SELECT id FROM system.namespace WHERE name = 'foo')) +---- +test.public.foo + +statement error unknown signature: crdb_internal.get_fully_qualified_table_name\(string\) +SELECT crdb_internal.get_fully_qualified_table_name('') + +statement ok +USE "testDatabase" + +statement ok +CREATE SCHEMA "testSchema" + +statement ok +CREATE TABLE "testSchema"."testTable" (a INT) + +let $testTableID +SELECT id FROM system.namespace WHERE name = 'testTable' + +statement ok +USE test + +query T +SELECT crdb_internal.get_fully_qualified_table_name($testTableID) +---- +"testDatabase"."testSchema"."testTable" + +user testuser + +# Verify that testuser has permissions to use the builtin function, and the +# backing view. 
+query T +SELECT crdb_internal.get_fully_qualified_table_name($testTableID) +---- +"testDatabase"."testSchema"."testTable" + +user root + +statement ok +REVOKE CONNECT ON DATABASE "testDatabase" FROM public + +user testuser + +# testuser should no longer be able to see the name. +query T +SELECT crdb_internal.get_fully_qualified_table_name($testTableID) +---- +NULL + +user root + subtest crdb_internal.redactable_sql_constants query T SELECT crdb_internal.redactable_sql_constants(NULL) diff --git a/pkg/sql/logictest/testdata/logic_test/cascade b/pkg/sql/logictest/testdata/logic_test/cascade index 61ccb354c875..b759d67cf33e 100644 --- a/pkg/sql/logictest/testdata/logic_test/cascade +++ b/pkg/sql/logictest/testdata/logic_test/cascade @@ -1990,11 +1990,11 @@ INSERT INTO b VALUES (1, 1), (2, 2), (3, 4); INSERT INTO c VALUES (2, 1), (1, 2); # Perform a standard cascading update. -skipif config local-read-committed 112488 +skipif config weak-iso-level-configs 112488 statement ok UPDATE a SET id = id*10; -skipif config local-read-committed 112488 +skipif config weak-iso-level-configs 112488 query TII rowsort SELECT name, id1, id2 FROM ( SELECT 'a' AS name, id AS id1, 0 AS id2 FROM a @@ -2017,26 +2017,26 @@ c 10 20 c 20 10 # Try to update one value to fail c.less_than_100 -skipif config local-read-committed 112488 +skipif config weak-iso-level-configs 112488 statement error pq: failed to satisfy CHECK constraint \(\(id1 \+ id2\) < 100:::INT8\) UPDATE a SET id = id*10; # Try to update one value to fail c.less_than_100 or c.less_than_1000 -skipif config local-read-committed 112488 +skipif config weak-iso-level-configs 112488 statement error pq: failed to satisfy CHECK constraint \(\(id1 \+ id2\) < 100:::INT8\) UPDATE a SET id = id*10; # Try to update one value to fail c.less_than_100 -skipif config local-read-committed 112488 +skipif config weak-iso-level-configs 112488 statement error pq: failed to satisfy CHECK constraint \(\(id1 \+ id2\) < 1000:::INT8\) UPDATE a SET id = 1000 WHERE id = 30; -skipif config local-read-committed 112488 +skipif config weak-iso-level-configs 112488 statement error pq: failed to satisfy CHECK constraint \(\(id1 \+ id2\) < 1000:::INT8\) UPDATE a SET id = 1000 WHERE id = 40; # Update a value that would fail the check if it was cascaded, but wasn't. -skipif config local-read-committed 112488 +skipif config weak-iso-level-configs 112488 statement ok UPDATE a SET id = 100000 WHERE id = 50; diff --git a/pkg/sql/logictest/testdata/logic_test/ccl b/pkg/sql/logictest/testdata/logic_test/ccl index a1fed870c71a..c73190ecd453 100644 --- a/pkg/sql/logictest/testdata/logic_test/ccl +++ b/pkg/sql/logictest/testdata/logic_test/ccl @@ -1,6 +1,6 @@ -# 3node-tenant is blocked from running this file because the config runs with +# enterprise-configs are blocked from running this file because they run with # a CCL binary, so the expected failures from using a non-CCL binary don't occur. -# LogicTest: !3node-tenant-default-configs +# LogicTest: !enterprise-configs # CCL-only statements error out trying to handle the parsed statements. 
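The logic test changes in this patch hinge on EnableDefaultIsolationLevel being an integer enum whose zero value means "no default isolation level configured"; that is what makes the `iso != 0` guard added to getOrOpenClient safe. Below is a minimal standalone sketch of the pattern, assuming (as the guard and the '%s' formatting imply) that tree.IsolationLevel has an unspecified zero value and a String() form matching the SQL syntax; the type and constant names here are illustrative stand-ins, not the real tree package.

package main

import "fmt"

// IsolationLevel stands in for tree.IsolationLevel: an integer enum whose
// zero value means "no level was specified". (Assumed for illustration.)
type IsolationLevel int

const (
	UnspecifiedIsolation IsolationLevel = iota
	ReadCommittedIsolation
	RepeatableReadIsolation
)

// String renders the level the way SET default_transaction_isolation expects.
func (l IsolationLevel) String() string {
	switch l {
	case ReadCommittedIsolation:
		return "READ COMMITTED"
	case RepeatableReadIsolation:
		return "REPEATABLE READ"
	default:
		return "UNSPECIFIED"
	}
}

func main() {
	var unset IsolationLevel // zero value: the config left the field unset
	for _, iso := range []IsolationLevel{unset, ReadCommittedIsolation, RepeatableReadIsolation} {
		// Mirrors the logic.go change: only issue the SET when a level
		// was actually configured.
		if iso != 0 {
			fmt.Printf("SET default_transaction_isolation = '%s'\n", iso)
		}
	}
}

Run as-is, the sketch emits a SET statement only for the two configured levels, which is how local-read-committed and local-repeatable-read pick up their session defaults while every other config skips the statement entirely.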
diff --git a/pkg/sql/logictest/testdata/logic_test/check_constraints b/pkg/sql/logictest/testdata/logic_test/check_constraints index 154a0044f4a6..83190663e2c0 100644 --- a/pkg/sql/logictest/testdata/logic_test/check_constraints +++ b/pkg/sql/logictest/testdata/logic_test/check_constraints @@ -133,15 +133,15 @@ SELECT * from t3 ---- 3 2 -skipif config local-read-committed 112488 +skipif config weak-iso-level-configs 112488 statement error pgcode 23514 failed to satisfy CHECK constraint UPDATE t3 SET b = 3 WHERE a = 3 -onlyif config local-read-committed 112488 +onlyif config weak-iso-level-configs 112488 statement error multi-column-family check constraints are not yet supported under read committed isolation UPDATE t3 SET b = 3 WHERE a = 3 -skipif config local-read-committed 112488 +skipif config weak-iso-level-configs 112488 statement ok UPDATE t3 SET b = 1 WHERE a = 3 @@ -328,23 +328,23 @@ INSERT INTO t9 VALUES (5, 3) statement error pgcode 23514 failed to satisfy CHECK constraint \(a > b\) INSERT INTO t9 VALUES (6, 7) -skipif config local-read-committed 112488 +skipif config weak-iso-level-configs 112488 statement ok UPDATE t9 SET b = 4 WHERE a = 5 -skipif config local-read-committed 112488 +skipif config weak-iso-level-configs 112488 statement error pgcode 23514 failed to satisfy CHECK constraint \(a > b\) UPDATE t9 SET b = 6 WHERE a = 5 -skipif config local-read-committed 112488 +skipif config weak-iso-level-configs 112488 statement ok UPDATE t9 SET a = 7 WHERE a = 4 -skipif config local-read-committed 112488 +skipif config weak-iso-level-configs 112488 statement error pgcode 23514 failed to satisfy CHECK constraint \(a > b\) UPDATE t9 SET a = 2 WHERE a = 5 -onlyif config local-read-committed 112488 +onlyif config weak-iso-level-configs 112488 statement error multi-column-family check constraints are not yet supported under read committed isolation UPDATE t9 SET b = 4 WHERE a = 5 diff --git a/pkg/sql/logictest/testdata/logic_test/cluster_locks b/pkg/sql/logictest/testdata/logic_test/cluster_locks index 9b089d7bea17..b1b4fac7c607 100644 --- a/pkg/sql/logictest/testdata/logic_test/cluster_locks +++ b/pkg/sql/logictest/testdata/logic_test/cluster_locks @@ -1,6 +1,8 @@ # LogicTest: local-read-committed -# Begin with SERIALIZABLE; later in the test the default is changed to READ COMMITTED +# This test uses local-read-committed so that it can also test locking behavior +# with READ COMMITTED transactions. However, we'll use a default of SERIALIZABLE +# for all transactions. statement ok SET default_transaction_isolation = 'SERIALIZABLE' @@ -25,7 +27,7 @@ query TTTI colnames,rowsort SELECT start_key, end_key, replicas, lease_holder FROM [SHOW RANGES FROM TABLE t WITH DETAILS] ---- start_key end_key replicas lease_holder - …/1/"d" {1} 1 + …/1/"d" {1} 1 …/1/"d" …/1/"r" {1} 1 …/1/"r" {1} 1 @@ -54,6 +56,9 @@ SHOW session_id user testuser +statement ok +SET default_transaction_isolation = 'SERIALIZABLE' + let $testuser_session SHOW session_id @@ -355,7 +360,7 @@ SELECT count(*) FROM crdb_internal.cluster_locks WHERE table_name IN ('t','t2') # Test with different isolation levels. 
statement ok -SET CLUSTER SETTING sql.txn.snapshot_isolation.enabled = true +SET CLUSTER SETTING sql.txn.repeatable_read_isolation.enabled = true statement ok BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; @@ -405,11 +410,11 @@ SELECT * FROM t WHERE k = 'a' FOR UPDATE; user root query TTTTTTTBB colnames,rowsort,retry -SELECT database_name, schema_name, table_name, lock_key_pretty, lock_strength, durability, regexp_replace(isolation_level, 'READ COMMITTED', 'READ_COMMITTED') AS isolation_level, granted, contended FROM crdb_internal.cluster_locks WHERE table_name = 't' +SELECT database_name, schema_name, table_name, lock_key_pretty, lock_strength, durability, regexp_replace(isolation_level, ' ', '_') AS isolation_level, granted, contended FROM crdb_internal.cluster_locks WHERE table_name = 't' ---- database_name schema_name table_name lock_key_pretty lock_strength durability isolation_level granted contended -test public t /Table/106/1/"a"/0 Exclusive Replicated SNAPSHOT true true -test public t /Table/106/1/"a"/0 Exclusive Replicated READ_COMMITTED false true +test public t /Table/106/1/"a"/0 Exclusive Replicated REPEATABLE_READ true true +test public t /Table/106/1/"a"/0 Exclusive Replicated READ_COMMITTED false true statement ok COMMIT diff --git a/pkg/sql/logictest/testdata/logic_test/crdb_internal b/pkg/sql/logictest/testdata/logic_test/crdb_internal index e783e5847265..a8d8cf01894d 100644 --- a/pkg/sql/logictest/testdata/logic_test/crdb_internal +++ b/pkg/sql/logictest/testdata/logic_test/crdb_internal @@ -47,6 +47,7 @@ crdb_internal databases table node NULL N crdb_internal default_privileges table node NULL NULL crdb_internal feature_usage table node NULL NULL crdb_internal forward_dependencies table node NULL NULL +crdb_internal fully_qualified_names view node NULL NULL crdb_internal gossip_alerts table node NULL NULL crdb_internal gossip_liveness table node NULL NULL crdb_internal gossip_network table node NULL NULL @@ -1496,6 +1497,7 @@ test public f CREATE FUNCTION public.f(INT8) NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 1; $$ @@ -1505,6 +1507,7 @@ test public f CREATE FUNCTION public.f(STRING, b INT8) LEAKPROOF STRICT LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 'hello'; $$ @@ -1514,6 +1517,7 @@ test sc f2 CREATE FUNCTION sc.f2(STRING) NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 'hello'; $$ @@ -1536,6 +1540,7 @@ test public f CREATE FUNCTION public.f(INT8) NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 1; $$ @@ -1545,6 +1550,7 @@ test public f CREATE FUNCTION public.f(STRING, b INT8) LEAKPROOF STRICT LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 'hello'; $$ @@ -1554,6 +1560,7 @@ test sc f2 CREATE FUNCTION sc.f2(STRING) NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 'hello'; $$ @@ -1563,6 +1570,7 @@ test_cross_db public f_cross_db CREATE FUNCTION public.f_cross_db() NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 1; $$ @@ -1610,16 +1618,19 @@ ORDER BY procedure_id; ---- 104 test 105 public 139 p CREATE PROCEDURE public.p(INT8) LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 1; $$ 104 test 105 public 140 p CREATE PROCEDURE public.p(STRING, b INT8) LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 'hello'; $$ 104 test 142 sc 143 p2 CREATE PROCEDURE sc.p2(STRING) LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 'hello'; $$ @@ -1638,21 +1649,25 @@ ORDER BY procedure_id; ---- 104 test 105 public 139 p CREATE PROCEDURE 
public.p(INT8) LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 1; $$ 104 test 105 public 140 p CREATE PROCEDURE public.p(STRING, b INT8) LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 'hello'; $$ 104 test 142 sc 143 p2 CREATE PROCEDURE sc.p2(STRING) LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 'hello'; $$ 144 test_cross_db 145 public 146 p_cross_db CREATE PROCEDURE public.p_cross_db() LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 1; $$ @@ -1695,3 +1710,36 @@ statement ok REVOKE SYSTEM VIEWCLUSTERMETADATA FROM testuser subtest end + +subtest test_crdb_internal_table_spans + +statement ok +CREATE DATABASE test_table_spans; +USE test_table_spans; + +statement ok +CREATE TABLE foo (a INT PRIMARY KEY, INDEX idx(a)); INSERT INTO foo VALUES(1); + +statement ok +CREATE TABLE bar (a INT PRIMARY KEY, INDEX idx(a)); + +query TB rowsort +SELECT name, dropped +FROM "".crdb_internal.table_spans s JOIN "".crdb_internal.tables t ON s.descriptor_id = t.table_id +WHERE t.database_name = 'test_table_spans'; +---- +foo false +bar false + +statement ok +DROP TABLE bar + +query TB rowsort +SELECT name, dropped +FROM "".crdb_internal.table_spans s JOIN "".crdb_internal.tables t ON s.descriptor_id = t.table_id +WHERE t.database_name = 'test_table_spans'; +---- +foo false +bar true + +subtest end diff --git a/pkg/sql/logictest/testdata/logic_test/crdb_internal_catalog b/pkg/sql/logictest/testdata/logic_test/crdb_internal_catalog index 44605fe6278e..f525c63425d0 100644 --- a/pkg/sql/logictest/testdata/logic_test/crdb_internal_catalog +++ b/pkg/sql/logictest/testdata/logic_test/crdb_internal_catalog @@ -105,7 +105,7 @@ skipif config local-mixed-24.2 query IT SELECT id, strip_volatile(descriptor) FROM crdb_internal.kv_catalog_descriptor ORDER BY id ---- -1 {"database": {"id": 1, "name": "system", "privileges": {"ownerProto": "node", "users": [{"privileges": "2048", "userProto": "admin", "withGrantOption": "2048"}, {"privileges": "2048", "userProto": "root", "withGrantOption": "2048"}], "version": 3}, "systemDatabaseSchemaVersion": {"internal": 6, "majorVal": 1000024, "minorVal": 2}, "version": "1"}} +1 {"database": {"id": 1, "name": "system", "privileges": {"ownerProto": "node", "users": [{"privileges": "2048", "userProto": "admin", "withGrantOption": "2048"}, {"privileges": "2048", "userProto": "root", "withGrantOption": "2048"}], "version": 3}, "systemDatabaseSchemaVersion": {"internal": 14, "majorVal": 1000024, "minorVal": 2}, "version": "1"}} 3 {"table": {"columns": [{"id": 1, "name": "id", "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 2, "name": "descriptor", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}], "formatVersion": 3, "id": 3, "name": "descriptor", "nextColumnId": 3, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "parentId": 1, "primaryIndex": {"constraintId": 1, "encodingType": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "keyColumnDirections": ["ASC"], "keyColumnIds": [1], "keyColumnNames": ["id"], "name": "primary", "partitioning": {}, "sharded": {}, "storeColumnIds": [2], "storeColumnNames": ["descriptor"], "unique": true, "version": 4}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "admin", "withGrantOption": "32"}, {"privileges": "32", "userProto": "root", "withGrantOption": "32"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 29, "version": "1"}} 4 {"table": {"columns": [{"id": 1, "name": "username", "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": 
"hashedPassword", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}, {"defaultExpr": "false", "id": 3, "name": "isRole", "type": {"oid": 16}}, {"id": 4, "name": "user_id", "type": {"family": "OidFamily", "oid": 26}}], "formatVersion": 3, "id": 4, "indexes": [{"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 2, "interleave": {}, "keyColumnDirections": ["ASC"], "keyColumnIds": [4], "keyColumnNames": ["user_id"], "keySuffixColumnIds": [1], "name": "users_user_id_idx", "partitioning": {}, "sharded": {}, "unique": true, "version": 3}], "name": "users", "nextColumnId": 5, "nextConstraintId": 3, "nextIndexId": 3, "nextMutationId": 1, "parentId": 1, "primaryIndex": {"constraintId": 2, "encodingType": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "keyColumnDirections": ["ASC"], "keyColumnIds": [1], "keyColumnNames": ["username"], "name": "primary", "partitioning": {}, "sharded": {}, "storeColumnIds": [2, 3, 4], "storeColumnNames": ["hashedPassword", "isRole", "user_id"], "unique": true, "version": 4}, "privileges": {"ownerProto": "node", "users": [{"privileges": "480", "userProto": "admin", "withGrantOption": "480"}, {"privileges": "480", "userProto": "root", "withGrantOption": "480"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 29, "version": "2"}} 5 {"table": {"columns": [{"id": 1, "name": "id", "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 2, "name": "config", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}], "formatVersion": 3, "id": 5, "name": "zones", "nextColumnId": 3, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "parentId": 1, "primaryIndex": {"constraintId": 1, "encodingType": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "keyColumnDirections": ["ASC"], "keyColumnIds": [1], "keyColumnNames": ["id"], "name": "primary", "partitioning": {}, "sharded": {}, "storeColumnIds": [2], "storeColumnNames": ["config"], "unique": true, "version": 4}, "privileges": {"ownerProto": "node", "users": [{"privileges": "480", "userProto": "admin", "withGrantOption": "480"}, {"privileges": "480", "userProto": "root", "withGrantOption": "480"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 29, "version": "1"}} @@ -143,7 +143,7 @@ SELECT id, strip_volatile(descriptor) FROM crdb_internal.kv_catalog_descriptor O 43 {"table": {"checks": [{"columnIds": [8], "constraintId": 2, "expr": "crdb_internal_aggregated_ts_app_name_fingerprint_id_node_id_shard_8 IN (0:::INT8, 1:::INT8, 2:::INT8, 3:::INT8, 4:::INT8, 5:::INT8, 6:::INT8, 7:::INT8)", "fromHashShardedColumn": true, "name": "check_crdb_internal_aggregated_ts_app_name_fingerprint_id_node_id_shard_8"}], "columns": [{"id": 1, "name": "aggregated_ts", "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 2, "name": "fingerprint_id", "type": {"family": "BytesFamily", "oid": 17}}, {"id": 3, "name": "app_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "node_id", "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 5, "name": "agg_interval", "type": {"family": "IntervalFamily", "intervalDurationField": {}, "oid": 1186}}, {"id": 6, "name": "metadata", "type": {"family": "JsonFamily", "oid": 3802}}, {"id": 7, "name": "statistics", "type": {"family": "JsonFamily", "oid": 3802}}, {"computeExpr": "mod(fnv32(crdb_internal.datums_to_bytes(aggregated_ts, app_name, fingerprint_id, node_id)), 8:::INT8)", "hidden": true, "id": 8, "name": 
"crdb_internal_aggregated_ts_app_name_fingerprint_id_node_id_shard_8", "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"computeExpr": "((statistics->'statistics':::STRING)->'cnt':::STRING)::INT8", "id": 9, "name": "execution_count", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"computeExpr": "(((statistics->'statistics':::STRING)->'svcLat':::STRING)->'mean':::STRING)::FLOAT8", "id": 10, "name": "service_latency", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"computeExpr": "(((statistics->'execution_statistics':::STRING)->'cpuSQLNanos':::STRING)->'mean':::STRING)::FLOAT8", "id": 11, "name": "cpu_sql_nanos", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"computeExpr": "(((statistics->'execution_statistics':::STRING)->'contentionTime':::STRING)->'mean':::STRING)::FLOAT8", "id": 12, "name": "contention_time", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"computeExpr": "((statistics->'statistics':::STRING)->>'cnt':::STRING)::FLOAT8 * (((statistics->'statistics':::STRING)->'svcLat':::STRING)->>'mean':::STRING)::FLOAT8", "id": 13, "name": "total_estimated_execution_time", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"computeExpr": "(((statistics->'statistics':::STRING)->'latencyInfo':::STRING)->'p99':::STRING)::FLOAT8", "id": 14, "name": "p99_latency", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}], "formatVersion": 3, "id": 43, "indexes": [{"foreignKey": {}, "geoConfig": {}, "id": 2, "interleave": {}, "keyColumnDirections": ["ASC"], "keyColumnIds": [2], "keyColumnNames": ["fingerprint_id"], "keySuffixColumnIds": [8, 1, 3, 4], "name": "fingerprint_stats_idx", "partitioning": {}, "sharded": {}, "version": 3}, {"foreignKey": {}, "geoConfig": {}, "id": 3, "interleave": {}, "keyColumnDirections": ["ASC", "ASC", "DESC"], "keyColumnIds": [1, 3, 9], "keyColumnNames": ["aggregated_ts", "app_name", "execution_count"], "keySuffixColumnIds": [8, 2, 4], "name": "execution_count_idx", "partitioning": {}, "predicate": "app_name NOT LIKE '$ internal%':::STRING", "sharded": {}, "version": 3}, {"compositeColumnIds": [10], "foreignKey": {}, "geoConfig": {}, "id": 4, "interleave": {}, "keyColumnDirections": ["ASC", "ASC", "DESC"], "keyColumnIds": [1, 3, 10], "keyColumnNames": ["aggregated_ts", "app_name", "service_latency"], "keySuffixColumnIds": [8, 2, 4], "name": "service_latency_idx", "partitioning": {}, "predicate": "app_name NOT LIKE '$ internal%':::STRING", "sharded": {}, "version": 3}, {"compositeColumnIds": [11], "foreignKey": {}, "geoConfig": {}, "id": 5, "interleave": {}, "keyColumnDirections": ["ASC", "ASC", "DESC"], "keyColumnIds": [1, 3, 11], "keyColumnNames": ["aggregated_ts", "app_name", "cpu_sql_nanos"], "keySuffixColumnIds": [8, 2, 4], "name": "cpu_sql_nanos_idx", "partitioning": {}, "predicate": "app_name NOT LIKE '$ internal%':::STRING", "sharded": {}, "version": 3}, {"compositeColumnIds": [12], "foreignKey": {}, "geoConfig": {}, "id": 6, "interleave": {}, "keyColumnDirections": ["ASC", "ASC", "DESC"], "keyColumnIds": [1, 3, 12], "keyColumnNames": ["aggregated_ts", "app_name", "contention_time"], "keySuffixColumnIds": [8, 2, 4], "name": "contention_time_idx", "partitioning": {}, "predicate": "app_name NOT LIKE '$ internal%':::STRING", "sharded": {}, "version": 3}, {"compositeColumnIds": [13], "foreignKey": {}, "geoConfig": {}, "id": 7, "interleave": {}, "keyColumnDirections": ["ASC", 
"ASC", "DESC"], "keyColumnIds": [1, 3, 13], "keyColumnNames": ["aggregated_ts", "app_name", "total_estimated_execution_time"], "keySuffixColumnIds": [8, 2, 4], "name": "total_estimated_execution_time_idx", "partitioning": {}, "predicate": "app_name NOT LIKE '$ internal%':::STRING", "sharded": {}, "version": 3}, {"compositeColumnIds": [14], "foreignKey": {}, "geoConfig": {}, "id": 8, "interleave": {}, "keyColumnDirections": ["ASC", "ASC", "DESC"], "keyColumnIds": [1, 3, 14], "keyColumnNames": ["aggregated_ts", "app_name", "p99_latency"], "keySuffixColumnIds": [8, 2, 4], "name": "p99_latency_idx", "partitioning": {}, "predicate": "app_name NOT LIKE '$ internal%':::STRING", "sharded": {}, "version": 3}], "name": "transaction_statistics", "nextColumnId": 15, "nextConstraintId": 3, "nextIndexId": 9, "nextMutationId": 1, "parentId": 1, "primaryIndex": {"constraintId": 1, "encodingType": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "keyColumnDirections": ["ASC", "ASC", "ASC", "ASC", "ASC"], "keyColumnIds": [8, 1, 2, 3, 4], "keyColumnNames": ["crdb_internal_aggregated_ts_app_name_fingerprint_id_node_id_shard_8", "aggregated_ts", "fingerprint_id", "app_name", "node_id"], "name": "primary", "partitioning": {}, "sharded": {"columnNames": ["aggregated_ts", "app_name", "fingerprint_id", "node_id"], "isSharded": true, "name": "crdb_internal_aggregated_ts_app_name_fingerprint_id_node_id_shard_8", "shardBuckets": 8}, "storeColumnIds": [5, 6, 7, 9, 10, 11, 12, 13, 14], "storeColumnNames": ["agg_interval", "metadata", "statistics", "execution_count", "service_latency", "cpu_sql_nanos", "contention_time", "total_estimated_execution_time", "p99_latency"], "unique": true, "version": 4}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "admin", "withGrantOption": "32"}, {"privileges": "32", "userProto": "root", "withGrantOption": "32"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 29, "version": "1"}} 44 {"table": {"columns": [{"id": 1, "name": "database_id", "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "role_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "settings", "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}, {"id": 4, "name": "role_id", "type": {"family": "OidFamily", "oid": 26}}], "formatVersion": 3, "id": 44, "indexes": [{"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 2, "interleave": {}, "keyColumnDirections": ["ASC", "ASC"], "keyColumnIds": [1, 4], "keyColumnNames": ["database_id", "role_id"], "keySuffixColumnIds": [2], "name": "database_role_settings_database_id_role_id_key", "partitioning": {}, "sharded": {}, "storeColumnIds": [3], "storeColumnNames": ["settings"], "unique": true, "version": 3}], "name": "database_role_settings", "nextColumnId": 5, "nextConstraintId": 3, "nextIndexId": 3, "nextMutationId": 1, "parentId": 1, "primaryIndex": {"constraintId": 2, "encodingType": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "keyColumnDirections": ["ASC", "ASC"], "keyColumnIds": [1, 2], "keyColumnNames": ["database_id", "role_name"], "name": "primary", "partitioning": {}, "sharded": {}, "storeColumnIds": [3, 4], "storeColumnNames": ["settings", "role_id"], "unique": true, "version": 4}, "privileges": {"ownerProto": "node", "users": [{"privileges": "480", "userProto": "admin", "withGrantOption": "480"}, {"privileges": "480", "userProto": "root", 
"withGrantOption": "480"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 29, "version": "1"}} 45 {"table": {"columns": [{"id": 1, "name": "tenant_id", "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 2, "name": "instance_id", "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 3, "name": "next_instance_id", "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 4, "name": "last_update", "type": {"family": "TimestampFamily", "oid": 1114}}, {"id": 5, "name": "ru_burst_limit", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 6, "name": "ru_refill_rate", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 7, "name": "ru_current", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 8, "name": "current_share_sum", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 9, "name": "total_consumption", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}, {"id": 10, "name": "instance_lease", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}, {"id": 11, "name": "instance_seq", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 12, "name": "instance_shares", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 13, "name": "current_rates", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}, {"id": 14, "name": "next_rates", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}], "excludeDataFromBackup": true, "formatVersion": 3, "id": 45, "name": "tenant_usage", "nextColumnId": 15, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "parentId": 1, "primaryIndex": {"constraintId": 1, "encodingType": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "keyColumnDirections": ["ASC", "ASC"], "keyColumnIds": [1, 2], "keyColumnNames": ["tenant_id", "instance_id"], "name": "primary", "partitioning": {}, "sharded": {}, "storeColumnIds": [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14], "storeColumnNames": ["next_instance_id", "last_update", "ru_burst_limit", "ru_refill_rate", "ru_current", "current_share_sum", "total_consumption", "instance_lease", "instance_seq", "instance_shares", "current_rates", "next_rates"], "unique": true, "version": 4}, "privileges": {"ownerProto": "node", "users": [{"privileges": "480", "userProto": "admin", "withGrantOption": "480"}, {"privileges": "480", "userProto": "root", "withGrantOption": "480"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 29, "version": "1"}} -46 {"table": {"columns": [{"id": 1, "name": "id", "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 2, "name": "addr", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "session_id", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}, {"id": 4, "name": "locality", "nullable": true, "type": {"family": "JsonFamily", "oid": 3802}}, {"id": 5, "name": "sql_addr", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "crdb_region", "type": {"family": "BytesFamily", "oid": 17}}, {"id": 7, "name": "binary_version", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 46, "name": "sql_instances", "nextColumnId": 8, "nextConstraintId": 2, "nextIndexId": 3, "nextMutationId": 1, "parentId": 1, "primaryIndex": {"constraintId": 1, "encodingType": 1, 
"foreignKey": {}, "geoConfig": {}, "id": 2, "interleave": {}, "keyColumnDirections": ["ASC", "ASC"], "keyColumnIds": [6, 1], "keyColumnNames": ["crdb_region", "id"], "name": "primary", "partitioning": {}, "sharded": {}, "storeColumnIds": [2, 3, 4, 5, 7], "storeColumnNames": ["addr", "session_id", "locality", "sql_addr", "binary_version"], "unique": true, "version": 4}, "privileges": {"ownerProto": "node", "users": [{"privileges": "480", "userProto": "admin", "withGrantOption": "480"}, {"privileges": "480", "userProto": "root", "withGrantOption": "480"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 29, "version": "1"}} +46 {"table": {"columns": [{"id": 1, "name": "id", "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 2, "name": "addr", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "session_id", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}, {"id": 4, "name": "locality", "nullable": true, "type": {"family": "JsonFamily", "oid": 3802}}, {"id": 5, "name": "sql_addr", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "crdb_region", "type": {"family": "BytesFamily", "oid": 17}}, {"id": 7, "name": "binary_version", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "is_draining", "nullable": true, "type": {"oid": 16}}], "formatVersion": 3, "id": 46, "name": "sql_instances", "nextColumnId": 9, "nextConstraintId": 2, "nextIndexId": 3, "nextMutationId": 1, "parentId": 1, "primaryIndex": {"constraintId": 1, "encodingType": 1, "foreignKey": {}, "geoConfig": {}, "id": 2, "interleave": {}, "keyColumnDirections": ["ASC", "ASC"], "keyColumnIds": [6, 1], "keyColumnNames": ["crdb_region", "id"], "name": "primary", "partitioning": {}, "sharded": {}, "storeColumnIds": [2, 3, 4, 5, 7, 8], "storeColumnNames": ["addr", "session_id", "locality", "sql_addr", "binary_version", "is_draining"], "unique": true, "version": 4}, "privileges": {"ownerProto": "node", "users": [{"privileges": "480", "userProto": "admin", "withGrantOption": "480"}, {"privileges": "480", "userProto": "root", "withGrantOption": "480"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 29, "version": "1"}} 47 {"table": {"checks": [{"columnIds": [1, 2], "constraintId": 2, "expr": "start_key < end_key", "name": "check_bounds"}], "columns": [{"id": 1, "name": "start_key", "type": {"family": "BytesFamily", "oid": 17}}, {"id": 2, "name": "end_key", "type": {"family": "BytesFamily", "oid": 17}}, {"id": 3, "name": "config", "type": {"family": "BytesFamily", "oid": 17}}], "excludeDataFromBackup": true, "formatVersion": 3, "id": 47, "name": "span_configurations", "nextColumnId": 4, "nextConstraintId": 3, "nextIndexId": 2, "nextMutationId": 1, "parentId": 1, "primaryIndex": {"constraintId": 1, "encodingType": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "keyColumnDirections": ["ASC"], "keyColumnIds": [1], "keyColumnNames": ["start_key"], "name": "primary", "partitioning": {}, "sharded": {}, "storeColumnIds": [2, 3], "storeColumnNames": ["end_key", "config"], "unique": true, "version": 4}, "privileges": {"ownerProto": "node", "users": [{"privileges": "480", "userProto": "admin", "withGrantOption": "480"}, {"privileges": "480", "userProto": "root", "withGrantOption": "480"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 29, "version": "1"}} 48 {"table": {"columns": [{"id": 1, "name": "value", "type": {"family": 
"IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 48, "name": "role_id_seq", "parentId": 1, "primaryIndex": {"encodingType": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "keyColumnDirections": ["ASC"], "keyColumnIds": [1], "keyColumnNames": ["value"], "name": "primary", "partitioning": {}, "sharded": {}, "version": 4}, "privileges": {"ownerProto": "node", "users": [{"privileges": "800", "userProto": "admin", "withGrantOption": "800"}, {"privileges": "800", "userProto": "root", "withGrantOption": "800"}], "version": 3}, "replacementOf": {"time": {}}, "sequenceOpts": {"cacheSize": "1", "increment": "1", "maxValue": "2147483647", "minValue": "100", "sequenceOwner": {}, "start": "100"}, "unexposedParentSchemaId": 29, "version": "1"}} 50 {"table": {"columns": [{"id": 1, "name": "tenant_id", "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 2, "name": "name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "value", "type": {"family": "StringFamily", "oid": 25}}, {"defaultExpr": "now():::TIMESTAMP", "id": 4, "name": "last_updated", "type": {"family": "TimestampFamily", "oid": 1114}}, {"id": 5, "name": "value_type", "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "reason", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 50, "name": "tenant_settings", "nextColumnId": 7, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "parentId": 1, "primaryIndex": {"constraintId": 1, "encodingType": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "keyColumnDirections": ["ASC", "ASC"], "keyColumnIds": [1, 2], "keyColumnNames": ["tenant_id", "name"], "name": "primary", "partitioning": {}, "sharded": {}, "storeColumnIds": [3, 4, 5, 6], "storeColumnNames": ["value", "last_updated", "value_type", "reason"], "unique": true, "version": 4}, "privileges": {"ownerProto": "node", "users": [{"privileges": "480", "userProto": "admin", "withGrantOption": "480"}, {"privileges": "480", "userProto": "root", "withGrantOption": "480"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 29, "version": "1"}} @@ -163,6 +163,7 @@ SELECT id, strip_volatile(descriptor) FROM crdb_internal.kv_catalog_descriptor O 64 {"table": {"checks": [{"columnIds": [6], "constraintId": 2, "expr": "crdb_internal_created_at_database_id_index_id_table_id_shard_16 IN (0:::INT8, 1:::INT8, 2:::INT8, 3:::INT8, 4:::INT8, 5:::INT8, 6:::INT8, 7:::INT8, 8:::INT8, 9:::INT8, 10:::INT8, 11:::INT8, 12:::INT8, 13:::INT8, 14:::INT8, 15:::INT8)", "fromHashShardedColumn": true, "name": "check_crdb_internal_created_at_database_id_index_id_table_id_shard_16"}], "columns": [{"defaultExpr": "now():::TIMESTAMPTZ", "id": 1, "name": "created_at", "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 2, "name": "database_id", "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 3, "name": "table_id", "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 4, "name": "index_id", "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 5, "name": "statistics", "type": {"family": "JsonFamily", "oid": 3802}}, {"computeExpr": "mod(fnv32(md5(crdb_internal.datums_to_bytes(created_at))), 16:::INT8)", "hidden": true, "id": 6, "name": "crdb_internal_created_at_database_id_index_id_table_id_shard_16", "type": {"family": "IntFamily", "oid": 23, "width": 32}, "virtual": true}], "formatVersion": 3, "id": 64, "name": "mvcc_statistics", "nextColumnId": 7, "nextConstraintId": 3, 
"nextIndexId": 2, "nextMutationId": 1, "parentId": 1, "primaryIndex": {"constraintId": 1, "encodingType": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "keyColumnDirections": ["ASC", "ASC", "ASC", "ASC", "ASC"], "keyColumnIds": [6, 1, 2, 3, 4], "keyColumnNames": ["crdb_internal_created_at_database_id_index_id_table_id_shard_16", "created_at", "database_id", "table_id", "index_id"], "name": "mvcc_statistics_pkey", "partitioning": {}, "sharded": {"columnNames": ["created_at", "database_id", "index_id", "table_id"], "isSharded": true, "name": "crdb_internal_created_at_database_id_index_id_table_id_shard_16", "shardBuckets": 16}, "storeColumnIds": [5], "storeColumnNames": ["statistics"], "unique": true, "version": 4}, "privileges": {"ownerProto": "node", "users": [{"privileges": "480", "userProto": "admin", "withGrantOption": "480"}, {"privileges": "480", "userProto": "root", "withGrantOption": "480"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 29, "version": "1"}} 65 {"table": {"checks": [{"columnIds": [23], "constraintId": 2, "expr": "crdb_internal_end_time_start_time_shard_16 IN (0:::INT8, 1:::INT8, 2:::INT8, 3:::INT8, 4:::INT8, 5:::INT8, 6:::INT8, 7:::INT8, 8:::INT8, 9:::INT8, 10:::INT8, 11:::INT8, 12:::INT8, 13:::INT8, 14:::INT8, 15:::INT8)", "fromHashShardedColumn": true, "name": "check_crdb_internal_end_time_start_time_shard_16"}], "columns": [{"id": 1, "name": "transaction_id", "type": {"family": "UuidFamily", "oid": 2950}}, {"id": 2, "name": "transaction_fingerprint_id", "type": {"family": "BytesFamily", "oid": 17}}, {"id": 3, "name": "query_summary", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "implicit_txn", "nullable": true, "type": {"oid": 16}}, {"id": 5, "name": "session_id", "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "start_time", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 7, "name": "end_time", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 8, "name": "user_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 9, "name": "app_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 10, "name": "user_priority", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 11, "name": "retries", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 12, "name": "last_retry_reason", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 13, "name": "problems", "nullable": true, "type": {"arrayContents": {"family": "IntFamily", "oid": 20, "width": 64}, "arrayElemType": "IntFamily", "family": "ArrayFamily", "oid": 1016, "width": 64}}, {"id": 14, "name": "causes", "nullable": true, "type": {"arrayContents": {"family": "IntFamily", "oid": 20, "width": 64}, "arrayElemType": "IntFamily", "family": "ArrayFamily", "oid": 1016, "width": 64}}, {"id": 15, "name": "stmt_execution_ids", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}, {"id": 16, "name": "cpu_sql_nanos", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 17, "name": "last_error_code", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 18, "name": "status", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 19, "name": "contention_time", "nullable": true, 
"type": {"family": "IntervalFamily", "intervalDurationField": {}, "oid": 1186}}, {"id": 20, "name": "contention_info", "nullable": true, "type": {"family": "JsonFamily", "oid": 3802}}, {"id": 21, "name": "details", "nullable": true, "type": {"family": "JsonFamily", "oid": 3802}}, {"defaultExpr": "now():::TIMESTAMPTZ", "id": 22, "name": "created", "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"computeExpr": "mod(fnv32(md5(crdb_internal.datums_to_bytes(end_time, start_time))), 16:::INT8)", "hidden": true, "id": 23, "name": "crdb_internal_end_time_start_time_shard_16", "type": {"family": "IntFamily", "oid": 23, "width": 32}, "virtual": true}], "formatVersion": 3, "id": 65, "indexes": [{"foreignKey": {}, "geoConfig": {}, "id": 2, "interleave": {}, "keyColumnDirections": ["ASC"], "keyColumnIds": [2], "keyColumnNames": ["transaction_fingerprint_id"], "keySuffixColumnIds": [1], "name": "transaction_fingerprint_id_idx", "partitioning": {}, "sharded": {}, "version": 3}, {"foreignKey": {}, "geoConfig": {}, "id": 3, "interleave": {}, "keyColumnDirections": ["ASC", "DESC", "DESC"], "keyColumnIds": [23, 6, 7], "keyColumnNames": ["crdb_internal_end_time_start_time_shard_16", "start_time", "end_time"], "keySuffixColumnIds": [1], "name": "time_range_idx", "partitioning": {}, "sharded": {"columnNames": ["end_time", "start_time"], "isSharded": true, "name": "crdb_internal_end_time_start_time_shard_16", "shardBuckets": 16}, "version": 3}], "name": "transaction_execution_insights", "nextColumnId": 24, "nextConstraintId": 3, "nextIndexId": 4, "nextMutationId": 1, "parentId": 1, "primaryIndex": {"constraintId": 1, "encodingType": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "keyColumnDirections": ["ASC"], "keyColumnIds": [1], "keyColumnNames": ["transaction_id"], "name": "primary", "partitioning": {}, "sharded": {}, "storeColumnIds": [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22], "storeColumnNames": ["transaction_fingerprint_id", "query_summary", "implicit_txn", "session_id", "start_time", "end_time", "user_name", "app_name", "user_priority", "retries", "last_retry_reason", "problems", "causes", "stmt_execution_ids", "cpu_sql_nanos", "last_error_code", "status", "contention_time", "contention_info", "details", "created"], "unique": true, "version": 4}, "privileges": {"ownerProto": "node", "users": [{"privileges": "480", "userProto": "admin", "withGrantOption": "480"}, {"privileges": "480", "userProto": "root", "withGrantOption": "480"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 29, "version": "1"}} 66 {"table": {"checks": [{"columnIds": [29], "constraintId": 2, "expr": "crdb_internal_end_time_start_time_shard_16 IN (0:::INT8, 1:::INT8, 2:::INT8, 3:::INT8, 4:::INT8, 5:::INT8, 6:::INT8, 7:::INT8, 8:::INT8, 9:::INT8, 10:::INT8, 11:::INT8, 12:::INT8, 13:::INT8, 14:::INT8, 15:::INT8)", "fromHashShardedColumn": true, "name": "check_crdb_internal_end_time_start_time_shard_16"}], "columns": [{"id": 1, "name": "session_id", "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "transaction_id", "type": {"family": "UuidFamily", "oid": 2950}}, {"id": 3, "name": "transaction_fingerprint_id", "type": {"family": "BytesFamily", "oid": 17}}, {"id": 4, "name": "statement_id", "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "statement_fingerprint_id", "type": {"family": "BytesFamily", "oid": 17}}, {"id": 6, "name": "problem", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 
7, "name": "causes", "nullable": true, "type": {"arrayContents": {"family": "IntFamily", "oid": 20, "width": 64}, "arrayElemType": "IntFamily", "family": "ArrayFamily", "oid": 1016, "width": 64}}, {"id": 8, "name": "query", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 9, "name": "status", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 10, "name": "start_time", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 11, "name": "end_time", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 12, "name": "full_scan", "nullable": true, "type": {"oid": 16}}, {"id": 13, "name": "user_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 14, "name": "app_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 15, "name": "user_priority", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 16, "name": "database_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 17, "name": "plan_gist", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 18, "name": "retries", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 19, "name": "last_retry_reason", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 20, "name": "execution_node_ids", "nullable": true, "type": {"arrayContents": {"family": "IntFamily", "oid": 20, "width": 64}, "arrayElemType": "IntFamily", "family": "ArrayFamily", "oid": 1016, "width": 64}}, {"id": 21, "name": "index_recommendations", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}, {"id": 22, "name": "implicit_txn", "nullable": true, "type": {"oid": 16}}, {"id": 23, "name": "cpu_sql_nanos", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 24, "name": "error_code", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 25, "name": "contention_time", "nullable": true, "type": {"family": "IntervalFamily", "intervalDurationField": {}, "oid": 1186}}, {"id": 26, "name": "contention_info", "nullable": true, "type": {"family": "JsonFamily", "oid": 3802}}, {"id": 27, "name": "details", "nullable": true, "type": {"family": "JsonFamily", "oid": 3802}}, {"defaultExpr": "now():::TIMESTAMPTZ", "id": 28, "name": "created", "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"computeExpr": "mod(fnv32(md5(crdb_internal.datums_to_bytes(end_time, start_time))), 16:::INT8)", "hidden": true, "id": 29, "name": "crdb_internal_end_time_start_time_shard_16", "type": {"family": "IntFamily", "oid": 23, "width": 32}, "virtual": true}], "formatVersion": 3, "id": 66, "indexes": [{"foreignKey": {}, "geoConfig": {}, "id": 2, "interleave": {}, "keyColumnDirections": ["ASC"], "keyColumnIds": [2], "keyColumnNames": ["transaction_id"], "keySuffixColumnIds": [4], "name": "transaction_id_idx", "partitioning": {}, "sharded": {}, "version": 3}, {"foreignKey": {}, "geoConfig": {}, "id": 3, "interleave": {}, "keyColumnDirections": ["ASC", "DESC", "DESC"], "keyColumnIds": [3, 10, 11], "keyColumnNames": ["transaction_fingerprint_id", "start_time", "end_time"], "keySuffixColumnIds": [4, 2], "name": "transaction_fingerprint_id_idx", "partitioning": {}, "sharded": {}, "version": 3}, {"foreignKey": {}, "geoConfig": {}, "id": 4, "interleave": {}, "keyColumnDirections": ["ASC", "DESC", "DESC"], 
"keyColumnIds": [5, 10, 11], "keyColumnNames": ["statement_fingerprint_id", "start_time", "end_time"], "keySuffixColumnIds": [4, 2], "name": "statement_fingerprint_id_idx", "partitioning": {}, "sharded": {}, "version": 3}, {"foreignKey": {}, "geoConfig": {}, "id": 5, "interleave": {}, "keyColumnDirections": ["ASC", "DESC", "DESC"], "keyColumnIds": [29, 10, 11], "keyColumnNames": ["crdb_internal_end_time_start_time_shard_16", "start_time", "end_time"], "keySuffixColumnIds": [4, 2], "name": "time_range_idx", "partitioning": {}, "sharded": {"columnNames": ["end_time", "start_time"], "isSharded": true, "name": "crdb_internal_end_time_start_time_shard_16", "shardBuckets": 16}, "version": 3}], "name": "statement_execution_insights", "nextColumnId": 30, "nextConstraintId": 3, "nextIndexId": 6, "nextMutationId": 1, "parentId": 1, "primaryIndex": {"constraintId": 1, "encodingType": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "keyColumnDirections": ["ASC", "ASC"], "keyColumnIds": [4, 2], "keyColumnNames": ["statement_id", "transaction_id"], "name": "primary", "partitioning": {}, "sharded": {}, "storeColumnIds": [1, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28], "storeColumnNames": ["session_id", "transaction_fingerprint_id", "statement_fingerprint_id", "problem", "causes", "query", "status", "start_time", "end_time", "full_scan", "user_name", "app_name", "user_priority", "database_name", "plan_gist", "retries", "last_retry_reason", "execution_node_ids", "index_recommendations", "implicit_txn", "cpu_sql_nanos", "error_code", "contention_time", "contention_info", "details", "created"], "unique": true, "version": 4}, "privileges": {"ownerProto": "node", "users": [{"privileges": "480", "userProto": "admin", "withGrantOption": "480"}, {"privileges": "480", "userProto": "root", "withGrantOption": "480"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 29, "version": "1"}} +67 {"table": {"checks": [{"columnIds": [16], "constraintId": 2, "expr": "crdb_internal_last_updated_table_id_shard_16 IN (0:::INT8, 1:::INT8, 2:::INT8, 3:::INT8, 4:::INT8, 5:::INT8, 6:::INT8, 7:::INT8, 8:::INT8, 9:::INT8, 10:::INT8, 11:::INT8, 12:::INT8, 13:::INT8, 14:::INT8, 15:::INT8)", "fromHashShardedColumn": true, "name": "check_crdb_internal_last_updated_table_id_shard_16"}], "columns": [{"id": 1, "name": "db_id", "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 2, "name": "table_id", "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 3, "name": "db_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "schema_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "table_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "total_columns", "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 7, "name": "total_indexes", "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 8, "name": "store_ids", "type": {"arrayContents": {"family": "IntFamily", "oid": 20, "width": 64}, "arrayElemType": "IntFamily", "family": "ArrayFamily", "oid": 1016, "width": 64}}, {"id": 9, "name": "replication_size_bytes", "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 10, "name": "total_ranges", "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 11, "name": "total_live_data_bytes", "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 12, "name": "total_data_bytes", "type": {"family": "IntFamily", "oid": 20, 
"width": 64}}, {"id": 13, "name": "perc_live_data", "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 14, "name": "last_update_error", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"defaultExpr": "now():::TIMESTAMPTZ", "id": 15, "name": "last_updated", "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"computeExpr": "mod(fnv32(md5(crdb_internal.datums_to_bytes(table_id, last_updated))), 16:::INT8)", "hidden": true, "id": 16, "name": "crdb_internal_last_updated_table_id_shard_16", "type": {"family": "IntFamily", "oid": 23, "width": 32}, "virtual": true}], "formatVersion": 3, "id": 67, "indexes": [{"foreignKey": {}, "geoConfig": {}, "id": 2, "interleave": {}, "keyColumnDirections": ["DESC", "ASC"], "keyColumnIds": [9, 2], "keyColumnNames": ["replication_size_bytes", "table_id"], "keySuffixColumnIds": [1], "name": "replication_size_bytes_table_id_idx", "partitioning": {}, "sharded": {}, "version": 3}, {"foreignKey": {}, "geoConfig": {}, "id": 3, "interleave": {}, "keyColumnDirections": ["DESC", "ASC"], "keyColumnIds": [10, 2], "keyColumnNames": ["total_ranges", "table_id"], "keySuffixColumnIds": [1], "name": "total_ranges_table_id_idx", "partitioning": {}, "sharded": {}, "version": 3}, {"foreignKey": {}, "geoConfig": {}, "id": 4, "interleave": {}, "keyColumnDirections": ["DESC", "ASC"], "keyColumnIds": [6, 2], "keyColumnNames": ["total_columns", "table_id"], "keySuffixColumnIds": [1], "name": "total_columns_table_id_idx", "partitioning": {}, "sharded": {}, "version": 3}, {"foreignKey": {}, "geoConfig": {}, "id": 5, "interleave": {}, "keyColumnDirections": ["DESC", "ASC"], "keyColumnIds": [7, 2], "keyColumnNames": ["total_indexes", "table_id"], "keySuffixColumnIds": [1], "name": "total_indexes_table_id_idx", "partitioning": {}, "sharded": {}, "version": 3}, {"compositeColumnIds": [13], "foreignKey": {}, "geoConfig": {}, "id": 6, "interleave": {}, "keyColumnDirections": ["DESC", "ASC"], "keyColumnIds": [13, 2], "keyColumnNames": ["perc_live_data", "table_id"], "keySuffixColumnIds": [1], "name": "perc_live_data_id_idx", "partitioning": {}, "sharded": {}, "version": 3}, {"foreignKey": {}, "geoConfig": {}, "id": 7, "interleave": {}, "keyColumnDirections": ["ASC", "DESC", "ASC"], "keyColumnIds": [16, 15, 2], "keyColumnNames": ["crdb_internal_last_updated_table_id_shard_16", "last_updated", "table_id"], "keySuffixColumnIds": [1], "name": "last_updated_idx", "partitioning": {}, "sharded": {"columnNames": ["last_updated", "table_id"], "isSharded": true, "name": "crdb_internal_last_updated_table_id_shard_16", "shardBuckets": 16}, "version": 3}, {"foreignKey": {}, "geoConfig": {}, "id": 8, "interleave": {}, "invertedColumnKinds": ["TRIGRAM"], "keyColumnDirections": ["ASC"], "keyColumnIds": [3], "keyColumnNames": ["db_name"], "keySuffixColumnIds": [1, 2], "name": "db_name_gin", "partitioning": {}, "sharded": {}, "type": "INVERTED", "version": 3}, {"foreignKey": {}, "geoConfig": {}, "id": 9, "interleave": {}, "invertedColumnKinds": ["TRIGRAM"], "keyColumnDirections": ["ASC"], "keyColumnIds": [5], "keyColumnNames": ["table_name"], "keySuffixColumnIds": [1, 2], "name": "table_name_gin", "partitioning": {}, "sharded": {}, "type": "INVERTED", "version": 3}, {"foreignKey": {}, "geoConfig": {}, "id": 10, "interleave": {}, "invertedColumnKinds": ["TRIGRAM"], "keyColumnDirections": ["ASC"], "keyColumnIds": [4], "keyColumnNames": ["schema_name"], "keySuffixColumnIds": [1, 2], "name": "schema_name_gin", "partitioning": {}, "sharded": {}, "type": "INVERTED", "version": 
3}, {"foreignKey": {}, "geoConfig": {}, "id": 11, "interleave": {}, "invertedColumnKinds": ["DEFAULT"], "keyColumnDirections": ["ASC"], "keyColumnIds": [8], "keyColumnNames": ["store_ids"], "keySuffixColumnIds": [1, 2], "name": "store_ids_gin", "partitioning": {}, "sharded": {}, "type": "INVERTED", "version": 3}], "name": "table_metadata", "nextColumnId": 17, "nextConstraintId": 3, "nextIndexId": 12, "nextMutationId": 1, "parentId": 1, "primaryIndex": {"constraintId": 1, "encodingType": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "keyColumnDirections": ["ASC", "ASC"], "keyColumnIds": [1, 2], "keyColumnNames": ["db_id", "table_id"], "name": "primary", "partitioning": {}, "sharded": {}, "storeColumnIds": [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], "storeColumnNames": ["db_name", "schema_name", "table_name", "total_columns", "total_indexes", "store_ids", "replication_size_bytes", "total_ranges", "total_live_data_bytes", "total_data_bytes", "perc_live_data", "last_update_error", "last_updated"], "unique": true, "version": 4}, "privileges": {"ownerProto": "node", "users": [{"privileges": "480", "userProto": "admin", "withGrantOption": "480"}, {"privileges": "480", "userProto": "root", "withGrantOption": "480"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 29, "version": "1"}} 100 {"database": {"defaultPrivileges": {}, "id": 100, "name": "defaultdb", "privileges": {"ownerProto": "root", "users": [{"privileges": "2", "userProto": "admin", "withGrantOption": "2"}, {"privileges": "2048", "userProto": "public"}, {"privileges": "2", "userProto": "root", "withGrantOption": "2"}], "version": 3}, "schemas": {"public": {"id": 101}}, "version": "1"}} 101 {"schema": {"id": 101, "name": "public", "parentId": 100, "privileges": {"ownerProto": "root", "users": [{"privileges": "2", "userProto": "admin", "withGrantOption": "2"}, {"privileges": "516", "userProto": "public"}, {"privileges": "2", "userProto": "root", "withGrantOption": "2"}], "version": 3}, "version": "1"}} 102 {"database": {"defaultPrivileges": {}, "id": 102, "name": "postgres", "privileges": {"ownerProto": "root", "users": [{"privileges": "2", "userProto": "admin", "withGrantOption": "2"}, {"privileges": "2048", "userProto": "public"}, {"privileges": "2", "userProto": "root", "withGrantOption": "2"}], "version": 3}, "schemas": {"public": {"id": 103}}, "version": "1"}} @@ -177,227 +178,228 @@ SELECT id, strip_volatile(descriptor) FROM crdb_internal.kv_catalog_descriptor O 111 {"table": {"checks": [{"columnIds": [1], "constraintId": 2, "expr": "k > 0:::INT8", "name": "ck"}], "columns": [{"id": 1, "name": "k", "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 2, "name": "v", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "dependedOnBy": [{"columnIds": [1, 2], "id": 112}], "formatVersion": 3, "id": 111, "name": "kv", "nextColumnId": 3, "nextConstraintId": 3, "nextIndexId": 2, "nextMutationId": 1, "parentId": 106, "primaryIndex": {"constraintId": 1, "encodingType": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "keyColumnDirections": ["ASC"], "keyColumnIds": [1], "keyColumnNames": ["k"], "name": "kv_pkey", "partitioning": {}, "sharded": {}, "storeColumnIds": [2], "storeColumnNames": ["v"], "unique": true, "version": 4}, "privileges": {"ownerProto": "root", "users": [{"privileges": "2", "userProto": "admin", "withGrantOption": "2"}, {"privileges": "2", "userProto": "root", "withGrantOption": "2"}], "version": 3}, "replacementOf": {"time": {}}, 
"unexposedParentSchemaId": 107, "version": "4"}} 112 {"table": {"columns": [{"id": 1, "name": "k", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 2, "name": "v", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"defaultExpr": "unique_rowid()", "hidden": true, "id": 3, "name": "rowid", "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "dependsOn": [111], "formatVersion": 3, "id": 112, "indexes": [{"createdExplicitly": true, "foreignKey": {}, "geoConfig": {}, "id": 2, "interleave": {}, "keyColumnDirections": ["ASC"], "keyColumnIds": [2], "keyColumnNames": ["v"], "keySuffixColumnIds": [3], "name": "idx", "partitioning": {}, "sharded": {}, "version": 4}], "isMaterializedView": true, "name": "mv", "nextColumnId": 4, "nextConstraintId": 2, "nextIndexId": 4, "nextMutationId": 1, "parentId": 106, "primaryIndex": {"constraintId": 1, "encodingType": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "keyColumnDirections": ["ASC"], "keyColumnIds": [3], "keyColumnNames": ["rowid"], "name": "mv_pkey", "partitioning": {}, "sharded": {}, "storeColumnIds": [1, 2], "storeColumnNames": ["k", "v"], "unique": true, "version": 4}, "privileges": {"ownerProto": "root", "users": [{"privileges": "2", "userProto": "admin", "withGrantOption": "2"}, {"privileges": "2", "userProto": "root", "withGrantOption": "2"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 107, "version": "8", "viewQuery": "SELECT k, v FROM db.public.kv"}} 113 {"function": {"functionBody": "SELECT json_remove_path(json_remove_path(json_remove_path(json_remove_path(json_remove_path(json_remove_path(json_remove_path(json_remove_path(json_remove_path(json_remove_path(json_remove_path(json_remove_path(d, ARRAY['table':::STRING, 'families':::STRING]:::STRING[]), ARRAY['table':::STRING, 'nextFamilyId':::STRING]:::STRING[]), ARRAY['table':::STRING, 'indexes':::STRING, '0':::STRING, 'createdAtNanos':::STRING]:::STRING[]), ARRAY['table':::STRING, 'indexes':::STRING, '1':::STRING, 'createdAtNanos':::STRING]:::STRING[]), ARRAY['table':::STRING, 'indexes':::STRING, '2':::STRING, 'createdAtNanos':::STRING]:::STRING[]), ARRAY['table':::STRING, 'primaryIndex':::STRING, 'createdAtNanos':::STRING]:::STRING[]), ARRAY['table':::STRING, 'createAsOfTime':::STRING]:::STRING[]), ARRAY['table':::STRING, 'modificationTime':::STRING]:::STRING[]), ARRAY['function':::STRING, 'modificationTime':::STRING]:::STRING[]), ARRAY['type':::STRING, 'modificationTime':::STRING]:::STRING[]), ARRAY['schema':::STRING, 'modificationTime':::STRING]:::STRING[]), ARRAY['database':::STRING, 'modificationTime':::STRING]:::STRING[]);", "id": 113, "lang": "SQL", "name": "strip_volatile", "nullInputBehavior": "CALLED_ON_NULL_INPUT", "params": [{"class": "IN", "name": "d", "type": {"family": "JsonFamily", "oid": 3802}}], "parentId": 104, "parentSchemaId": 105, "privileges": {"ownerProto": "root", "users": [{"privileges": "2", "userProto": "admin", "withGrantOption": "2"}, {"privileges": "1048576", "userProto": "public"}, {"privileges": "2", "userProto": "root", "withGrantOption": "2"}], "version": 3}, "returnType": {"type": {"family": "JsonFamily", "oid": 3802}}, "version": "1", "volatility": "STABLE"}} -4294966970 {"table": {"columns": [{"id": 1, "name": "srid", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 2, "name": "auth_name", "nullable": true, "type": {"family": "StringFamily", "oid": 1043, "visibleType": 7, "width": 256}}, {"id": 3, "name": 
"auth_srid", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 4, "name": "srtext", "nullable": true, "type": {"family": "StringFamily", "oid": 1043, "visibleType": 7, "width": 2048}}, {"id": 5, "name": "proj4text", "nullable": true, "type": {"family": "StringFamily", "oid": 1043, "visibleType": 7, "width": 2048}}], "formatVersion": 3, "id": 4294966970, "name": "spatial_ref_sys", "nextColumnId": 6, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294966973, "version": "1"}} -4294966971 {"table": {"columns": [{"id": 1, "name": "f_table_catalog", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 2, "name": "f_table_schema", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "f_table_name", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "f_geometry_column", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 5, "name": "coord_dimension", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 6, "name": "srid", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 7, "name": "type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294966971, "name": "geometry_columns", "nextColumnId": 8, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294966973, "version": "1"}} -4294966972 {"table": {"columns": [{"id": 1, "name": "f_table_catalog", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 2, "name": "f_table_schema", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "f_table_name", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "f_geography_column", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 5, "name": "coord_dimension", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 6, "name": "srid", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 7, "name": "type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294966972, "name": "geography_columns", "nextColumnId": 8, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294966973, "version": "1"}} -4294966973 {"schema": {"defaultPrivileges": {"type": "SCHEMA"}, "id": 4294966973, "name": "pg_extension", "privileges": {"ownerProto": "node", "users": [{"privileges": "512", "userProto": "public"}], "version": 3}, "version": "1"}} -4294966974 {"table": {"columns": [{"id": 1, "name": "schemaname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 2, 
"name": "viewname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "viewowner", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "definition", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294966974, "name": "pg_views", "nextColumnId": 5, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294966975 {"table": {"columns": [{"id": 1, "name": "usename", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 2, "name": "usesysid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "usecreatedb", "nullable": true, "type": {"oid": 16}}, {"id": 4, "name": "usesuper", "nullable": true, "type": {"oid": 16}}, {"id": 5, "name": "userepl", "nullable": true, "type": {"oid": 16}}, {"id": 6, "name": "usebypassrls", "nullable": true, "type": {"oid": 16}}, {"id": 7, "name": "passwd", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "valuntil", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 9, "name": "useconfig", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}], "formatVersion": 3, "id": 4294966975, "name": "pg_user", "nextColumnId": 10, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294966976 {"table": {"columns": [{"id": 1, "name": "umid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "srvid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "srvname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "umuser", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 5, "name": "usename", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 6, "name": "umoptions", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}], "formatVersion": 3, "id": 4294966976, "name": "pg_user_mappings", "nextColumnId": 7, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294966977 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "umuser", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "umserver", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "umoptions", 
"nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}], "formatVersion": 3, "id": 4294966977, "name": "pg_user_mapping", "nextColumnId": 5, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294966978 {"table": {"columns": [{"id": 1, "name": "oid", "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "typname", "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "typnamespace", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "typowner", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 5, "name": "typlen", "nullable": true, "type": {"family": "IntFamily", "oid": 21, "width": 16}}, {"id": 6, "name": "typbyval", "nullable": true, "type": {"oid": 16}}, {"id": 7, "name": "typtype", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 8, "name": "typcategory", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 9, "name": "typispreferred", "nullable": true, "type": {"oid": 16}}, {"id": 10, "name": "typisdefined", "nullable": true, "type": {"oid": 16}}, {"id": 11, "name": "typdelim", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 12, "name": "typrelid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 13, "name": "typelem", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 14, "name": "typarray", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 15, "name": "typinput", "nullable": true, "type": {"family": "OidFamily", "oid": 24}}, {"id": 16, "name": "typoutput", "nullable": true, "type": {"family": "OidFamily", "oid": 24}}, {"id": 17, "name": "typreceive", "nullable": true, "type": {"family": "OidFamily", "oid": 24}}, {"id": 18, "name": "typsend", "nullable": true, "type": {"family": "OidFamily", "oid": 24}}, {"id": 19, "name": "typmodin", "nullable": true, "type": {"family": "OidFamily", "oid": 24}}, {"id": 20, "name": "typmodout", "nullable": true, "type": {"family": "OidFamily", "oid": 24}}, {"id": 21, "name": "typanalyze", "nullable": true, "type": {"family": "OidFamily", "oid": 24}}, {"id": 22, "name": "typalign", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 23, "name": "typstorage", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 24, "name": "typnotnull", "nullable": true, "type": {"oid": 16}}, {"id": 25, "name": "typbasetype", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 26, "name": "typtypmod", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 27, "name": "typndims", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 28, "name": "typcollation", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 29, "name": "typdefaultbin", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 30, "name": "typdefault", "nullable": true, "type": {"family": 
"StringFamily", "oid": 25}}, {"id": 31, "name": "typacl", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}], "formatVersion": 3, "id": 4294966978, "indexes": [{"foreignKey": {}, "geoConfig": {}, "id": 2, "interleave": {}, "keyColumnDirections": ["ASC"], "keyColumnIds": [1], "keyColumnNames": ["oid"], "name": "pg_type_oid_idx", "partitioning": {}, "sharded": {}, "storeColumnIds": [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31], "storeColumnNames": ["typname", "typnamespace", "typowner", "typlen", "typbyval", "typtype", "typcategory", "typispreferred", "typisdefined", "typdelim", "typrelid", "typelem", "typarray", "typinput", "typoutput", "typreceive", "typsend", "typmodin", "typmodout", "typanalyze", "typalign", "typstorage", "typnotnull", "typbasetype", "typtypmod", "typndims", "typcollation", "typdefaultbin", "typdefault", "typacl"], "version": 3}], "name": "pg_type", "nextColumnId": 32, "nextConstraintId": 2, "nextIndexId": 3, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294966979 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "tmplname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "tmplnamespace", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "tmplinit", "nullable": true, "type": {"family": "OidFamily", "oid": 24}}, {"id": 5, "name": "tmpllexize", "nullable": true, "type": {"family": "OidFamily", "oid": 24}}], "formatVersion": 3, "id": 4294966979, "name": "pg_ts_template", "nextColumnId": 6, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294966980 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "prsname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "prsnamespace", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "prsstart", "nullable": true, "type": {"family": "OidFamily", "oid": 24}}, {"id": 5, "name": "prstoken", "nullable": true, "type": {"family": "OidFamily", "oid": 24}}, {"id": 6, "name": "prsend", "nullable": true, "type": {"family": "OidFamily", "oid": 24}}, {"id": 7, "name": "prsheadline", "nullable": true, "type": {"family": "OidFamily", "oid": 24}}, {"id": 8, "name": "prslextype", "nullable": true, "type": {"family": "OidFamily", "oid": 24}}], "formatVersion": 3, "id": 4294966980, "name": "pg_ts_parser", "nextColumnId": 9, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", 
"userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294966981 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "dictname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "dictnamespace", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "dictowner", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 5, "name": "dicttemplate", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 6, "name": "dictinitoption", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294966981, "name": "pg_ts_dict", "nextColumnId": 7, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294966982 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "cfgname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "cfgnamespace", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "cfgowner", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 5, "name": "cfgparser", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}], "formatVersion": 3, "id": 4294966982, "name": "pg_ts_config", "nextColumnId": 6, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294966983 {"table": {"columns": [{"id": 1, "name": "mapcfg", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "maptokentype", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 3, "name": "mapseqno", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 4, "name": "mapdict", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}], "formatVersion": 3, "id": 4294966983, "name": "pg_ts_config_map", "nextColumnId": 5, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294966984 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "tgrelid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "tgname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "tgfoid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 5, "name": "tgtype", "nullable": true, "type": {"family": "IntFamily", "oid": 21, 
"width": 16}}, {"id": 6, "name": "tgenabled", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 7, "name": "tgisinternal", "nullable": true, "type": {"oid": 16}}, {"id": 8, "name": "tgconstrrelid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 9, "name": "tgconstrindid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 10, "name": "tgconstraint", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 11, "name": "tgdeferrable", "nullable": true, "type": {"oid": 16}}, {"id": 12, "name": "tginitdeferred", "nullable": true, "type": {"oid": 16}}, {"id": 13, "name": "tgnargs", "nullable": true, "type": {"family": "IntFamily", "oid": 21, "width": 16}}, {"id": 14, "name": "tgattr", "nullable": true, "type": {"arrayContents": {"family": "IntFamily", "oid": 21, "width": 16}, "arrayElemType": "IntFamily", "family": 200, "oid": 22, "width": 16}}, {"id": 15, "name": "tgargs", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}, {"id": 16, "name": "tgqual", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 17, "name": "tgoldtable", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 18, "name": "tgnewtable", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 19, "name": "tgparentid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}], "formatVersion": 3, "id": 4294966984, "name": "pg_trigger", "nextColumnId": 20, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294966985 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "trftype", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "trflang", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "trffromsql", "nullable": true, "type": {"family": "OidFamily", "oid": 24}}, {"id": 5, "name": "trftosql", "nullable": true, "type": {"family": "OidFamily", "oid": 24}}], "formatVersion": 3, "id": 4294966985, "name": "pg_transform", "nextColumnId": 6, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294966986 {"table": {"columns": [{"id": 1, "name": "name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "abbrev", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "utc_offset", "nullable": true, "type": {"family": "IntervalFamily", "intervalDurationField": {}, "oid": 1186}}, {"id": 4, "name": "is_dst", "nullable": true, "type": {"oid": 16}}], "formatVersion": 3, "id": 4294966986, "indexes": [{"foreignKey": {}, "geoConfig": {}, "id": 2, "interleave": {}, "keyColumnDirections": ["ASC"], "keyColumnIds": [1], "keyColumnNames": ["name"], "name": "pg_timezone_names_name_idx", "partitioning": {}, "sharded": {}, 
"storeColumnIds": [2, 3, 4], "storeColumnNames": ["abbrev", "utc_offset", "is_dst"], "version": 3}], "name": "pg_timezone_names", "nextColumnId": 5, "nextConstraintId": 2, "nextIndexId": 3, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294966987 {"table": {"columns": [{"id": 1, "name": "abbrev", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "utc_offset", "nullable": true, "type": {"family": "IntervalFamily", "intervalDurationField": {}, "oid": 1186}}, {"id": 3, "name": "is_dst", "nullable": true, "type": {"oid": 16}}], "formatVersion": 3, "id": 4294966987, "name": "pg_timezone_abbrevs", "nextColumnId": 4, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294966988 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "spcname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "spcowner", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "spclocation", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "spcacl", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}, {"id": 6, "name": "spcoptions", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}], "formatVersion": 3, "id": 4294966988, "name": "pg_tablespace", "nextColumnId": 7, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294966989 {"table": {"columns": [{"id": 1, "name": "schemaname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 2, "name": "tablename", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "tableowner", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "tablespace", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 5, "name": "hasindexes", "nullable": true, "type": {"oid": 16}}, {"id": 6, "name": "hasrules", "nullable": true, "type": {"oid": 16}}, {"id": 7, "name": "hastriggers", "nullable": true, "type": {"oid": 16}}, {"id": 8, "name": "rowsecurity", "nullable": true, "type": {"oid": 16}}], "formatVersion": 3, "id": 4294966989, "name": "pg_tables", "nextColumnId": 9, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": 
{"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294966990 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "subdbid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "subname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "subowner", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 5, "name": "subenabled", "nullable": true, "type": {"oid": 16}}, {"id": 6, "name": "subconninfo", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "subslotname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 8, "name": "subsynccommit", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 9, "name": "subpublications", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}], "formatVersion": 3, "id": 4294966990, "name": "pg_subscription", "nextColumnId": 10, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294966991 {"table": {"columns": [{"id": 1, "name": "srsubid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "srrelid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "srsubstate", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 4, "name": "srsublsn", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294966991, "name": "pg_subscription_rel", "nextColumnId": 5, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294966992 {"table": {"columns": [{"id": 1, "name": "schemaname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 2, "name": "tablename", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "attname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "inherited", "nullable": true, "type": {"oid": 16}}, {"id": 5, "name": "null_frac", "nullable": true, "type": {"family": "FloatFamily", "oid": 700, "visibleType": 5, "width": 32}}, {"id": 6, "name": "avg_width", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 7, "name": "n_distinct", "nullable": true, "type": {"family": "FloatFamily", "oid": 700, "visibleType": 5, "width": 32}}, {"id": 8, "name": "most_common_vals", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}, {"id": 9, "name": "most_common_freqs", "nullable": true, "type": {"arrayContents": {"family": "FloatFamily", 
"oid": 700, "visibleType": 5, "width": 32}, "arrayElemType": "FloatFamily", "family": "ArrayFamily", "oid": 1021, "visibleType": 5, "width": 32}}, {"id": 10, "name": "histogram_bounds", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}, {"id": 11, "name": "correlation", "nullable": true, "type": {"family": "FloatFamily", "oid": 700, "visibleType": 5, "width": 32}}, {"id": 12, "name": "most_common_elems", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}, {"id": 13, "name": "most_common_elem_freqs", "nullable": true, "type": {"arrayContents": {"family": "FloatFamily", "oid": 700, "visibleType": 5, "width": 32}, "arrayElemType": "FloatFamily", "family": "ArrayFamily", "oid": 1021, "visibleType": 5, "width": 32}}, {"id": 14, "name": "elem_count_histogram", "nullable": true, "type": {"arrayContents": {"family": "FloatFamily", "oid": 700, "visibleType": 5, "width": 32}, "arrayElemType": "FloatFamily", "family": "ArrayFamily", "oid": 1021, "visibleType": 5, "width": 32}}], "formatVersion": 3, "id": 4294966992, "name": "pg_stats", "nextColumnId": 15, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294966993 {"table": {"columns": [{"id": 1, "name": "schemaname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 2, "name": "tablename", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "statistics_schemaname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "statistics_name", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 5, "name": "statistics_owner", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 6, "name": "attnames", "nullable": true, "type": {"arrayContents": {"family": 11, "oid": 19}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1003}}, {"id": 7, "name": "kinds", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1002, "visibleType": 9, "width": 1}}, {"id": 8, "name": "n_distinct", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}, {"id": 9, "name": "dependencies", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}, {"id": 10, "name": "most_common_vals", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}, {"id": 11, "name": "most_common_val_nulls", "nullable": true, "type": {"arrayContents": {"oid": 16}, "arrayElemType": "BoolFamily", "family": "ArrayFamily", "oid": 1000}}, {"id": 12, "name": "most_common_freqs", "nullable": true, "type": {"arrayContents": {"family": "FloatFamily", "oid": 701, "width": 64}, "arrayElemType": "FloatFamily", "family": "ArrayFamily", "oid": 1022, "width": 64}}, {"id": 13, "name": "most_common_base_freqs", "nullable": true, "type": {"arrayContents": {"family": "FloatFamily", "oid": 701, "width": 64}, "arrayElemType": "FloatFamily", "family": "ArrayFamily", "oid": 
1022, "width": 64}}], "formatVersion": 3, "id": 4294966993, "name": "pg_stats_ext", "nextColumnId": 14, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294966994 {"table": {"columns": [{"id": 1, "name": "starelid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "staattnum", "nullable": true, "type": {"family": "IntFamily", "oid": 21, "width": 16}}, {"id": 3, "name": "stainherit", "nullable": true, "type": {"oid": 16}}, {"id": 4, "name": "stanullfrac", "nullable": true, "type": {"family": "FloatFamily", "oid": 700, "visibleType": 5, "width": 32}}, {"id": 5, "name": "stawidth", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 6, "name": "stadistinct", "nullable": true, "type": {"family": "FloatFamily", "oid": 700, "visibleType": 5, "width": 32}}, {"id": 7, "name": "stakind1", "nullable": true, "type": {"family": "IntFamily", "oid": 21, "width": 16}}, {"id": 8, "name": "stakind2", "nullable": true, "type": {"family": "IntFamily", "oid": 21, "width": 16}}, {"id": 9, "name": "stakind3", "nullable": true, "type": {"family": "IntFamily", "oid": 21, "width": 16}}, {"id": 10, "name": "stakind4", "nullable": true, "type": {"family": "IntFamily", "oid": 21, "width": 16}}, {"id": 11, "name": "stakind5", "nullable": true, "type": {"family": "IntFamily", "oid": 21, "width": 16}}, {"id": 12, "name": "staop1", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 13, "name": "staop2", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 14, "name": "staop3", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 15, "name": "staop4", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 16, "name": "staop5", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 17, "name": "stacoll1", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 18, "name": "stacoll2", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 19, "name": "stacoll3", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 20, "name": "stacoll4", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 21, "name": "stacoll5", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 22, "name": "stanumbers1", "nullable": true, "type": {"arrayContents": {"family": "FloatFamily", "oid": 700, "visibleType": 5, "width": 32}, "arrayElemType": "FloatFamily", "family": "ArrayFamily", "oid": 1021, "visibleType": 5, "width": 32}}, {"id": 23, "name": "stanumbers2", "nullable": true, "type": {"arrayContents": {"family": "FloatFamily", "oid": 700, "visibleType": 5, "width": 32}, "arrayElemType": "FloatFamily", "family": "ArrayFamily", "oid": 1021, "visibleType": 5, "width": 32}}, {"id": 24, "name": "stanumbers3", "nullable": true, "type": {"arrayContents": {"family": "FloatFamily", "oid": 700, "visibleType": 5, "width": 32}, "arrayElemType": "FloatFamily", "family": "ArrayFamily", "oid": 1021, "visibleType": 5, "width": 32}}, {"id": 25, "name": "stanumbers4", "nullable": true, "type": {"arrayContents": {"family": "FloatFamily", "oid": 700, "visibleType": 5, "width": 32}, "arrayElemType": 
"FloatFamily", "family": "ArrayFamily", "oid": 1021, "visibleType": 5, "width": 32}}, {"id": 26, "name": "stanumbers5", "nullable": true, "type": {"arrayContents": {"family": "FloatFamily", "oid": 700, "visibleType": 5, "width": 32}, "arrayElemType": "FloatFamily", "family": "ArrayFamily", "oid": 1021, "visibleType": 5, "width": 32}}, {"id": 27, "name": "stavalues1", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}, {"id": 28, "name": "stavalues2", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}, {"id": 29, "name": "stavalues3", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}, {"id": 30, "name": "stavalues4", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}, {"id": 31, "name": "stavalues5", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}], "formatVersion": 3, "id": 4294966994, "name": "pg_statistic", "nextColumnId": 32, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294966995 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "stxrelid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "stxname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "stxnamespace", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 5, "name": "stxowner", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 6, "name": "stxstattarget", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 7, "name": "stxkeys", "nullable": true, "type": {"arrayContents": {"family": "IntFamily", "oid": 21, "width": 16}, "arrayElemType": "IntFamily", "family": 200, "oid": 22, "width": 16}}, {"id": 8, "name": "stxkind", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1002, "visibleType": 9, "width": 1}}], "formatVersion": 3, "id": 4294966995, "name": "pg_statistic_ext", "nextColumnId": 9, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294966996 {"table": {"columns": [{"id": 1, "name": "stxoid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "stxdndistinct", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}, {"id": 3, "name": "stxddependencies", "nullable": true, 
"type": {"family": "BytesFamily", "oid": 17}}, {"id": 4, "name": "stxdmcv", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}], "formatVersion": 3, "id": 4294966996, "name": "pg_statistic_ext_data", "nextColumnId": 5, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294966997 {"table": {"columns": [{"id": 1, "name": "relid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "schemaname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "relname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "heap_blks_read", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 5, "name": "heap_blks_hit", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 6, "name": "idx_blks_read", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 7, "name": "idx_blks_hit", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 8, "name": "toast_blks_read", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 9, "name": "toast_blks_hit", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 10, "name": "tidx_blks_read", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 11, "name": "tidx_blks_hit", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294966997, "name": "pg_statio_user_tables", "nextColumnId": 12, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294966998 {"table": {"columns": [{"id": 1, "name": "relid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "schemaname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "relname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "blks_read", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 5, "name": "blks_hit", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294966998, "name": "pg_statio_user_sequences", "nextColumnId": 6, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294966999 {"table": {"columns": [{"id": 1, "name": "relid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "indexrelid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "schemaname", "nullable": 
true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "relname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 5, "name": "indexrelname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 6, "name": "idx_blks_read", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 7, "name": "idx_blks_hit", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294966999, "name": "pg_statio_user_indexes", "nextColumnId": 8, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967000 {"table": {"columns": [{"id": 1, "name": "relid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "schemaname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "relname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "heap_blks_read", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 5, "name": "heap_blks_hit", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 6, "name": "idx_blks_read", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 7, "name": "idx_blks_hit", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 8, "name": "toast_blks_read", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 9, "name": "toast_blks_hit", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 10, "name": "tidx_blks_read", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 11, "name": "tidx_blks_hit", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294967000, "name": "pg_statio_sys_tables", "nextColumnId": 12, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967001 {"table": {"columns": [{"id": 1, "name": "relid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "schemaname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "relname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "blks_read", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 5, "name": "blks_hit", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294967001, "name": "pg_statio_sys_sequences", "nextColumnId": 6, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 
4294967103, "version": "1"}} -4294967002 {"table": {"columns": [{"id": 1, "name": "relid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "indexrelid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "schemaname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "relname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 5, "name": "indexrelname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 6, "name": "idx_blks_read", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 7, "name": "idx_blks_hit", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294967002, "name": "pg_statio_sys_indexes", "nextColumnId": 8, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967003 {"table": {"columns": [{"id": 1, "name": "relid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "schemaname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "relname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "heap_blks_read", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 5, "name": "heap_blks_hit", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 6, "name": "idx_blks_read", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 7, "name": "idx_blks_hit", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 8, "name": "toast_blks_read", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 9, "name": "toast_blks_hit", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 10, "name": "tidx_blks_read", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 11, "name": "tidx_blks_hit", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294967003, "name": "pg_statio_all_tables", "nextColumnId": 12, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967004 {"table": {"columns": [{"id": 1, "name": "relid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "schemaname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "relname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "blks_read", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 5, "name": "blks_hit", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294967004, "name": "pg_statio_all_sequences", "nextColumnId": 6, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, 
"primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967005 {"table": {"columns": [{"id": 1, "name": "relid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "indexrelid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "schemaname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "relname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 5, "name": "indexrelname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 6, "name": "idx_blks_read", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 7, "name": "idx_blks_hit", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294967005, "name": "pg_statio_all_indexes", "nextColumnId": 8, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967006 {"table": {"columns": [{"id": 1, "name": "relid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "schemaname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "relname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "seq_scan", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 5, "name": "seq_tup_read", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 6, "name": "idx_scan", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 7, "name": "idx_tup_fetch", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 8, "name": "n_tup_ins", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 9, "name": "n_tup_upd", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 10, "name": "n_tup_del", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 11, "name": "n_tup_hot_upd", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294967006, "name": "pg_stat_xact_user_tables", "nextColumnId": 12, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967007 {"table": {"columns": [{"id": 1, "name": "funcid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "schemaname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "funcname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "calls", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, 
{"id": 5, "name": "total_time", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 6, "name": "self_time", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}], "formatVersion": 3, "id": 4294967007, "name": "pg_stat_xact_user_functions", "nextColumnId": 7, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967008 {"table": {"columns": [{"id": 1, "name": "relid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "schemaname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "relname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "seq_scan", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 5, "name": "seq_tup_read", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 6, "name": "idx_scan", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 7, "name": "idx_tup_fetch", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 8, "name": "n_tup_ins", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 9, "name": "n_tup_upd", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 10, "name": "n_tup_del", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 11, "name": "n_tup_hot_upd", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294967008, "name": "pg_stat_xact_sys_tables", "nextColumnId": 12, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967009 {"table": {"columns": [{"id": 1, "name": "relid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "schemaname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "relname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "seq_scan", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 5, "name": "seq_tup_read", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 6, "name": "idx_scan", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 7, "name": "idx_tup_fetch", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 8, "name": "n_tup_ins", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 9, "name": "n_tup_upd", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 10, "name": "n_tup_del", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 11, "name": "n_tup_hot_upd", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294967009, 
"name": "pg_stat_xact_all_tables", "nextColumnId": 12, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967010 {"table": {"columns": [{"id": 1, "name": "pid", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 2, "name": "status", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "receive_start_lsn", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "receive_start_tli", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 5, "name": "written_lsn", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "flushed_lsn", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "received_tli", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 8, "name": "last_msg_send_time", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 9, "name": "last_msg_receipt_time", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 10, "name": "latest_end_lsn", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 11, "name": "latest_end_time", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 12, "name": "slot_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 13, "name": "sender_host", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 14, "name": "sender_port", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 15, "name": "conninfo", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967010, "name": "pg_stat_wal_receiver", "nextColumnId": 16, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967011 {"table": {"columns": [{"id": 1, "name": "relid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "schemaname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "relname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "seq_scan", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 5, "name": "seq_tup_read", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 6, "name": "idx_scan", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 7, "name": "idx_tup_fetch", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 8, "name": "n_tup_ins", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 9, "name": "n_tup_upd", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 10, "name": "n_tup_del", "nullable": true, "type": {"family": 
"IntFamily", "oid": 20, "width": 64}}, {"id": 11, "name": "n_tup_hot_upd", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 12, "name": "n_live_tup", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 13, "name": "n_dead_tup", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 14, "name": "n_mod_since_analyze", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 15, "name": "n_ins_since_vacuum", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 16, "name": "last_vacuum", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 17, "name": "last_autovacuum", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 18, "name": "last_analyze", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 19, "name": "last_autoanalyze", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 20, "name": "vacuum_count", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 21, "name": "autovacuum_count", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 22, "name": "analyze_count", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 23, "name": "autoanalyze_count", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294967011, "name": "pg_stat_user_tables", "nextColumnId": 24, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967012 {"table": {"columns": [{"id": 1, "name": "relid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "indexrelid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "schemaname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "relname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 5, "name": "indexrelname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 6, "name": "idx_scan", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 7, "name": "idx_tup_read", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 8, "name": "idx_tup_fetch", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294967012, "name": "pg_stat_user_indexes", "nextColumnId": 9, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967013 {"table": {"columns": [{"id": 1, "name": "funcid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "schemaname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "funcname", "nullable": true, "type": {"family": 11, "oid": 
19}}, {"id": 4, "name": "calls", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 5, "name": "total_time", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 6, "name": "self_time", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}], "formatVersion": 3, "id": 4294967013, "name": "pg_stat_user_functions", "nextColumnId": 7, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967014 {"table": {"columns": [{"id": 1, "name": "relid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "schemaname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "relname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "seq_scan", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 5, "name": "seq_tup_read", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 6, "name": "idx_scan", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 7, "name": "idx_tup_fetch", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 8, "name": "n_tup_ins", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 9, "name": "n_tup_upd", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 10, "name": "n_tup_del", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 11, "name": "n_tup_hot_upd", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 12, "name": "n_live_tup", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 13, "name": "n_dead_tup", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 14, "name": "n_mod_since_analyze", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 15, "name": "n_ins_since_vacuum", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 16, "name": "last_vacuum", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 17, "name": "last_autovacuum", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 18, "name": "last_analyze", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 19, "name": "last_autoanalyze", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 20, "name": "vacuum_count", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 21, "name": "autovacuum_count", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 22, "name": "analyze_count", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 23, "name": "autoanalyze_count", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294967014, "name": "pg_stat_sys_tables", "nextColumnId": 24, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, 
"interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967015 {"table": {"columns": [{"id": 1, "name": "relid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "indexrelid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "schemaname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "relname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 5, "name": "indexrelname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 6, "name": "idx_scan", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 7, "name": "idx_tup_read", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 8, "name": "idx_tup_fetch", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294967015, "name": "pg_stat_sys_indexes", "nextColumnId": 9, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967016 {"table": {"columns": [{"id": 1, "name": "subid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "subname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "pid", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 4, "name": "relid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 5, "name": "received_lsn", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "last_msg_send_time", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 7, "name": "last_msg_receipt_time", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 8, "name": "latest_end_lsn", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 9, "name": "latest_end_time", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}], "formatVersion": 3, "id": 4294967016, "name": "pg_stat_subscription", "nextColumnId": 10, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967017 {"table": {"columns": [{"id": 1, "name": "pid", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 2, "name": "ssl", "nullable": true, "type": {"oid": 16}}, {"id": 3, "name": "version", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "cipher", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "bits", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 6, "name": "compression", "nullable": true, "type": {"oid": 16}}, {"id": 7, "name": "client_dn", 
"nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "client_serial", "nullable": true, "type": {"family": "DecimalFamily", "oid": 1700}}, {"id": 9, "name": "issuer_dn", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967017, "name": "pg_stat_ssl", "nextColumnId": 10, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967018 {"table": {"columns": [{"id": 1, "name": "name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "blks_zeroed", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 3, "name": "blks_hit", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 4, "name": "blks_read", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 5, "name": "blks_written", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 6, "name": "blks_exists", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 7, "name": "flushes", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 8, "name": "truncates", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 9, "name": "stats_reset", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}], "formatVersion": 3, "id": 4294967018, "name": "pg_stat_slru", "nextColumnId": 10, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967019 {"table": {"columns": [{"id": 1, "name": "pid", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 2, "name": "usesysid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "usename", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "application_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "client_addr", "nullable": true, "type": {"family": "INetFamily", "oid": 869}}, {"id": 6, "name": "client_hostname", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "client_port", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 8, "name": "backend_start", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 9, "name": "backend_xmin", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 10, "name": "state", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 11, "name": "sent_lsn", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 12, "name": "write_lsn", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 13, "name": "flush_lsn", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 14, "name": 
"replay_lsn", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 15, "name": "write_lag", "nullable": true, "type": {"family": "IntervalFamily", "intervalDurationField": {}, "oid": 1186}}, {"id": 16, "name": "flush_lag", "nullable": true, "type": {"family": "IntervalFamily", "intervalDurationField": {}, "oid": 1186}}, {"id": 17, "name": "replay_lag", "nullable": true, "type": {"family": "IntervalFamily", "intervalDurationField": {}, "oid": 1186}}, {"id": 18, "name": "sync_priority", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 19, "name": "sync_state", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 20, "name": "reply_time", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}], "formatVersion": 3, "id": 4294967019, "name": "pg_stat_replication", "nextColumnId": 21, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967020 {"table": {"columns": [{"id": 1, "name": "pid", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 2, "name": "datid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "datname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "relid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 5, "name": "phase", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "heap_blks_total", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 7, "name": "heap_blks_scanned", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 8, "name": "heap_blks_vacuumed", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 9, "name": "index_vacuum_count", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 10, "name": "max_dead_tuples", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 11, "name": "num_dead_tuples", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294967020, "name": "pg_stat_progress_vacuum", "nextColumnId": 12, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967021 {"table": {"columns": [{"id": 1, "name": "pid", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 2, "name": "datid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "datname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "relid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 5, "name": "index_relid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 6, "name": "command", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "phase", 
"nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "lockers_total", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 9, "name": "lockers_done", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 10, "name": "current_locker_pid", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 11, "name": "blocks_total", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 12, "name": "blocks_done", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 13, "name": "tuples_total", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 14, "name": "tuples_done", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 15, "name": "partitions_total", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 16, "name": "partitions_done", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294967021, "name": "pg_stat_progress_create_index", "nextColumnId": 17, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967022 {"table": {"columns": [{"id": 1, "name": "pid", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 2, "name": "datid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "datname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "relid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 5, "name": "command", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "phase", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "cluster_index_relid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 8, "name": "heap_tuples_scanned", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 9, "name": "heap_tuples_written", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 10, "name": "heap_blks_total", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 11, "name": "heap_blks_scanned", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 12, "name": "index_rebuild_count", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294967022, "name": "pg_stat_progress_cluster", "nextColumnId": 13, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967023 {"table": {"columns": [{"id": 1, "name": "pid", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 2, "name": "phase", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, 
{"id": 3, "name": "backup_total", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 4, "name": "backup_streamed", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 5, "name": "tablespaces_total", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 6, "name": "tablespaces_streamed", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294967023, "name": "pg_stat_progress_basebackup", "nextColumnId": 7, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967024 {"table": {"columns": [{"id": 1, "name": "pid", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 2, "name": "datid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "datname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "relid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 5, "name": "phase", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "sample_blks_total", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 7, "name": "sample_blks_scanned", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 8, "name": "ext_stats_total", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 9, "name": "ext_stats_computed", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 10, "name": "child_tables_total", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 11, "name": "child_tables_done", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 12, "name": "current_child_table_relid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}], "formatVersion": 3, "id": 4294967024, "name": "pg_stat_progress_analyze", "nextColumnId": 13, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967025 {"table": {"columns": [{"id": 1, "name": "pid", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 2, "name": "gss_authenticated", "nullable": true, "type": {"oid": 16}}, {"id": 3, "name": "principal", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "encrypted", "nullable": true, "type": {"oid": 16}}], "formatVersion": 3, "id": 4294967025, "name": "pg_stat_gssapi", "nextColumnId": 5, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, 
"unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967026 {"table": {"columns": [{"id": 1, "name": "datid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "datname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "numbackends", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 4, "name": "xact_commit", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 5, "name": "xact_rollback", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 6, "name": "blks_read", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 7, "name": "blks_hit", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 8, "name": "tup_returned", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 9, "name": "tup_fetched", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 10, "name": "tup_inserted", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 11, "name": "tup_updated", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 12, "name": "tup_deleted", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 13, "name": "conflicts", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 14, "name": "temp_files", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 15, "name": "temp_bytes", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 16, "name": "deadlocks", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 17, "name": "checksum_failures", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 18, "name": "checksum_last_failure", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 19, "name": "blk_read_time", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 20, "name": "blk_write_time", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 21, "name": "stats_reset", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}], "formatVersion": 3, "id": 4294967026, "name": "pg_stat_database", "nextColumnId": 22, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967027 {"table": {"columns": [{"id": 1, "name": "datid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "datname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "confl_tablespace", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 4, "name": "confl_lock", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 5, "name": "confl_snapshot", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 6, "name": "confl_bufferpin", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 7, "name": "confl_deadlock", "nullable": true, 
"type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294967027, "name": "pg_stat_database_conflicts", "nextColumnId": 8, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967028 {"table": {"columns": [{"id": 1, "name": "checkpoints_timed", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 2, "name": "checkpoints_req", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 3, "name": "checkpoint_write_time", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 4, "name": "checkpoint_sync_time", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 5, "name": "buffers_checkpoint", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 6, "name": "buffers_clean", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 7, "name": "maxwritten_clean", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 8, "name": "buffers_backend", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 9, "name": "buffers_backend_fsync", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 10, "name": "buffers_alloc", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 11, "name": "stats_reset", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}], "formatVersion": 3, "id": 4294967028, "name": "pg_stat_bgwriter", "nextColumnId": 12, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967029 {"table": {"columns": [{"id": 1, "name": "archived_count", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 2, "name": "last_archived_wal", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "last_archived_time", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 4, "name": "failed_count", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 5, "name": "last_failed_wal", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "last_failed_time", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 7, "name": "stats_reset", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}], "formatVersion": 3, "id": 4294967029, "name": "pg_stat_archiver", "nextColumnId": 8, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, 
"unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967030 {"table": {"columns": [{"id": 1, "name": "relid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "schemaname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "relname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "seq_scan", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 5, "name": "seq_tup_read", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 6, "name": "idx_scan", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 7, "name": "idx_tup_fetch", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 8, "name": "n_tup_ins", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 9, "name": "n_tup_upd", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 10, "name": "n_tup_del", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 11, "name": "n_tup_hot_upd", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 12, "name": "n_live_tup", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 13, "name": "n_dead_tup", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 14, "name": "n_mod_since_analyze", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 15, "name": "n_ins_since_vacuum", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 16, "name": "last_vacuum", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 17, "name": "last_autovacuum", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 18, "name": "last_analyze", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 19, "name": "last_autoanalyze", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 20, "name": "vacuum_count", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 21, "name": "autovacuum_count", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 22, "name": "analyze_count", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 23, "name": "autoanalyze_count", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294967030, "name": "pg_stat_all_tables", "nextColumnId": 24, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967031 {"table": {"columns": [{"id": 1, "name": "relid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "indexrelid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "schemaname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "relname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 5, "name": "indexrelname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 6, "name": "idx_scan", 
"nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 7, "name": "idx_tup_read", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 8, "name": "idx_tup_fetch", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294967031, "name": "pg_stat_all_indexes", "nextColumnId": 9, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967032 {"table": {"columns": [{"id": 1, "name": "datid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "datname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "pid", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 4, "name": "usesysid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 5, "name": "usename", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 6, "name": "application_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "client_addr", "nullable": true, "type": {"family": "INetFamily", "oid": 869}}, {"id": 8, "name": "client_hostname", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 9, "name": "client_port", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 10, "name": "backend_start", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 11, "name": "xact_start", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 12, "name": "query_start", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 13, "name": "state_change", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 14, "name": "wait_event_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 15, "name": "wait_event", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 16, "name": "state", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 17, "name": "backend_xid", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 18, "name": "backend_xmin", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 19, "name": "query", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 20, "name": "backend_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 21, "name": "leader_pid", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}], "formatVersion": 3, "id": 4294967032, "name": "pg_stat_activity", "nextColumnId": 22, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967033 {"table": {"columns": [{"id": 1, "name": "name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": 
"off", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 3, "name": "size", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 4, "name": "allocated_size", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294967033, "name": "pg_shmem_allocations", "nextColumnId": 5, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967034 {"table": {"columns": [{"id": 1, "name": "dbid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "classid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "objid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "objsubid", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 5, "name": "refclassid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 6, "name": "refobjid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 7, "name": "deptype", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}], "formatVersion": 3, "id": 4294967034, "name": "pg_shdepend", "nextColumnId": 8, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967035 {"table": {"columns": [{"id": 1, "name": "objoid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "classoid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "provider", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "label", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967035, "name": "pg_shseclabel", "nextColumnId": 5, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967036 {"table": {"columns": [{"id": 1, "name": "objoid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "classoid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "description", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967036, "name": "pg_shdescription", "nextColumnId": 4, "nextConstraintId": 1, "nextMutationId": 1, "primaryIndex": {"foreignKey": {}, "geoConfig": {}, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, 
"unexposedParentSchemaId": 4294967103, "version": "1", "viewQuery": "SELECT objoid, classoid, description FROM \"\".crdb_internal.kv_catalog_comments WHERE classoid = 4294967085:::OID"}} -4294967037 {"table": {"columns": [{"id": 1, "name": "usename", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 2, "name": "usesysid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "usecreatedb", "nullable": true, "type": {"oid": 16}}, {"id": 4, "name": "usesuper", "nullable": true, "type": {"oid": 16}}, {"id": 5, "name": "userepl", "nullable": true, "type": {"oid": 16}}, {"id": 6, "name": "usebypassrls", "nullable": true, "type": {"oid": 16}}, {"id": 7, "name": "passwd", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "valuntil", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 9, "name": "useconfig", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}], "formatVersion": 3, "id": 4294967037, "name": "pg_shadow", "nextColumnId": 10, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967038 {"table": {"columns": [{"id": 1, "name": "name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "setting", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "unit", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "category", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "short_desc", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "extra_desc", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "context", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "vartype", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 9, "name": "source", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 10, "name": "min_val", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 11, "name": "max_val", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 12, "name": "enumvals", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 13, "name": "boot_val", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 14, "name": "reset_val", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 15, "name": "sourcefile", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 16, "name": "sourceline", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 17, "name": "pending_restart", "nullable": true, "type": {"oid": 16}}], "formatVersion": 3, "id": 4294967038, "name": "pg_settings", "nextColumnId": 18, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", 
"userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967039 {"table": {"columns": [{"id": 1, "name": "schemaname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 2, "name": "sequencename", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "sequenceowner", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "data_type", "nullable": true, "type": {"family": "OidFamily", "oid": 2206}}, {"id": 5, "name": "start_value", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 6, "name": "min_value", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 7, "name": "max_value", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 8, "name": "increment_by", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 9, "name": "cycle", "nullable": true, "type": {"oid": 16}}, {"id": 10, "name": "cache_size", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 11, "name": "last_value", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294967039, "name": "pg_sequences", "nextColumnId": 12, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967040 {"table": {"columns": [{"id": 1, "name": "seqrelid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "seqtypid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "seqstart", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 4, "name": "seqincrement", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 5, "name": "seqmax", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 6, "name": "seqmin", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 7, "name": "seqcache", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 8, "name": "seqcycle", "nullable": true, "type": {"oid": 16}}], "formatVersion": 3, "id": 4294967040, "name": "pg_sequence", "nextColumnId": 9, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967041 {"table": {"columns": [{"id": 1, "name": "objoid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "classoid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "objsubid", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 4, "name": "provider", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "label", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 
4294967041, "name": "pg_seclabel", "nextColumnId": 6, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967042 {"table": {"columns": [{"id": 1, "name": "objoid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "classoid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "objsubid", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 4, "name": "objtype", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "objnamespace", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 6, "name": "objname", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "provider", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "label", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967042, "name": "pg_seclabels", "nextColumnId": 9, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967043 {"table": {"columns": [{"id": 1, "name": "schemaname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 2, "name": "tablename", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "rulename", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "definition", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967043, "name": "pg_rules", "nextColumnId": 5, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967044 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "rolname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "rolsuper", "nullable": true, "type": {"oid": 16}}, {"id": 4, "name": "rolinherit", "nullable": true, "type": {"oid": 16}}, {"id": 5, "name": "rolcreaterole", "nullable": true, "type": {"oid": 16}}, {"id": 6, "name": "rolcreatedb", "nullable": true, "type": {"oid": 16}}, {"id": 7, "name": "rolcatupdate", "nullable": true, "type": {"oid": 16}}, {"id": 8, "name": "rolcanlogin", "nullable": true, "type": {"oid": 16}}, {"id": 9, "name": "rolreplication", "nullable": true, "type": {"oid": 16}}, {"id": 10, "name": "rolconnlimit", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 11, "name": "rolpassword", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 12, "name": "rolvaliduntil", "nullable": 
true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 13, "name": "rolbypassrls", "nullable": true, "type": {"oid": 16}}, {"id": 14, "name": "rolconfig", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}], "formatVersion": 3, "id": 4294967044, "name": "pg_roles", "nextColumnId": 15, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967045 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "rulename", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "ev_class", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "ev_type", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 5, "name": "ev_enabled", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 6, "name": "is_instead", "nullable": true, "type": {"oid": 16}}, {"id": 7, "name": "ev_qual", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "ev_action", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967045, "name": "pg_rewrite", "nextColumnId": 9, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967046 {"table": {"columns": [{"id": 1, "name": "slot_name", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 2, "name": "plugin", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "slot_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "datoid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 5, "name": "database", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 6, "name": "temporary", "nullable": true, "type": {"oid": 16}}, {"id": 7, "name": "active", "nullable": true, "type": {"oid": 16}}, {"id": 8, "name": "active_pid", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 9, "name": "xmin", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 10, "name": "catalog_xmin", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 11, "name": "restart_lsn", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 12, "name": "confirmed_flush_lsn", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 13, "name": "wal_status", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 14, "name": "safe_wal_size", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294967046, "name": "pg_replication_slots", "nextColumnId": 15, "nextConstraintId": 2, 
"nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967047 {"table": {"columns": [{"id": 1, "name": "roident", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "roname", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967047, "name": "pg_replication_origin", "nextColumnId": 3, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967048 {"table": {"columns": [{"id": 1, "name": "local_id", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "external_id", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "remote_lsn", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "local_lsn", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967048, "name": "pg_replication_origin_status", "nextColumnId": 5, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967049 {"table": {"columns": [{"id": 1, "name": "rngtypid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "rngsubtype", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "rngcollation", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "rngsubopc", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 5, "name": "rngcanonical", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 6, "name": "rngsubdiff", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}], "formatVersion": 3, "id": 4294967049, "name": "pg_range", "nextColumnId": 7, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967050 {"table": {"columns": [{"id": 1, "name": "pubname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 2, "name": "schemaname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "tablename", "nullable": true, "type": {"family": 11, "oid": 19}}], "formatVersion": 3, "id": 4294967050, "name": "pg_publication_tables", "nextColumnId": 4, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, 
"geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967051 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "pubname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "pubowner", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "puballtables", "nullable": true, "type": {"oid": 16}}, {"id": 5, "name": "pubinsert", "nullable": true, "type": {"oid": 16}}, {"id": 6, "name": "pubupdate", "nullable": true, "type": {"oid": 16}}, {"id": 7, "name": "pubdelete", "nullable": true, "type": {"oid": 16}}, {"id": 8, "name": "pubtruncate", "nullable": true, "type": {"oid": 16}}, {"id": 9, "name": "pubviaroot", "nullable": true, "type": {"oid": 16}}], "formatVersion": 3, "id": 4294967051, "name": "pg_publication", "nextColumnId": 10, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967052 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "prpubid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "prrelid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}], "formatVersion": 3, "id": 4294967052, "name": "pg_publication_rel", "nextColumnId": 4, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967053 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "proname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "pronamespace", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "proowner", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 5, "name": "prolang", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 6, "name": "procost", "nullable": true, "type": {"family": "FloatFamily", "oid": 700, "visibleType": 5, "width": 32}}, {"id": 7, "name": "prorows", "nullable": true, "type": {"family": "FloatFamily", "oid": 700, "visibleType": 5, "width": 32}}, {"id": 8, "name": "provariadic", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 9, "name": "prosupport", "nullable": true, "type": {"family": "OidFamily", "oid": 24}}, {"id": 10, "name": "prokind", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 11, "name": "prosecdef", "nullable": true, "type": {"oid": 16}}, {"id": 12, "name": "proleakproof", "nullable": true, "type": {"oid": 16}}, {"id": 13, "name": "proisstrict", "nullable": true, "type": {"oid": 16}}, {"id": 
14, "name": "proretset", "nullable": true, "type": {"oid": 16}}, {"id": 15, "name": "provolatile", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 16, "name": "proparallel", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 17, "name": "pronargs", "nullable": true, "type": {"family": "IntFamily", "oid": 21, "width": 16}}, {"id": 18, "name": "pronargdefaults", "nullable": true, "type": {"family": "IntFamily", "oid": 21, "width": 16}}, {"id": 19, "name": "prorettype", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 20, "name": "proargtypes", "nullable": true, "type": {"arrayContents": {"family": "OidFamily", "oid": 26}, "arrayElemType": "OidFamily", "family": 201, "oid": 30}}, {"id": 21, "name": "proallargtypes", "nullable": true, "type": {"arrayContents": {"family": "OidFamily", "oid": 26}, "arrayElemType": "OidFamily", "family": "ArrayFamily", "oid": 1028}}, {"id": 22, "name": "proargmodes", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1002, "visibleType": 9, "width": 1}}, {"id": 23, "name": "proargnames", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}, {"id": 24, "name": "proargdefaults", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 25, "name": "protrftypes", "nullable": true, "type": {"arrayContents": {"family": "OidFamily", "oid": 26}, "arrayElemType": "OidFamily", "family": "ArrayFamily", "oid": 1028}}, {"id": 26, "name": "prosrc", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 27, "name": "probin", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 28, "name": "prosqlbody", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 29, "name": "proconfig", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}, {"id": 30, "name": "proacl", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}], "formatVersion": 3, "id": 4294967053, "indexes": [{"foreignKey": {}, "geoConfig": {}, "id": 2, "interleave": {}, "keyColumnDirections": ["ASC"], "keyColumnIds": [1], "keyColumnNames": ["oid"], "name": "pg_proc_oid_idx", "partitioning": {}, "sharded": {}, "storeColumnIds": [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30], "storeColumnNames": ["proname", "pronamespace", "proowner", "prolang", "procost", "prorows", "provariadic", "prosupport", "prokind", "prosecdef", "proleakproof", "proisstrict", "proretset", "provolatile", "proparallel", "pronargs", "pronargdefaults", "prorettype", "proargtypes", "proallargtypes", "proargmodes", "proargnames", "proargdefaults", "protrftypes", "prosrc", "probin", "prosqlbody", "proconfig", "proacl"], "version": 3}], "name": "pg_proc", "nextColumnId": 31, "nextConstraintId": 2, "nextIndexId": 3, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, 
"replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967054 {"table": {"columns": [{"id": 1, "name": "transaction", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 2, "name": "gid", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "prepared", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 4, "name": "owner", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 5, "name": "database", "nullable": true, "type": {"family": 11, "oid": 19}}], "formatVersion": 3, "id": 4294967054, "name": "pg_prepared_xacts", "nextColumnId": 6, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967055 {"table": {"columns": [{"id": 1, "name": "name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "statement", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "prepare_time", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 4, "name": "parameter_types", "nullable": true, "type": {"arrayContents": {"family": "OidFamily", "oid": 2206}, "arrayElemType": "OidFamily", "family": "ArrayFamily", "oid": 2211}}, {"id": 5, "name": "from_sql", "nullable": true, "type": {"oid": 16}}], "formatVersion": 3, "id": 4294967055, "name": "pg_prepared_statements", "nextColumnId": 6, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967056 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "polname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "polrelid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "polcmd", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 5, "name": "polpermissive", "nullable": true, "type": {"oid": 16}}, {"id": 6, "name": "polroles", "nullable": true, "type": {"arrayContents": {"family": "OidFamily", "oid": 26}, "arrayElemType": "OidFamily", "family": "ArrayFamily", "oid": 1028}}, {"id": 7, "name": "polqual", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "polwithcheck", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967056, "name": "pg_policy", "nextColumnId": 9, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967057 {"table": {"columns": [{"id": 1, "name": "schemaname", 
"nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 2, "name": "tablename", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "policyname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "permissive", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "roles", "nullable": true, "type": {"arrayContents": {"family": 11, "oid": 19}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1003}}, {"id": 6, "name": "cmd", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "qual", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "with_check", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967057, "name": "pg_policies", "nextColumnId": 9, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967058 {"table": {"columns": [{"id": 1, "name": "partrelid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "partstrat", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 3, "name": "partnatts", "nullable": true, "type": {"family": "IntFamily", "oid": 21, "width": 16}}, {"id": 4, "name": "partdefid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 5, "name": "partattrs", "nullable": true, "type": {"arrayContents": {"family": "IntFamily", "oid": 21, "width": 16}, "arrayElemType": "IntFamily", "family": 200, "oid": 22, "width": 16}}, {"id": 6, "name": "partclass", "nullable": true, "type": {"arrayContents": {"family": "OidFamily", "oid": 26}, "arrayElemType": "OidFamily", "family": 201, "oid": 30}}, {"id": 7, "name": "partcollation", "nullable": true, "type": {"arrayContents": {"family": "OidFamily", "oid": 26}, "arrayElemType": "OidFamily", "family": 201, "oid": 30}}, {"id": 8, "name": "partexprs", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967058, "name": "pg_partitioned_table", "nextColumnId": 9, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967059 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "opfmethod", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "opfname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "opfnamespace", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 5, "name": "opfowner", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}], "formatVersion": 3, "id": 4294967059, "name": "pg_opfamily", "nextColumnId": 6, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, 
"partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967060 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "oprname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "oprnamespace", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "oprowner", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 5, "name": "oprkind", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 6, "name": "oprcanmerge", "nullable": true, "type": {"oid": 16}}, {"id": 7, "name": "oprcanhash", "nullable": true, "type": {"oid": 16}}, {"id": 8, "name": "oprleft", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 9, "name": "oprright", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 10, "name": "oprresult", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 11, "name": "oprcom", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 12, "name": "oprnegate", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 13, "name": "oprcode", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 14, "name": "oprrest", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 15, "name": "oprjoin", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}], "formatVersion": 3, "id": 4294967060, "name": "pg_operator", "nextColumnId": 16, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967061 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "opcmethod", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "opcname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "opcnamespace", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 5, "name": "opcowner", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 6, "name": "opcfamily", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 7, "name": "opcintype", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 8, "name": "opcdefault", "nullable": true, "type": {"oid": 16}}, {"id": 9, "name": "opckeytype", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}], "formatVersion": 3, "id": 4294967061, "name": "pg_opclass", "nextColumnId": 10, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967062 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 
26}}, {"id": 2, "name": "nspname", "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "nspowner", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "nspacl", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}], "formatVersion": 3, "id": 4294967062, "indexes": [{"foreignKey": {}, "geoConfig": {}, "id": 2, "interleave": {}, "keyColumnDirections": ["ASC"], "keyColumnIds": [1], "keyColumnNames": ["oid"], "name": "pg_namespace_oid_idx", "partitioning": {}, "sharded": {}, "storeColumnIds": [2, 3, 4], "storeColumnNames": ["nspname", "nspowner", "nspacl"], "version": 3}], "name": "pg_namespace", "nextColumnId": 5, "nextConstraintId": 2, "nextIndexId": 3, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967063 {"table": {"columns": [{"id": 1, "name": "schemaname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 2, "name": "matviewname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "matviewowner", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "tablespace", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 5, "name": "hasindexes", "nullable": true, "type": {"oid": 16}}, {"id": 6, "name": "ispopulated", "nullable": true, "type": {"oid": 16}}, {"id": 7, "name": "definition", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967063, "name": "pg_matviews", "nextColumnId": 8, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967064 {"table": {"columns": [{"id": 1, "name": "locktype", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "database", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "relation", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "page", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 5, "name": "tuple", "nullable": true, "type": {"family": "IntFamily", "oid": 21, "width": 16}}, {"id": 6, "name": "virtualxid", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "transactionid", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 8, "name": "classid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 9, "name": "objid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 10, "name": "objsubid", "nullable": true, "type": {"family": "IntFamily", "oid": 21, "width": 16}}, {"id": 11, "name": "virtualtransaction", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 12, "name": "pid", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 13, "name": "mode", "nullable": true, "type": {"family": "StringFamily", 
"oid": 25}}, {"id": 14, "name": "granted", "nullable": true, "type": {"oid": 16}}, {"id": 15, "name": "fastpath", "nullable": true, "type": {"oid": 16}}], "formatVersion": 3, "id": 4294967064, "name": "pg_locks", "nextColumnId": 16, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967065 {"table": {"columns": [{"id": 1, "name": "loid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "pageno", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 3, "name": "data", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}], "formatVersion": 3, "id": 4294967065, "name": "pg_largeobject", "nextColumnId": 4, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967066 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "lomowner", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "lomacl", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}], "formatVersion": 3, "id": 4294967066, "name": "pg_largeobject_metadata", "nextColumnId": 4, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967067 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "lanname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "lanowner", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "lanispl", "nullable": true, "type": {"oid": 16}}, {"id": 5, "name": "lanpltrusted", "nullable": true, "type": {"oid": 16}}, {"id": 6, "name": "lanplcallfoid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 7, "name": "laninline", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 8, "name": "lanvalidator", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 9, "name": "lanacl", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}], "formatVersion": 3, "id": 4294967067, "name": "pg_language", "nextColumnId": 10, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": 
[{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967068 {"table": {"columns": [{"id": 1, "name": "objoid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "classoid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "objsubid", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 4, "name": "privtype", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 5, "name": "initprivs", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}], "formatVersion": 3, "id": 4294967068, "name": "pg_init_privs", "nextColumnId": 6, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967069 {"table": {"columns": [{"id": 1, "name": "inhrelid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "inhparent", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "inhseqno", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}], "formatVersion": 3, "id": 4294967069, "name": "pg_inherits", "nextColumnId": 4, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967070 {"table": {"columns": [{"id": 1, "name": "crdb_oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "schemaname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "tablename", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "indexname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 5, "name": "tablespace", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 6, "name": "indexdef", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967070, "name": "pg_indexes", "nextColumnId": 7, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967071 {"table": {"columns": [{"id": 1, "name": "indexrelid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "indrelid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "indnatts", "nullable": true, "type": {"family": "IntFamily", "oid": 21, "width": 16}}, {"id": 4, "name": "indisunique", "nullable": true, "type": {"oid": 16}}, {"id": 5, "name": "indnullsnotdistinct", "nullable": 
true, "type": {"oid": 16}}, {"id": 6, "name": "indisprimary", "nullable": true, "type": {"oid": 16}}, {"id": 7, "name": "indisexclusion", "nullable": true, "type": {"oid": 16}}, {"id": 8, "name": "indimmediate", "nullable": true, "type": {"oid": 16}}, {"id": 9, "name": "indisclustered", "nullable": true, "type": {"oid": 16}}, {"id": 10, "name": "indisvalid", "nullable": true, "type": {"oid": 16}}, {"id": 11, "name": "indcheckxmin", "nullable": true, "type": {"oid": 16}}, {"id": 12, "name": "indisready", "nullable": true, "type": {"oid": 16}}, {"id": 13, "name": "indislive", "nullable": true, "type": {"oid": 16}}, {"id": 14, "name": "indisreplident", "nullable": true, "type": {"oid": 16}}, {"id": 15, "name": "indkey", "nullable": true, "type": {"arrayContents": {"family": "IntFamily", "oid": 21, "width": 16}, "arrayElemType": "IntFamily", "family": 200, "oid": 22, "width": 16}}, {"id": 16, "name": "indcollation", "nullable": true, "type": {"arrayContents": {"family": "OidFamily", "oid": 26}, "arrayElemType": "OidFamily", "family": 201, "oid": 30}}, {"id": 17, "name": "indclass", "nullable": true, "type": {"arrayContents": {"family": "OidFamily", "oid": 26}, "arrayElemType": "OidFamily", "family": 201, "oid": 30}}, {"id": 18, "name": "indoption", "nullable": true, "type": {"arrayContents": {"family": "IntFamily", "oid": 21, "width": 16}, "arrayElemType": "IntFamily", "family": 200, "oid": 22, "width": 16}}, {"id": 19, "name": "indexprs", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 20, "name": "indpred", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 21, "name": "indnkeyatts", "nullable": true, "type": {"family": "IntFamily", "oid": 21, "width": 16}}], "formatVersion": 3, "id": 4294967071, "name": "pg_index", "nextColumnId": 22, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967072 {"table": {"columns": [{"id": 1, "name": "line_number", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 2, "name": "type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "database", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}, {"id": 4, "name": "user_name", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}, {"id": 5, "name": "address", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "netmask", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "auth_method", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "options", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}, {"id": 9, "name": "error", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967072, "name": "pg_hba_file_rules", "nextColumnId": 10, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": 
{"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967073 {"table": {"columns": [{"id": 1, "name": "groname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 2, "name": "grosysid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "grolist", "nullable": true, "type": {"arrayContents": {"family": "OidFamily", "oid": 26}, "arrayElemType": "OidFamily", "family": "ArrayFamily", "oid": 1028}}], "formatVersion": 3, "id": 4294967073, "name": "pg_group", "nextColumnId": 4, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967074 {"table": {"columns": [{"id": 1, "name": "ftrelid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "ftserver", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "ftoptions", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}], "formatVersion": 3, "id": 4294967074, "name": "pg_foreign_table", "nextColumnId": 4, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967075 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "srvname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "srvowner", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "srvfdw", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 5, "name": "srvtype", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "srvversion", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "srvacl", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}, {"id": 8, "name": "srvoptions", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}], "formatVersion": 3, "id": 4294967075, "name": "pg_foreign_server", "nextColumnId": 9, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967076 {"table": {"columns": [{"id": 1, "name": "oid", 
"nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "fdwname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "fdwowner", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "fdwhandler", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 5, "name": "fdwvalidator", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 6, "name": "fdwacl", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}, {"id": 7, "name": "fdwoptions", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}], "formatVersion": 3, "id": 4294967076, "name": "pg_foreign_data_wrapper", "nextColumnId": 8, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967077 {"table": {"columns": [{"id": 1, "name": "sourcefile", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "sourceline", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 3, "name": "seqno", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 4, "name": "name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "setting", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "applied", "nullable": true, "type": {"oid": 16}}, {"id": 7, "name": "error", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967077, "name": "pg_file_settings", "nextColumnId": 8, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967078 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "extname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "extowner", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "extnamespace", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 5, "name": "extrelocatable", "nullable": true, "type": {"oid": 16}}, {"id": 6, "name": "extversion", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "extconfig", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "extcondition", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967078, "name": "pg_extension", "nextColumnId": 9, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": 
[{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967079 {"table": {"columns": [{"id": 1, "name": "evtname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 2, "name": "evtevent", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "evtowner", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "evtfoid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 5, "name": "evtenabled", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 6, "name": "evttags", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}, {"id": 7, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}], "formatVersion": 3, "id": 4294967079, "name": "pg_event_trigger", "nextColumnId": 8, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967080 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "enumtypid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "enumsortorder", "nullable": true, "type": {"family": "FloatFamily", "oid": 700, "visibleType": 5, "width": 32}}, {"id": 4, "name": "enumlabel", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967080, "name": "pg_enum", "nextColumnId": 5, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967081 {"table": {"columns": [{"id": 1, "name": "objoid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "classoid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "objsubid", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 4, "name": "description", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967081, "name": "pg_description", "nextColumnId": 5, "nextConstraintId": 1, "nextMutationId": 1, "primaryIndex": {"foreignKey": {}, "geoConfig": {}, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1", "viewQuery": "SELECT objoid, classoid, objsubid, description FROM crdb_internal.kv_catalog_comments WHERE classoid != 4294967085 UNION ALL SELECT oid AS objoid, 4294967053:::OID AS classoid, 0:::INT4 AS objsubid, description AS description FROM crdb_internal.kv_builtin_function_comments"}} -4294967082 {"table": {"columns": [{"id": 1, "name": "classid", "nullable": true, 
"type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "objid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "objsubid", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 4, "name": "refclassid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 5, "name": "refobjid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 6, "name": "refobjsubid", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 7, "name": "deptype", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}], "formatVersion": 3, "id": 4294967082, "name": "pg_depend", "nextColumnId": 8, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967083 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "defaclrole", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "defaclnamespace", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "defaclobjtype", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 5, "name": "defaclacl", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}], "formatVersion": 3, "id": 4294967083, "name": "pg_default_acl", "nextColumnId": 6, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967084 {"table": {"columns": [{"id": 1, "name": "setconfig", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}, {"id": 2, "name": "setdatabase", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "setrole", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}], "formatVersion": 3, "id": 4294967084, "name": "pg_db_role_setting", "nextColumnId": 4, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967085 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "datname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "datdba", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "encoding", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 5, "name": 
"datcollate", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "datctype", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "datistemplate", "nullable": true, "type": {"oid": 16}}, {"id": 8, "name": "datallowconn", "nullable": true, "type": {"oid": 16}}, {"id": 9, "name": "datconnlimit", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 10, "name": "datlastsysoid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 11, "name": "datfrozenxid", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 12, "name": "datminmxid", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 13, "name": "dattablespace", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 14, "name": "datacl", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}], "formatVersion": 3, "id": 4294967085, "name": "pg_database", "nextColumnId": 15, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967086 {"table": {"columns": [{"id": 1, "name": "name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "statement", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "is_holdable", "nullable": true, "type": {"oid": 16}}, {"id": 4, "name": "is_binary", "nullable": true, "type": {"oid": 16}}, {"id": 5, "name": "is_scrollable", "nullable": true, "type": {"oid": 16}}, {"id": 6, "name": "creation_time", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}], "formatVersion": 3, "id": 4294967086, "name": "pg_cursors", "nextColumnId": 7, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967087 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "conname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "connamespace", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "conowner", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 5, "name": "conforencoding", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 6, "name": "contoencoding", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 7, "name": "conproc", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 8, "name": "condefault", "nullable": true, "type": {"oid": 16}}], "formatVersion": 3, "id": 4294967087, "name": "pg_conversion", "nextColumnId": 9, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": 
{}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967088 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "conname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "connamespace", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "contype", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 5, "name": "condeferrable", "nullable": true, "type": {"oid": 16}}, {"id": 6, "name": "condeferred", "nullable": true, "type": {"oid": 16}}, {"id": 7, "name": "convalidated", "nullable": true, "type": {"oid": 16}}, {"id": 8, "name": "conrelid", "type": {"family": "OidFamily", "oid": 26}}, {"id": 9, "name": "contypid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 10, "name": "conindid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 11, "name": "confrelid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 12, "name": "confupdtype", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 13, "name": "confdeltype", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 14, "name": "confmatchtype", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 15, "name": "conislocal", "nullable": true, "type": {"oid": 16}}, {"id": 16, "name": "coninhcount", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 17, "name": "connoinherit", "nullable": true, "type": {"oid": 16}}, {"id": 18, "name": "conkey", "nullable": true, "type": {"arrayContents": {"family": "IntFamily", "oid": 21, "width": 16}, "arrayElemType": "IntFamily", "family": "ArrayFamily", "oid": 1005, "width": 16}}, {"id": 19, "name": "confkey", "nullable": true, "type": {"arrayContents": {"family": "IntFamily", "oid": 21, "width": 16}, "arrayElemType": "IntFamily", "family": "ArrayFamily", "oid": 1005, "width": 16}}, {"id": 20, "name": "conpfeqop", "nullable": true, "type": {"arrayContents": {"family": "OidFamily", "oid": 26}, "arrayElemType": "OidFamily", "family": "ArrayFamily", "oid": 1028}}, {"id": 21, "name": "conppeqop", "nullable": true, "type": {"arrayContents": {"family": "OidFamily", "oid": 26}, "arrayElemType": "OidFamily", "family": "ArrayFamily", "oid": 1028}}, {"id": 22, "name": "conffeqop", "nullable": true, "type": {"arrayContents": {"family": "OidFamily", "oid": 26}, "arrayElemType": "OidFamily", "family": "ArrayFamily", "oid": 1028}}, {"id": 23, "name": "conexclop", "nullable": true, "type": {"arrayContents": {"family": "OidFamily", "oid": 26}, "arrayElemType": "OidFamily", "family": "ArrayFamily", "oid": 1028}}, {"id": 24, "name": "conbin", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 25, "name": "consrc", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 26, "name": "condef", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 27, "name": "conparentid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}], "formatVersion": 3, "id": 4294967088, "indexes": [{"foreignKey": {}, "geoConfig": {}, "id": 2, "interleave": {}, "keyColumnDirections": 
["ASC"], "keyColumnIds": [8], "keyColumnNames": ["conrelid"], "name": "pg_constraint_conrelid_idx", "partitioning": {}, "sharded": {}, "storeColumnIds": [1, 2, 3, 4, 5, 6, 7, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27], "storeColumnNames": ["oid", "conname", "connamespace", "contype", "condeferrable", "condeferred", "convalidated", "contypid", "conindid", "confrelid", "confupdtype", "confdeltype", "confmatchtype", "conislocal", "coninhcount", "connoinherit", "conkey", "confkey", "conpfeqop", "conppeqop", "conffeqop", "conexclop", "conbin", "consrc", "condef", "conparentid"], "version": 3}], "name": "pg_constraint", "nextColumnId": 28, "nextConstraintId": 2, "nextIndexId": 3, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967089 {"table": {"columns": [{"id": 1, "name": "name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "setting", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967089, "name": "pg_config", "nextColumnId": 3, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967090 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "collname", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "collnamespace", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "collowner", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 5, "name": "collencoding", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 6, "name": "collcollate", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "collctype", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "collprovider", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 9, "name": "collversion", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 10, "name": "collisdeterministic", "nullable": true, "type": {"oid": 16}}], "formatVersion": 3, "id": 4294967090, "name": "pg_collation", "nextColumnId": 11, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967091 {"table": {"columns": [{"id": 1, "name": "oid", "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "relname", "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "relnamespace", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "reltype", 
"nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 5, "name": "reloftype", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 6, "name": "relowner", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 7, "name": "relam", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 8, "name": "relfilenode", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 9, "name": "reltablespace", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 10, "name": "relpages", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 11, "name": "reltuples", "nullable": true, "type": {"family": "FloatFamily", "oid": 700, "visibleType": 5, "width": 32}}, {"id": 12, "name": "relallvisible", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 13, "name": "reltoastrelid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 14, "name": "relhasindex", "nullable": true, "type": {"oid": 16}}, {"id": 15, "name": "relisshared", "nullable": true, "type": {"oid": 16}}, {"id": 16, "name": "relpersistence", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 17, "name": "relistemp", "nullable": true, "type": {"oid": 16}}, {"id": 18, "name": "relkind", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 19, "name": "relnatts", "nullable": true, "type": {"family": "IntFamily", "oid": 21, "width": 16}}, {"id": 20, "name": "relchecks", "nullable": true, "type": {"family": "IntFamily", "oid": 21, "width": 16}}, {"id": 21, "name": "relhasoids", "nullable": true, "type": {"oid": 16}}, {"id": 22, "name": "relhaspkey", "nullable": true, "type": {"oid": 16}}, {"id": 23, "name": "relhasrules", "nullable": true, "type": {"oid": 16}}, {"id": 24, "name": "relhastriggers", "nullable": true, "type": {"oid": 16}}, {"id": 25, "name": "relhassubclass", "nullable": true, "type": {"oid": 16}}, {"id": 26, "name": "relfrozenxid", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 27, "name": "relacl", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}, {"id": 28, "name": "reloptions", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}, {"id": 29, "name": "relforcerowsecurity", "nullable": true, "type": {"oid": 16}}, {"id": 30, "name": "relispartition", "nullable": true, "type": {"oid": 16}}, {"id": 31, "name": "relispopulated", "nullable": true, "type": {"oid": 16}}, {"id": 32, "name": "relreplident", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 33, "name": "relrewrite", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 34, "name": "relrowsecurity", "nullable": true, "type": {"oid": 16}}, {"id": 35, "name": "relpartbound", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 36, "name": "relminmxid", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294967091, "indexes": [{"foreignKey": {}, "geoConfig": {}, "id": 2, "interleave": {}, "keyColumnDirections": ["ASC"], "keyColumnIds": [1], "keyColumnNames": ["oid"], "name": "pg_class_oid_idx", "partitioning": {}, "sharded": {}, 
"storeColumnIds": [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36], "storeColumnNames": ["relname", "relnamespace", "reltype", "reloftype", "relowner", "relam", "relfilenode", "reltablespace", "relpages", "reltuples", "relallvisible", "reltoastrelid", "relhasindex", "relisshared", "relpersistence", "relistemp", "relkind", "relnatts", "relchecks", "relhasoids", "relhaspkey", "relhasrules", "relhastriggers", "relhassubclass", "relfrozenxid", "relacl", "reloptions", "relforcerowsecurity", "relispartition", "relispopulated", "relreplident", "relrewrite", "relrowsecurity", "relpartbound", "relminmxid"], "version": 3}], "name": "pg_class", "nextColumnId": 37, "nextConstraintId": 2, "nextIndexId": 3, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967092 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "castsource", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "casttarget", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "castfunc", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 5, "name": "castcontext", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 6, "name": "castmethod", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}], "formatVersion": 3, "id": 4294967092, "name": "pg_cast", "nextColumnId": 7, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967093 {"table": {"columns": [{"id": 1, "name": "name", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 2, "name": "default_version", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "installed_version", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "comment", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967093, "name": "pg_available_extensions", "nextColumnId": 5, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967094 {"table": {"columns": [{"id": 1, "name": "name", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 2, "name": "version", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "installed", "nullable": true, "type": {"oid": 16}}, {"id": 4, "name": "superuser", "nullable": true, "type": {"oid": 16}}, {"id": 5, "name": "trusted", 
"nullable": true, "type": {"oid": 16}}, {"id": 6, "name": "relocatable", "nullable": true, "type": {"oid": 16}}, {"id": 7, "name": "schema", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 8, "name": "requires", "nullable": true, "type": {"arrayContents": {"family": 11, "oid": 19}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1003}}, {"id": 9, "name": "comment", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967094, "name": "pg_available_extension_versions", "nextColumnId": 10, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967095 {"table": {"columns": [{"id": 1, "name": "roleid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "member", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "grantor", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "admin_option", "nullable": true, "type": {"oid": 16}}], "formatVersion": 3, "id": 4294967095, "name": "pg_auth_members", "nextColumnId": 5, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967096 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "rolname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "rolsuper", "nullable": true, "type": {"oid": 16}}, {"id": 4, "name": "rolinherit", "nullable": true, "type": {"oid": 16}}, {"id": 5, "name": "rolcreaterole", "nullable": true, "type": {"oid": 16}}, {"id": 6, "name": "rolcreatedb", "nullable": true, "type": {"oid": 16}}, {"id": 7, "name": "rolcanlogin", "nullable": true, "type": {"oid": 16}}, {"id": 8, "name": "rolreplication", "nullable": true, "type": {"oid": 16}}, {"id": 9, "name": "rolbypassrls", "nullable": true, "type": {"oid": 16}}, {"id": 10, "name": "rolconnlimit", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 11, "name": "rolpassword", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 12, "name": "rolvaliduntil", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}], "formatVersion": 3, "id": 4294967096, "name": "pg_authid", "nextColumnId": 13, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967097 {"table": {"columns": [{"id": 1, "name": "attrelid", "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "attname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "atttypid", 
"nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "attstattarget", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 5, "name": "attlen", "nullable": true, "type": {"family": "IntFamily", "oid": 21, "width": 16}}, {"id": 6, "name": "attnum", "nullable": true, "type": {"family": "IntFamily", "oid": 21, "width": 16}}, {"id": 7, "name": "attndims", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 8, "name": "attcacheoff", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 9, "name": "atttypmod", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 10, "name": "attbyval", "nullable": true, "type": {"oid": 16}}, {"id": 11, "name": "attstorage", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 12, "name": "attalign", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 13, "name": "attnotnull", "nullable": true, "type": {"oid": 16}}, {"id": 14, "name": "atthasdef", "nullable": true, "type": {"oid": 16}}, {"id": 15, "name": "attidentity", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 16, "name": "attgenerated", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 17, "name": "attisdropped", "nullable": true, "type": {"oid": 16}}, {"id": 18, "name": "attislocal", "nullable": true, "type": {"oid": 16}}, {"id": 19, "name": "attinhcount", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 20, "name": "attcollation", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 21, "name": "attacl", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}, {"id": 22, "name": "attoptions", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}, {"id": 23, "name": "attfdwoptions", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}, {"id": 24, "name": "atthasmissing", "nullable": true, "type": {"oid": 16}}, {"id": 25, "name": "attmissingval", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}, {"id": 26, "name": "attishidden", "nullable": true, "type": {"oid": 16}}], "formatVersion": 3, "id": 4294967097, "indexes": [{"foreignKey": {}, "geoConfig": {}, "id": 2, "interleave": {}, "keyColumnDirections": ["ASC"], "keyColumnIds": [1], "keyColumnNames": ["attrelid"], "name": "pg_attribute_attrelid_idx", "partitioning": {}, "sharded": {}, "storeColumnIds": [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26], "storeColumnNames": ["attname", "atttypid", "attstattarget", "attlen", "attnum", "attndims", "attcacheoff", "atttypmod", "attbyval", "attstorage", "attalign", "attnotnull", "atthasdef", "attidentity", "attgenerated", "attisdropped", "attislocal", "attinhcount", "attcollation", "attacl", "attoptions", "attfdwoptions", "atthasmissing", "attmissingval", "attishidden"], "version": 3}], "name": "pg_attribute", "nextColumnId": 27, "nextConstraintId": 2, "nextIndexId": 3, 
"nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967098 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "adrelid", "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "adnum", "nullable": true, "type": {"family": "IntFamily", "oid": 21, "width": 16}}, {"id": 4, "name": "adbin", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "adsrc", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967098, "indexes": [{"foreignKey": {}, "geoConfig": {}, "id": 2, "interleave": {}, "keyColumnDirections": ["ASC"], "keyColumnIds": [2], "keyColumnNames": ["adrelid"], "name": "pg_attrdef_adrelid_idx", "partitioning": {}, "sharded": {}, "storeColumnIds": [1, 3, 4, 5], "storeColumnNames": ["oid", "adnum", "adbin", "adsrc"], "version": 3}], "name": "pg_attrdef", "nextColumnId": 6, "nextConstraintId": 2, "nextIndexId": 3, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967099 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "amprocfamily", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "amproclefttype", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "amprocrighttype", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 5, "name": "amprocnum", "nullable": true, "type": {"family": "IntFamily", "oid": 21, "width": 16}}, {"id": 6, "name": "amproc", "nullable": true, "type": {"family": "OidFamily", "oid": 24}}], "formatVersion": 3, "id": 4294967099, "name": "pg_amproc", "nextColumnId": 7, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967100 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "amopfamily", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "amoplefttype", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "amoprighttype", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 5, "name": "amopstrategy", "nullable": true, "type": {"family": "IntFamily", "oid": 21, "width": 16}}, {"id": 6, "name": "amoppurpose", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 7, "name": "amopopr", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 8, "name": "amopmethod", "nullable": true, "type": {"family": "OidFamily", 
"oid": 26}}, {"id": 9, "name": "amopsortfamily", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}], "formatVersion": 3, "id": 4294967100, "name": "pg_amop", "nextColumnId": 10, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967101 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "amname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "amstrategies", "nullable": true, "type": {"family": "IntFamily", "oid": 21, "width": 16}}, {"id": 4, "name": "amsupport", "nullable": true, "type": {"family": "IntFamily", "oid": 21, "width": 16}}, {"id": 5, "name": "amcanorder", "nullable": true, "type": {"oid": 16}}, {"id": 6, "name": "amcanorderbyop", "nullable": true, "type": {"oid": 16}}, {"id": 7, "name": "amcanbackward", "nullable": true, "type": {"oid": 16}}, {"id": 8, "name": "amcanunique", "nullable": true, "type": {"oid": 16}}, {"id": 9, "name": "amcanmulticol", "nullable": true, "type": {"oid": 16}}, {"id": 10, "name": "amoptionalkey", "nullable": true, "type": {"oid": 16}}, {"id": 11, "name": "amsearcharray", "nullable": true, "type": {"oid": 16}}, {"id": 12, "name": "amsearchnulls", "nullable": true, "type": {"oid": 16}}, {"id": 13, "name": "amstorage", "nullable": true, "type": {"oid": 16}}, {"id": 14, "name": "amclusterable", "nullable": true, "type": {"oid": 16}}, {"id": 15, "name": "ampredlocks", "nullable": true, "type": {"oid": 16}}, {"id": 16, "name": "amkeytype", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 17, "name": "aminsert", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 18, "name": "ambeginscan", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 19, "name": "amgettuple", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 20, "name": "amgetbitmap", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 21, "name": "amrescan", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 22, "name": "amendscan", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 23, "name": "ammarkpos", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 24, "name": "amrestrpos", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 25, "name": "ambuild", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 26, "name": "ambuildempty", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 27, "name": "ambulkdelete", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 28, "name": "amvacuumcleanup", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 29, "name": "amcanreturn", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 30, "name": "amcostestimate", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 31, "name": "amoptions", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 32, "name": "amhandler", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 33, "name": "amtype", "nullable": true, "type": {"family": "StringFamily", "oid": 18, 
"visibleType": 9, "width": 1}}], "formatVersion": 3, "id": 4294967101, "name": "pg_am", "nextColumnId": 34, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967102 {"table": {"columns": [{"id": 1, "name": "aggfnoid", "nullable": true, "type": {"family": "OidFamily", "oid": 24}}, {"id": 2, "name": "aggkind", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 3, "name": "aggnumdirectargs", "nullable": true, "type": {"family": "IntFamily", "oid": 21, "width": 16}}, {"id": 4, "name": "aggtransfn", "nullable": true, "type": {"family": "OidFamily", "oid": 24}}, {"id": 5, "name": "aggfinalfn", "nullable": true, "type": {"family": "OidFamily", "oid": 24}}, {"id": 6, "name": "aggcombinefn", "nullable": true, "type": {"family": "OidFamily", "oid": 24}}, {"id": 7, "name": "aggserialfn", "nullable": true, "type": {"family": "OidFamily", "oid": 24}}, {"id": 8, "name": "aggdeserialfn", "nullable": true, "type": {"family": "OidFamily", "oid": 24}}, {"id": 9, "name": "aggmtransfn", "nullable": true, "type": {"family": "OidFamily", "oid": 24}}, {"id": 10, "name": "aggminvtransfn", "nullable": true, "type": {"family": "OidFamily", "oid": 24}}, {"id": 11, "name": "aggmfinalfn", "nullable": true, "type": {"family": "OidFamily", "oid": 24}}, {"id": 12, "name": "aggfinalextra", "nullable": true, "type": {"oid": 16}}, {"id": 13, "name": "aggmfinalextra", "nullable": true, "type": {"oid": 16}}, {"id": 14, "name": "aggsortop", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 15, "name": "aggtranstype", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 16, "name": "aggtransspace", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 17, "name": "aggmtranstype", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 18, "name": "aggmtransspace", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 19, "name": "agginitval", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 20, "name": "aggminitval", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 21, "name": "aggfinalmodify", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 22, "name": "aggmfinalmodify", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}], "formatVersion": 3, "id": 4294967102, "name": "pg_aggregate", "nextColumnId": 23, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967103, "version": "1"}} -4294967103 {"schema": {"defaultPrivileges": {"type": "SCHEMA"}, "id": 4294967103, "name": "pg_catalog", "privileges": {"ownerProto": "node", "users": [{"privileges": "512", "userProto": "public"}], "version": 3}, "version": "1"}} -4294967104 {"table": {"columns": [{"id": 1, "name": "table_catalog", 
"type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "table_schema", "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "table_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "view_definition", "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "check_option", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "is_updatable", "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "is_insertable_into", "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "is_trigger_updatable", "type": {"family": "StringFamily", "oid": 25}}, {"id": 9, "name": "is_trigger_deletable", "type": {"family": "StringFamily", "oid": 25}}, {"id": 10, "name": "is_trigger_insertable_into", "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967104, "name": "views", "nextColumnId": 11, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967105 {"table": {"columns": [{"id": 1, "name": "view_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "view_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "view_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "table_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "table_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "table_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967105, "name": "view_table_usage", "nextColumnId": 7, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967106 {"table": {"columns": [{"id": 1, "name": "table_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "table_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "table_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "specific_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "specific_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "specific_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967106, "name": "view_routine_usage", "nextColumnId": 7, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967107 
{"table": {"columns": [{"id": 1, "name": "view_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "view_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "view_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "table_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "table_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "table_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "column_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967107, "name": "view_column_usage", "nextColumnId": 8, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967108 {"table": {"columns": [{"id": 1, "name": "grantee", "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "table_catalog", "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "privilege_type", "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "is_grantable", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967108, "name": "user_privileges", "nextColumnId": 5, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967109 {"table": {"columns": [{"id": 1, "name": "authorization_identifier", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "foreign_server_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "foreign_server_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967109, "name": "user_mappings", "nextColumnId": 4, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967110 {"table": {"columns": [{"id": 1, "name": "authorization_identifier", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "foreign_server_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "foreign_server_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "option_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "option_value", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967110, "name": "user_mapping_options", "nextColumnId": 6, 
"nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967111 {"table": {"columns": [{"id": 1, "name": "user_defined_type_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "user_defined_type_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "user_defined_type_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "user_defined_type_category", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "is_instantiable", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "is_final", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "ordering_form", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "ordering_category", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 9, "name": "ordering_routine_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 10, "name": "ordering_routine_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 11, "name": "ordering_routine_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 12, "name": "reference_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 13, "name": "data_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 14, "name": "character_maximum_length", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 15, "name": "character_octet_length", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 16, "name": "character_set_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 17, "name": "character_set_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 18, "name": "character_set_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 19, "name": "collation_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 20, "name": "collation_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 21, "name": "collation_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 22, "name": "numeric_precision", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 23, "name": "numeric_precision_radix", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 24, "name": "numeric_scale", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 25, "name": "datetime_precision", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 26, "name": "interval_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 27, "name": "interval_precision", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 28, "name": "source_dtd_identifier", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 29, "name": "ref_dtd_identifier", "nullable": true, 
"type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967111, "name": "user_defined_types", "nextColumnId": 30, "nextConstraintId": 1, "nextMutationId": 1, "primaryIndex": {"foreignKey": {}, "geoConfig": {}, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1", "viewQuery": "SELECT CAST(current_database() AS STRING) AS user_defined_type_catalog, CAST(n.nspname AS STRING) AS user_defined_type_schema, CAST(c.relname AS STRING) AS user_defined_type_name, CAST('STRUCTURED' AS STRING) AS user_defined_type_category, CAST('YES' AS STRING) AS is_instantiable, CAST(NULL AS STRING) AS is_final, CAST(NULL AS STRING) AS ordering_form, CAST(NULL AS STRING) AS ordering_category, CAST(NULL AS STRING) AS ordering_routine_catalog, CAST(NULL AS STRING) AS ordering_routine_schema, CAST(NULL AS STRING) AS ordering_routine_name, CAST(NULL AS STRING) AS reference_type, CAST(NULL AS STRING) AS data_type, CAST(NULL AS INT8) AS character_maximum_length, CAST(NULL AS INT8) AS character_octet_length, CAST(NULL AS STRING) AS character_set_catalog, CAST(NULL AS STRING) AS character_set_schema, CAST(NULL AS STRING) AS character_set_name, CAST(NULL AS STRING) AS collation_catalog, CAST(NULL AS STRING) AS collation_schema, CAST(NULL AS STRING) AS collation_name, CAST(NULL AS INT8) AS numeric_precision, CAST(NULL AS INT8) AS numeric_precision_radix, CAST(NULL AS INT8) AS numeric_scale, CAST(NULL AS INT8) AS datetime_precision, CAST(NULL AS STRING) AS interval_type, CAST(NULL AS INT8) AS interval_precision, CAST(NULL AS STRING) AS source_dtd_identifier, CAST(NULL AS STRING) AS ref_dtd_identifier FROM pg_namespace AS n, pg_class AS c, pg_type AS t WHERE (((n.oid = c.relnamespace) AND (t.typrelid = c.oid)) AND (c.relkind = 'c')) AND (pg_has_role(t.typowner, 'USAGE') OR has_type_privilege(t.oid, 'USAGE'))"}} -4294967112 {"table": {"columns": [{"id": 1, "name": "attribute", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "host", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "user", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967112, "name": "user_attributes", "nextColumnId": 4, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967113 {"table": {"columns": [{"id": 1, "name": "grantor", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "grantee", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "object_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "object_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "object_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "object_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "privilege_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, 
"name": "is_grantable", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967113, "name": "usage_privileges", "nextColumnId": 9, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967114 {"table": {"columns": [{"id": 1, "name": "grantor", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "grantee", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "udt_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "udt_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "udt_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "privilege_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "is_grantable", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967114, "name": "udt_privileges", "nextColumnId": 8, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967115 {"table": {"columns": [{"id": 1, "name": "grantee", "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "type_catalog", "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "type_schema", "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "type_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "privilege_type", "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "is_grantable", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967115, "name": "type_privileges", "nextColumnId": 7, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967116 {"table": {"columns": [{"id": 1, "name": "trigger_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "trigger_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "trigger_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "event_manipulation", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "event_object_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "event_object_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "event_object_table", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, 
"name": "action_order", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 9, "name": "action_condition", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 10, "name": "action_statement", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 11, "name": "action_orientation", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 12, "name": "action_timing", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 13, "name": "action_reference_old_table", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 14, "name": "action_reference_new_table", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 15, "name": "action_reference_old_row", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 16, "name": "action_reference_new_row", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 17, "name": "created", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}], "formatVersion": 3, "id": 4294967116, "name": "triggers", "nextColumnId": 18, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967117 {"table": {"columns": [{"id": 1, "name": "trigger_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "trigger_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "trigger_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "event_object_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "event_object_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "event_object_table", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "event_object_column", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967117, "name": "triggered_update_columns", "nextColumnId": 8, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967118 {"table": {"columns": [{"id": 1, "name": "udt_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "udt_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "udt_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "specific_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "specific_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "specific_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "group_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": 
"transform_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967118, "name": "transforms", "nextColumnId": 9, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967119 {"table": {"columns": [{"id": 1, "name": "extent_size", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 2, "name": "logfile_group_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "nodegroup_id", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 4, "name": "tablespace_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "autoextend_size", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 6, "name": "engine", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "maximum_size", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 8, "name": "tablespace_comment", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 9, "name": "tablespace_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967119, "name": "tablespaces", "nextColumnId": 10, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967120 {"table": {"columns": [{"id": 1, "name": "engine_attribute", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "tablespace_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967120, "name": "tablespaces_extensions", "nextColumnId": 3, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967121 {"table": {"columns": [{"id": 1, "name": "table_catalog", "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "table_schema", "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "table_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "table_type", "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "is_insertable_into", "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "version", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294967121, "name": "tables", "nextColumnId": 7, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, 
"sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967122 {"table": {"columns": [{"id": 1, "name": "engine_attribute", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "secondary_engine_attribute", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "table_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "table_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "table_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967122, "name": "tables_extensions", "nextColumnId": 6, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967123 {"table": {"columns": [{"id": 1, "name": "grantor", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "grantee", "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "table_catalog", "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "table_schema", "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "table_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "privilege_type", "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "is_grantable", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "with_hierarchy", "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967123, "name": "table_privileges", "nextColumnId": 9, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967124 {"table": {"columns": [{"id": 1, "name": "constraint_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "constraint_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "constraint_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "engine_attribute", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "secondary_engine_attribute", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "table_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967124, "name": "table_constraints_extensions", "nextColumnId": 7, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, 
"unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967125 {"table": {"columns": [{"id": 1, "name": "constraint_catalog", "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "constraint_schema", "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "constraint_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "table_catalog", "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "table_schema", "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "table_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "constraint_type", "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "is_deferrable", "type": {"family": "StringFamily", "oid": 25}}, {"id": 9, "name": "initially_deferred", "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967125, "name": "table_constraints", "nextColumnId": 10, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967126 {"table": {"columns": [{"id": 1, "name": "table_catalog", "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "table_schema", "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "table_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "non_unique", "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "index_schema", "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "index_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "seq_in_index", "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 8, "name": "column_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 9, "name": "COLLATION", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 10, "name": "cardinality", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 11, "name": "direction", "type": {"family": "StringFamily", "oid": 25}}, {"id": 12, "name": "storing", "type": {"family": "StringFamily", "oid": 25}}, {"id": 13, "name": "implicit", "type": {"family": "StringFamily", "oid": 25}}, {"id": 14, "name": "is_visible", "type": {"family": "StringFamily", "oid": 25}}, {"id": 15, "name": "visibility", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}], "formatVersion": 3, "id": 4294967126, "name": "statistics", "nextColumnId": 16, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967127 {"table": {"columns": [{"id": 1, "name": "conversion_factor", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 2, "name": "description", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "unit_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "unit_type", "nullable": true, "type": {"family": 
"StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967127, "name": "st_units_of_measure", "nextColumnId": 5, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967128 {"table": {"columns": [{"id": 1, "name": "srs_id", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 2, "name": "srs_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "definition", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "description", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "organization", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "organization_coordsys_id", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294967128, "name": "st_spatial_reference_systems", "nextColumnId": 7, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967129 {"table": {"columns": [{"id": 1, "name": "srs_id", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 2, "name": "srs_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "table_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "table_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "table_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "column_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "geometry_type_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967129, "name": "st_geometry_columns", "nextColumnId": 8, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967130 {"table": {"columns": [{"id": 1, "name": "variable", "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "value", "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967130, "name": "session_variables", "nextColumnId": 3, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967131 
{"table": {"columns": [{"id": 1, "name": "sequence_catalog", "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "sequence_schema", "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "sequence_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "data_type", "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "numeric_precision", "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 6, "name": "numeric_precision_radix", "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 7, "name": "numeric_scale", "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 8, "name": "start_value", "type": {"family": "StringFamily", "oid": 25}}, {"id": 9, "name": "minimum_value", "type": {"family": "StringFamily", "oid": 25}}, {"id": 10, "name": "maximum_value", "type": {"family": "StringFamily", "oid": 25}}, {"id": 11, "name": "increment", "type": {"family": "StringFamily", "oid": 25}}, {"id": 12, "name": "cycle_option", "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967131, "name": "sequences", "nextColumnId": 13, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967132 {"table": {"columns": [{"id": 1, "name": "grantee", "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "table_catalog", "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "table_schema", "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "privilege_type", "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "is_grantable", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967132, "name": "schema_privileges", "nextColumnId": 6, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967133 {"table": {"columns": [{"id": 1, "name": "catalog_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "schema_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "default_character_set_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "sql_path", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "crdb_is_user_defined", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967133, "name": "schemata", "nextColumnId": 6, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967134 {"table": {"columns": [{"id": 1, "name": "catalog_name", "nullable": true, "type": 
{"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "options", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "schema_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967134, "name": "schemata_extensions", "nextColumnId": 4, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967135 {"table": {"columns": [{"id": 1, "name": "sizing_id", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 2, "name": "sizing_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "supported_value", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 4, "name": "comments", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967135, "name": "sql_sizing", "nextColumnId": 5, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967136 {"table": {"columns": [{"id": 1, "name": "feature_id", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "feature_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "is_supported", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "is_verified_by", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "comments", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967136, "name": "sql_parts", "nextColumnId": 6, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967137 {"table": {"columns": [{"id": 1, "name": "implementation_info_id", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "implementation_info_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "integer_value", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 4, "name": "character_value", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "comments", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967137, "name": "sql_implementation_info", "nextColumnId": 6, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": 
"32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967138 {"table": {"columns": [{"id": 1, "name": "feature_id", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "feature_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "sub_feature_id", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "sub_feature_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "is_supported", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "is_verified_by", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "comments", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967138, "name": "sql_features", "nextColumnId": 8, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967139 {"table": {"columns": [{"id": 1, "name": "specific_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "specific_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "specific_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "routine_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "routine_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "routine_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "routine_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "module_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 9, "name": "module_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 10, "name": "module_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 11, "name": "udt_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 12, "name": "udt_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 13, "name": "udt_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 14, "name": "data_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 15, "name": "character_maximum_length", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 16, "name": "character_octet_length", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 17, "name": "character_set_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 18, "name": "character_set_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 19, "name": "character_set_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 20, "name": "collation_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 21, "name": "collation_schema", "nullable": true, "type": {"family": 
"StringFamily", "oid": 25}}, {"id": 22, "name": "collation_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 23, "name": "numeric_precision", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 24, "name": "numeric_precision_radix", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 25, "name": "numeric_scale", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 26, "name": "datetime_precision", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 27, "name": "interval_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 28, "name": "interval_precision", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 29, "name": "type_udt_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 30, "name": "type_udt_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 31, "name": "type_udt_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 32, "name": "scope_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 33, "name": "scope_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 34, "name": "scope_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 35, "name": "maximum_cardinality", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 36, "name": "dtd_identifier", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 37, "name": "routine_body", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 38, "name": "routine_definition", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 39, "name": "external_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 40, "name": "external_language", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 41, "name": "parameter_style", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 42, "name": "is_deterministic", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 43, "name": "sql_data_access", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 44, "name": "is_null_call", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 45, "name": "sql_path", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 46, "name": "schema_level_routine", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 47, "name": "max_dynamic_result_sets", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 48, "name": "is_user_defined_cast", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 49, "name": "is_implicitly_invocable", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 50, "name": "security_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 51, "name": "to_sql_specific_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 52, "name": "to_sql_specific_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 53, "name": "to_sql_specific_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 54, "name": "as_locator", "nullable": true, "type": {"family": "StringFamily", 
"oid": 25}}, {"id": 55, "name": "created", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 56, "name": "last_altered", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 57, "name": "new_savepoint_level", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 58, "name": "is_udt_dependent", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 59, "name": "result_cast_from_data_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 60, "name": "result_cast_as_locator", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 61, "name": "result_cast_char_max_length", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 62, "name": "result_cast_char_octet_length", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 63, "name": "result_cast_char_set_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 64, "name": "result_cast_char_set_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 65, "name": "result_cast_char_set_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 66, "name": "result_cast_collation_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 67, "name": "result_cast_collation_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 68, "name": "result_cast_collation_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 69, "name": "result_cast_numeric_precision", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 70, "name": "result_cast_numeric_precision_radix", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 71, "name": "result_cast_numeric_scale", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 72, "name": "result_cast_datetime_precision", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 73, "name": "result_cast_interval_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 74, "name": "result_cast_interval_precision", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 75, "name": "result_cast_type_udt_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 76, "name": "result_cast_type_udt_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 77, "name": "result_cast_type_udt_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 78, "name": "result_cast_scope_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 79, "name": "result_cast_scope_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 80, "name": "result_cast_scope_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 81, "name": "result_cast_maximum_cardinality", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 82, "name": "result_cast_dtd_identifier", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967139, "name": "routines", "nextColumnId": 83, "nextConstraintId": 1, "nextMutationId": 1, "primaryIndex": {"foreignKey": {}, "geoConfig": {}, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": 
{"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1", "viewQuery": "SELECT CAST(current_database() AS STRING) AS specific_catalog, CAST(n.nspname AS STRING) AS specific_schema, CAST(nameconcatoid(p.proname, p.oid) AS STRING) AS specific_name, CAST(current_database() AS STRING) AS routine_catalog, CAST(n.nspname AS STRING) AS routine_schema, CAST(p.proname AS STRING) AS routine_name, CAST(CASE p.prokind WHEN 'f' THEN 'FUNCTION' WHEN 'p' THEN 'PROCEDURE' END AS STRING) AS routine_type, CAST(NULL AS STRING) AS module_catalog, CAST(NULL AS STRING) AS module_schema, CAST(NULL AS STRING) AS module_name, CAST(NULL AS STRING) AS udt_catalog, CAST(NULL AS STRING) AS udt_schema, CAST(NULL AS STRING) AS udt_name, CAST(CASE WHEN p.prokind = 'p' THEN NULL WHEN (t.typelem != 0) AND (t.typlen = -1) THEN 'ARRAY' WHEN nt.nspname = 'pg_catalog' THEN format_type(t.oid, NULL) ELSE 'USER-DEFINED' END AS STRING) AS data_type, CAST(NULL AS INT8) AS character_maximum_length, CAST(NULL AS INT8) AS character_octet_length, CAST(NULL AS STRING) AS character_set_catalog, CAST(NULL AS STRING) AS character_set_schema, CAST(NULL AS STRING) AS character_set_name, CAST(NULL AS STRING) AS collation_catalog, CAST(NULL AS STRING) AS collation_schema, CAST(NULL AS STRING) AS collation_name, CAST(NULL AS INT8) AS numeric_precision, CAST(NULL AS INT8) AS numeric_precision_radix, CAST(NULL AS INT8) AS numeric_scale, CAST(NULL AS INT8) AS datetime_precision, CAST(NULL AS STRING) AS interval_type, CAST(NULL AS INT8) AS interval_precision, CAST(CASE WHEN nt.nspname IS NOT NULL THEN current_database() END AS STRING) AS type_udt_catalog, CAST(nt.nspname AS STRING) AS type_udt_schema, CAST(t.typname AS STRING) AS type_udt_name, CAST(NULL AS STRING) AS scope_catalog, CAST(NULL AS STRING) AS scope_schema, CAST(NULL AS STRING) AS scope_name, CAST(NULL AS INT8) AS maximum_cardinality, CAST(CASE WHEN p.prokind != 'p' THEN 0 END AS STRING) AS dtd_identifier, CAST(CASE WHEN l.lanname = 'sql' THEN 'SQL' ELSE 'EXTERNAL' END AS STRING) AS routine_body, CAST(CASE WHEN pg_has_role(p.proowner, 'USAGE') THEN p.prosrc ELSE NULL END AS STRING) AS routine_definition, CAST(CASE WHEN l.lanname = 'c' THEN p.prosrc ELSE NULL END AS STRING) AS external_name, CAST(upper(l.lanname) AS STRING) AS external_language, CAST('GENERAL' AS STRING) AS parameter_style, CAST(CASE WHEN p.provolatile = 'i' THEN 'YES' ELSE 'NO' END AS STRING) AS is_deterministic, CAST('MODIFIES' AS STRING) AS sql_data_access, CAST(CASE WHEN p.prokind != 'p' THEN CASE WHEN p.proisstrict THEN 'YES' ELSE 'NO' END END AS STRING) AS is_null_call, CAST(NULL AS STRING) AS sql_path, CAST('YES' AS STRING) AS schema_level_routine, CAST(0 AS INT8) AS max_dynamic_result_sets, CAST(NULL AS STRING) AS is_user_defined_cast, CAST(NULL AS STRING) AS is_implicitly_invocable, CAST(CASE WHEN p.prosecdef THEN 'DEFINER' ELSE 'INVOKER' END AS STRING) AS security_type, CAST(NULL AS STRING) AS to_sql_specific_catalog, CAST(NULL AS STRING) AS to_sql_specific_schema, CAST(NULL AS STRING) AS to_sql_specific_name, CAST('NO' AS STRING) AS as_locator, CAST(NULL AS TIMESTAMPTZ) AS created, CAST(NULL AS TIMESTAMPTZ) AS last_altered, CAST(NULL AS STRING) AS new_savepoint_level, CAST('NO' AS STRING) AS is_udt_dependent, CAST(NULL AS STRING) AS result_cast_from_data_type, CAST(NULL AS STRING) AS result_cast_as_locator, CAST(NULL AS INT8) AS result_cast_char_max_length, CAST(NULL AS INT8) AS 
result_cast_char_octet_length, CAST(NULL AS STRING) AS result_cast_char_set_catalog, CAST(NULL AS STRING) AS result_cast_char_set_schema, CAST(NULL AS STRING) AS result_cast_char_set_name, CAST(NULL AS STRING) AS result_cast_collation_catalog, CAST(NULL AS STRING) AS result_cast_collation_schema, CAST(NULL AS STRING) AS result_cast_collation_name, CAST(NULL AS INT8) AS result_cast_numeric_precision, CAST(NULL AS INT8) AS result_cast_numeric_precision_radix, CAST(NULL AS INT8) AS result_cast_numeric_scale, CAST(NULL AS INT8) AS result_cast_datetime_precision, CAST(NULL AS STRING) AS result_cast_interval_type, CAST(NULL AS INT8) AS result_cast_interval_precision, CAST(NULL AS STRING) AS result_cast_type_udt_catalog, CAST(NULL AS STRING) AS result_cast_type_udt_schema, CAST(NULL AS STRING) AS result_cast_type_udt_name, CAST(NULL AS STRING) AS result_cast_scope_catalog, CAST(NULL AS STRING) AS result_cast_scope_schema, CAST(NULL AS STRING) AS result_cast_scope_name, CAST(NULL AS INT8) AS result_cast_maximum_cardinality, CAST(NULL AS STRING) AS result_cast_dtd_identifier FROM (pg_catalog.pg_namespace AS n JOIN pg_catalog.pg_proc AS p ON n.oid = p.pronamespace JOIN pg_catalog.pg_language AS l ON p.prolang = l.oid) LEFT JOIN (pg_catalog.pg_type AS t JOIN pg_catalog.pg_namespace AS nt ON t.typnamespace = nt.oid) ON (p.prorettype = t.oid) AND (p.prokind != 'p') WHERE (pg_has_role(p.proowner, 'USAGE') OR has_function_privilege(p.oid, 'EXECUTE'))"}} -4294967140 {"table": {"columns": [{"id": 1, "name": "grantor", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "grantee", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "specific_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "specific_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "specific_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "routine_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "routine_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "routine_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 9, "name": "privilege_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 10, "name": "is_grantable", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967140, "name": "routine_privileges", "nextColumnId": 11, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967141 {"table": {"columns": [{"id": 1, "name": "grantor", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "grantee", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "object_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "object_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "object_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "object_type", 
"nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "privilege_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "is_grantable", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967141, "name": "role_usage_grants", "nextColumnId": 9, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967142 {"table": {"columns": [{"id": 1, "name": "grantor", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "grantee", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "udt_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "udt_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "udt_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "privilege_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "is_grantable", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967142, "name": "role_udt_grants", "nextColumnId": 8, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967143 {"table": {"columns": [{"id": 1, "name": "grantor", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "grantee", "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "table_catalog", "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "table_schema", "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "table_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "privilege_type", "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "is_grantable", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "with_hierarchy", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967143, "name": "role_table_grants", "nextColumnId": 9, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967144 {"table": {"columns": [{"id": 1, "name": "grantor", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "grantee", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "specific_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "specific_schema", "nullable": true, "type": 
{"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "specific_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "routine_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "routine_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "routine_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 9, "name": "privilege_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 10, "name": "is_grantable", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967144, "name": "role_routine_grants", "nextColumnId": 11, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967145 {"table": {"columns": [{"id": 1, "name": "grantor", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "grantee", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "table_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "table_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "table_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "column_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "privilege_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "is_grantable", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967145, "name": "role_column_grants", "nextColumnId": 9, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967146 {"table": {"columns": [{"id": 1, "name": "resource_group_enabled", "nullable": true, "type": {"family": "IntFamily", "oid": 21, "width": 16}}, {"id": 2, "name": "resource_group_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "resource_group_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "thread_priority", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 5, "name": "vcpu_ids", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}], "formatVersion": 3, "id": 4294967146, "name": "resource_groups", "nextColumnId": 6, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967147 {"table": {"columns": [{"id": 1, "name": 
"constraint_catalog", "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "constraint_schema", "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "constraint_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "unique_constraint_catalog", "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "unique_constraint_schema", "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "unique_constraint_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "match_option", "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "update_rule", "type": {"family": "StringFamily", "oid": 25}}, {"id": 9, "name": "delete_rule", "type": {"family": "StringFamily", "oid": 25}}, {"id": 10, "name": "table_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 11, "name": "referenced_table_name", "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967147, "name": "referential_constraints", "nextColumnId": 12, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967148 {"table": {"columns": [{"id": 1, "name": "cpu_system", "nullable": true, "type": {"family": "DecimalFamily", "oid": 1700}}, {"id": 2, "name": "messages_sent", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 3, "name": "swaps", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 4, "name": "block_ops_in", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 5, "name": "block_ops_out", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 6, "name": "context_voluntary", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 7, "name": "cpu_user", "nullable": true, "type": {"family": "DecimalFamily", "oid": 1700}}, {"id": 8, "name": "query_id", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 9, "name": "source_function", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 10, "name": "context_involuntary", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 11, "name": "duration", "nullable": true, "type": {"family": "DecimalFamily", "oid": 1700}}, {"id": 12, "name": "page_faults_major", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 13, "name": "page_faults_minor", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 14, "name": "seq", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 15, "name": "source_file", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 16, "name": "state", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 17, "name": "messages_received", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 18, "name": "source_line", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294967148, "name": "profiling", "nextColumnId": 19, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, 
"primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967149 {"table": {"columns": [{"id": 1, "name": "host", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "id", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 3, "name": "info", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "state", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "time", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 6, "name": "user", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "command", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "db", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967149, "name": "processlist", "nextColumnId": 9, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967150 {"table": {"columns": [{"id": 1, "name": "plugin_version", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "load_option", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "plugin_description", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "plugin_library_version", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "plugin_status", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "plugin_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "plugin_type_version", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "plugin_author", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 9, "name": "plugin_library", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 10, "name": "plugin_license", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 11, "name": "plugin_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967150, "name": "plugins", "nextColumnId": 12, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967151 {"table": {"columns": [{"id": 1, "name": "data_free", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 2, "name": "partition_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "subpartition_expression", "nullable": true, "type": {"family": 
"StringFamily", "oid": 25}}, {"id": 4, "name": "table_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "table_rows", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 6, "name": "avg_row_length", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 7, "name": "check_time", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 8, "name": "create_time", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 9, "name": "index_length", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 10, "name": "nodegroup", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 11, "name": "partition_comment", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 12, "name": "partition_description", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 13, "name": "table_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 14, "name": "checksum", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 15, "name": "partition_expression", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 16, "name": "partition_method", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 17, "name": "subpartition_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 18, "name": "tablespace_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 19, "name": "update_time", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 20, "name": "data_length", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 21, "name": "max_data_length", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 22, "name": "partition_ordinal_position", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 23, "name": "subpartition_method", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 24, "name": "subpartition_ordinal_position", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 25, "name": "table_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967151, "name": "partitions", "nextColumnId": 26, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967152 {"table": {"columns": [{"id": 1, "name": "specific_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "specific_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "specific_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "ordinal_position", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 5, "name": "parameter_mode", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "is_result", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": 
"as_locator", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "parameter_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 9, "name": "data_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 10, "name": "character_maximum_length", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 11, "name": "character_octet_length", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 12, "name": "character_set_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 13, "name": "character_set_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 14, "name": "character_set_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 15, "name": "collation_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 16, "name": "collation_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 17, "name": "collation_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 18, "name": "numeric_precision", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 19, "name": "numeric_precision_radix", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 20, "name": "numeric_scale", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 21, "name": "datetime_precision", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 22, "name": "interval_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 23, "name": "interval_precision", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 24, "name": "udt_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 25, "name": "udt_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 26, "name": "udt_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 27, "name": "scope_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 28, "name": "scope_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 29, "name": "scope_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 30, "name": "maximum_cardinality", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 31, "name": "dtd_identifier", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 32, "name": "parameter_default", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967152, "name": "parameters", "nextColumnId": 33, "nextConstraintId": 1, "nextMutationId": 1, "primaryIndex": {"foreignKey": {}, "geoConfig": {}, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1", "viewQuery": "SELECT CAST(current_database() AS STRING) AS specific_catalog, CAST(n_nspname AS STRING) AS specific_schema, CAST(nameconcatoid(proname, p_oid) AS STRING) AS specific_name, CAST((ss.x).n AS INT8) AS ordinal_position, CAST(CASE WHEN proargmodes IS NULL THEN 'IN' WHEN proargmodes[(ss.x).n] = 'i' THEN 'IN' WHEN proargmodes[(ss.x).n] = 'o' 
THEN 'OUT' WHEN proargmodes[(ss.x).n] = 'b' THEN 'INOUT' WHEN proargmodes[(ss.x).n] = 'v' THEN 'IN' WHEN proargmodes[(ss.x).n] = 't' THEN 'OUT' END AS STRING) AS parameter_mode, CAST('NO' AS STRING) AS is_result, CAST('NO' AS STRING) AS as_locator, CAST(NULLIF(proargnames[(ss.x).n], '') AS STRING) AS parameter_name, CAST(CASE WHEN (t.typelem != 0) AND (t.typlen = -1) THEN 'ARRAY' WHEN nt.nspname = 'pg_catalog' THEN format_type(t.oid, NULL) ELSE 'USER-DEFINED' END AS STRING) AS data_type, CAST(NULL AS INT8) AS character_maximum_length, CAST(NULL AS INT8) AS character_octet_length, CAST(NULL AS STRING) AS character_set_catalog, CAST(NULL AS STRING) AS character_set_schema, CAST(NULL AS STRING) AS character_set_name, CAST(NULL AS STRING) AS collation_catalog, CAST(NULL AS STRING) AS collation_schema, CAST(NULL AS STRING) AS collation_name, CAST(NULL AS INT8) AS numeric_precision, CAST(NULL AS INT8) AS numeric_precision_radix, CAST(NULL AS INT8) AS numeric_scale, CAST(NULL AS INT8) AS datetime_precision, CAST(NULL AS STRING) AS interval_type, CAST(NULL AS INT8) AS interval_precision, CAST(current_database() AS STRING) AS udt_catalog, CAST(nt.nspname AS STRING) AS udt_schema, CAST(t.typname AS STRING) AS udt_name, CAST(NULL AS STRING) AS scope_catalog, CAST(NULL AS STRING) AS scope_schema, CAST(NULL AS STRING) AS scope_name, CAST(NULL AS INT8) AS maximum_cardinality, CAST((ss.x).n AS STRING) AS dtd_identifier, CAST(CASE WHEN pg_has_role(proowner, 'USAGE') THEN pg_get_function_arg_default(p_oid, (ss.x).n) ELSE NULL END AS STRING) AS parameter_default FROM pg_type AS t, pg_namespace AS nt, (SELECT n.nspname AS n_nspname, p.proname, p.oid AS p_oid, p.proowner, p.proargnames, p.proargmodes, information_schema._pg_expandarray(COALESCE(p.proallargtypes, p.proargtypes::OID[])) AS x FROM pg_namespace AS n, pg_proc AS p WHERE (n.oid = p.pronamespace) AND (pg_has_role(p.proowner, 'USAGE') OR has_function_privilege(p.oid, 'EXECUTE'))) AS ss WHERE (t.oid = (ss.x).x) AND (t.typnamespace = nt.oid)"}} -4294967153 {"table": {"columns": [{"id": 1, "name": "insufficient_privileges", "nullable": true, "type": {"family": "IntFamily", "oid": 21, "width": 16}}, {"id": 2, "name": "missing_bytes_beyond_max_mem_size", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 3, "name": "query", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "trace", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967153, "name": "optimizer_trace", "nextColumnId": 5, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967154 {"table": {"columns": [{"id": 1, "name": "word", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "reserved", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294967154, "name": "keywords", "nextColumnId": 3, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": 
"public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967155 {"table": {"columns": [{"id": 1, "name": "constraint_catalog", "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "constraint_schema", "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "constraint_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "table_catalog", "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "table_schema", "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "table_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "column_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "ordinal_position", "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 9, "name": "position_in_unique_constraint", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294967155, "name": "key_column_usage", "nextColumnId": 10, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967156 {"table": {"columns": [{"id": 1, "name": "catalog_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967156, "name": "information_schema_catalog_name", "nextColumnId": 2, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967157 {"table": {"columns": [{"id": 1, "name": "foreign_table_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "foreign_table_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "foreign_table_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "foreign_server_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "foreign_server_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967157, "name": "foreign_tables", "nextColumnId": 6, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967158 {"table": {"columns": [{"id": 1, "name": "foreign_table_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "foreign_table_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "foreign_table_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "option_name", "nullable": true, "type": {"family": 
"StringFamily", "oid": 25}}, {"id": 5, "name": "option_value", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967158, "name": "foreign_table_options", "nextColumnId": 6, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967159 {"table": {"columns": [{"id": 1, "name": "foreign_server_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "foreign_server_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "foreign_data_wrapper_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "foreign_data_wrapper_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "foreign_server_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "foreign_server_version", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "authorization_identifier", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967159, "name": "foreign_servers", "nextColumnId": 8, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967160 {"table": {"columns": [{"id": 1, "name": "foreign_server_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "foreign_server_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "option_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "option_value", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967160, "name": "foreign_server_options", "nextColumnId": 5, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967161 {"table": {"columns": [{"id": 1, "name": "foreign_data_wrapper_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "foreign_data_wrapper_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "authorization_identifier", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "library_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "foreign_data_wrapper_language", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967161, "name": "foreign_data_wrappers", "nextColumnId": 6, "nextConstraintId": 2, "nextIndexId": 
2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967162 {"table": {"columns": [{"id": 1, "name": "foreign_data_wrapper_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "foreign_data_wrapper_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "option_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "option_value", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967162, "name": "foreign_data_wrapper_options", "nextColumnId": 5, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967163 {"table": {"columns": [{"id": 1, "name": "last_update_time", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}, {"id": 2, "name": "table_rows", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}, {"id": 3, "name": "autoextend_size", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 4, "name": "check_time", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}, {"id": 5, "name": "checksum", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}, {"id": 6, "name": "extra", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "file_id", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 8, "name": "table_name", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}, {"id": 9, "name": "avg_row_length", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}, {"id": 10, "name": "extent_size", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 11, "name": "file_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 12, "name": "free_extents", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 13, "name": "max_data_length", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}, {"id": 14, "name": "table_schema", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}, {"id": 15, "name": "update_time", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}, {"id": 16, "name": "data_length", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}, {"id": 17, "name": "tablespace_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 18, "name": "version", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 19, "name": "create_time", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}, {"id": 20, "name": "initial_size", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 21, "name": "logfile_group_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 22, "name": "maximum_size", "nullable": true, "type": 
{"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 23, "name": "status", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 24, "name": "update_count", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}, {"id": 25, "name": "creation_time", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}, {"id": 26, "name": "engine", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 27, "name": "fulltext_keys", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}, {"id": 28, "name": "row_format", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 29, "name": "total_extents", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 30, "name": "data_free", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 31, "name": "index_length", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}, {"id": 32, "name": "last_access_time", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}, {"id": 33, "name": "table_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 34, "name": "transaction_counter", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}, {"id": 35, "name": "file_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 36, "name": "logfile_group_number", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 37, "name": "recover_time", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}, {"id": 38, "name": "deleted_rows", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}], "formatVersion": 3, "id": 4294967163, "name": "files", "nextColumnId": 39, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967164 {"table": {"columns": [{"id": 1, "name": "definer", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "event_definition", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "event_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "interval_value", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "last_altered", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 6, "name": "on_completion", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "originator", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 8, "name": "collation_connection", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 9, "name": "database_collation", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 10, "name": "event_body", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 11, "name": "event_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 12, "name": "execute_at", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 13, "name": "interval_field", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 14, "name": 
"starts", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 15, "name": "time_zone", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 16, "name": "character_set_client", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 17, "name": "ends", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 18, "name": "event_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 19, "name": "event_comment", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 20, "name": "event_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 21, "name": "last_executed", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 22, "name": "sql_mode", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}, {"id": 23, "name": "status", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 24, "name": "created", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}], "formatVersion": 3, "id": 4294967164, "name": "events", "nextColumnId": 25, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967165 {"table": {"columns": [{"id": 1, "name": "support", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "transactions", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "xa", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "comment", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "engine", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "savepoints", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967165, "name": "engines", "nextColumnId": 7, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967166 {"table": {"columns": [{"id": 1, "name": "role_name", "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967166, "name": "enabled_roles", "nextColumnId": 2, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967167 {"table": {"columns": [{"id": 1, "name": "object_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "object_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 
25}}, {"id": 3, "name": "object_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "object_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "collection_type_identifier", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "data_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "character_maximum_length", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 8, "name": "character_octet_length", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 9, "name": "character_set_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 10, "name": "character_set_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 11, "name": "character_set_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 12, "name": "collation_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 13, "name": "collation_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 14, "name": "collation_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 15, "name": "numeric_precision", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 16, "name": "numeric_precision_radix", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 17, "name": "numeric_scale", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 18, "name": "datetime_precision", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 19, "name": "interval_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 20, "name": "interval_precision", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 21, "name": "domain_default", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 22, "name": "udt_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 23, "name": "udt_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 24, "name": "udt_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 25, "name": "scope_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 26, "name": "scope_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 27, "name": "scope_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 28, "name": "maximum_cardinality", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 29, "name": "dtd_identifier", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967167, "name": "element_types", "nextColumnId": 30, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967168 {"table": {"columns": [{"id": 1, "name": "domain_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "domain_schema", 
"nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "domain_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "data_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "character_maximum_length", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 6, "name": "character_octet_length", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 7, "name": "character_set_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "character_set_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 9, "name": "character_set_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 10, "name": "collation_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 11, "name": "collation_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 12, "name": "collation_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 13, "name": "numeric_precision", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 14, "name": "numeric_precision_radix", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 15, "name": "numeric_scale", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 16, "name": "datetime_precision", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 17, "name": "interval_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 18, "name": "interval_precision", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 19, "name": "domain_default", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 20, "name": "udt_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 21, "name": "udt_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 22, "name": "udt_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 23, "name": "scope_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 24, "name": "scope_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 25, "name": "scope_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 26, "name": "maximum_cardinality", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 27, "name": "dtd_identifier", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967168, "name": "domains", "nextColumnId": 28, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967169 {"table": {"columns": [{"id": 1, "name": "udt_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "udt_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "udt_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 
4, "name": "domain_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "domain_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "domain_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967169, "name": "domain_udt_usage", "nextColumnId": 7, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967170 {"table": {"columns": [{"id": 1, "name": "constraint_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "constraint_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "constraint_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "domain_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "domain_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "domain_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "is_deferrable", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "initially_deferred", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967170, "name": "domain_constraints", "nextColumnId": 9, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967171 {"table": {"columns": [{"id": 1, "name": "object_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "object_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "object_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "object_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "dtd_identifier", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967171, "name": "data_type_privileges", "nextColumnId": 6, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967172 {"table": {"columns": [{"id": 1, "name": "table_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "table_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "table_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "constraint_catalog", "nullable": true, "type": {"family": "StringFamily", 
"oid": 25}}, {"id": 5, "name": "constraint_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "constraint_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967172, "name": "constraint_table_usage", "nextColumnId": 7, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967173 {"table": {"columns": [{"id": 1, "name": "table_catalog", "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "table_schema", "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "table_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "column_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "constraint_catalog", "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "constraint_schema", "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "constraint_name", "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967173, "name": "constraint_column_usage", "nextColumnId": 8, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967174 {"table": {"columns": [{"id": 1, "name": "table_catalog", "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "table_schema", "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "table_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "column_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "column_comment", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "ordinal_position", "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 7, "name": "column_default", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "is_nullable", "type": {"family": "StringFamily", "oid": 25}}, {"id": 9, "name": "data_type", "type": {"family": "StringFamily", "oid": 25}}, {"id": 10, "name": "character_maximum_length", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 11, "name": "character_octet_length", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 12, "name": "numeric_precision", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 13, "name": "numeric_precision_radix", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 14, "name": "numeric_scale", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 15, "name": "datetime_precision", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 16, "name": "interval_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 17, "name": "interval_precision", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 
64}}, {"id": 18, "name": "character_set_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 19, "name": "character_set_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 20, "name": "character_set_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 21, "name": "collation_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 22, "name": "collation_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 23, "name": "collation_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 24, "name": "domain_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 25, "name": "domain_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 26, "name": "domain_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 27, "name": "udt_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 28, "name": "udt_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 29, "name": "udt_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 30, "name": "scope_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 31, "name": "scope_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 32, "name": "scope_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 33, "name": "maximum_cardinality", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 34, "name": "dtd_identifier", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 35, "name": "is_self_referencing", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 36, "name": "is_identity", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 37, "name": "identity_generation", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 38, "name": "identity_start", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 39, "name": "identity_increment", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 40, "name": "identity_maximum", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 41, "name": "identity_minimum", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 42, "name": "identity_cycle", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 43, "name": "is_generated", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 44, "name": "generation_expression", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 45, "name": "is_updatable", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 46, "name": "is_hidden", "type": {"family": "StringFamily", "oid": 25}}, {"id": 47, "name": "crdb_sql_type", "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967174, "name": "columns", "nextColumnId": 48, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} 
-4294967175 {"table": {"columns": [{"id": 1, "name": "engine_attribute", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "secondary_engine_attribute", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "table_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "table_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "table_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "column_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967175, "name": "columns_extensions", "nextColumnId": 7, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967176 {"table": {"columns": [{"id": 1, "name": "udt_catalog", "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "udt_schema", "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "udt_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "table_catalog", "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "table_schema", "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "table_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "column_name", "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967176, "name": "column_udt_usage", "nextColumnId": 8, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967177 {"table": {"columns": [{"id": 1, "name": "column_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "histogram", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "schema_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "table_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967177, "name": "column_statistics", "nextColumnId": 5, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967178 {"table": {"columns": [{"id": 1, "name": "grantor", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "grantee", "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "table_catalog", "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "table_schema", "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "table_name", "type": {"family": "StringFamily", 
"oid": 25}}, {"id": 6, "name": "column_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "privilege_type", "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "is_grantable", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967178, "name": "column_privileges", "nextColumnId": 9, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967179 {"table": {"columns": [{"id": 1, "name": "table_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "table_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "table_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "column_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "option_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "option_value", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967179, "name": "column_options", "nextColumnId": 7, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967180 {"table": {"columns": [{"id": 1, "name": "domain_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "domain_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "domain_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "table_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "table_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "table_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "column_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967180, "name": "column_domain_usage", "nextColumnId": 8, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967181 {"table": {"columns": [{"id": 1, "name": "table_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "table_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "table_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "column_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "dependent_column", 
"nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967181, "name": "column_column_usage", "nextColumnId": 6, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967182 {"table": {"columns": [{"id": 1, "name": "collation_catalog", "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "collation_schema", "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "collation_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "pad_attribute", "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967182, "name": "collations", "nextColumnId": 5, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967183 {"table": {"columns": [{"id": 1, "name": "collation_catalog", "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "collation_schema", "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "collation_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "character_set_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "character_set_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "character_set_name", "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967183, "name": "collation_character_set_applicability", "nextColumnId": 7, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967184 {"table": {"columns": [{"id": 1, "name": "constraint_catalog", "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "constraint_schema", "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "constraint_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "check_clause", "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967184, "name": "check_constraints", "nextColumnId": 5, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967185 {"table": {"columns": [{"id": 1, "name": "constraint_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "constraint_schema", "nullable": true, "type": 
{"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "constraint_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "specific_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "specific_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "specific_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967185, "name": "check_constraint_routine_usage", "nextColumnId": 7, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967186 {"table": {"columns": [{"id": 1, "name": "character_set_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "character_set_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "character_set_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "character_repertoire", "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "form_of_use", "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "default_collate_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "default_collate_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "default_collate_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967186, "name": "character_sets", "nextColumnId": 9, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967187 {"table": {"columns": [{"id": 1, "name": "udt_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "udt_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "udt_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "attribute_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "ordinal_position", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 6, "name": "attribute_default", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "is_nullable", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "data_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 9, "name": "character_maximum_length", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 10, "name": "character_octet_length", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 11, "name": "character_set_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 12, "name": "character_set_schema", "nullable": true, "type": {"family": "StringFamily", 
"oid": 25}}, {"id": 13, "name": "character_set_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 14, "name": "collation_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 15, "name": "collation_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 16, "name": "collation_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 17, "name": "numeric_precision", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 18, "name": "numeric_precision_radix", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 19, "name": "numeric_scale", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 20, "name": "datetime_precision", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 21, "name": "interval_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 22, "name": "interval_precision", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 23, "name": "attribute_udt_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 24, "name": "attribute_udt_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 25, "name": "attribute_udt_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 26, "name": "scope_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 27, "name": "scope_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 28, "name": "scope_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 29, "name": "maximum_cardinality", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 30, "name": "dtd_identifier", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 31, "name": "is_derived_reference_attribute", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967187, "name": "attributes", "nextColumnId": 32, "nextConstraintId": 1, "nextMutationId": 1, "primaryIndex": {"foreignKey": {}, "geoConfig": {}, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1", "viewQuery": "SELECT CAST(current_database() AS STRING) AS udt_catalog, CAST(nc.nspname AS STRING) AS udt_schema, CAST(c.relname AS STRING) AS udt_name, CAST(a.attname AS STRING) AS attribute_name, CAST(a.attnum AS INT8) AS ordinal_position, CAST(pg_get_expr(ad.adbin, ad.adrelid) AS STRING) AS attribute_default, CAST(CASE WHEN a.attnotnull OR ((t.typtype = 'd') AND t.typnotnull) THEN 'NO' ELSE 'YES' END AS STRING) AS is_nullable, CAST(CASE WHEN (t.typelem != 0) AND (t.typlen = -1) THEN 'ARRAY' WHEN nt.nspname = 'pg_catalog' THEN format_type(a.atttypid, NULL) ELSE 'USER-DEFINED' END AS STRING) AS data_type, CAST(information_schema._pg_char_max_length(information_schema._pg_truetypid(a, t), information_schema._pg_truetypmod(a, t)) AS INT8) AS character_maximum_length, CAST(information_schema._pg_char_octet_length(information_schema._pg_truetypid(a, t), information_schema._pg_truetypmod(a, t)) AS INT8) AS character_octet_length, CAST(NULL AS STRING) AS character_set_catalog, CAST(NULL AS STRING) AS character_set_schema, 
CAST(NULL AS STRING) AS character_set_name, CAST(CASE WHEN nco.nspname IS NOT NULL THEN current_database() END AS STRING) AS collation_catalog, CAST(nco.nspname AS STRING) AS collation_schema, CAST(co.collname AS STRING) AS collation_name, CAST(information_schema._pg_numeric_precision(information_schema._pg_truetypid(a, t), information_schema._pg_truetypmod(a, t)) AS INT8) AS numeric_precision, CAST(information_schema._pg_numeric_precision_radix(information_schema._pg_truetypid(a, t), information_schema._pg_truetypmod(a, t)) AS INT8) AS numeric_precision_radix, CAST(information_schema._pg_numeric_scale(information_schema._pg_truetypid(a, t), information_schema._pg_truetypmod(a, t)) AS INT8) AS numeric_scale, CAST(information_schema._pg_datetime_precision(information_schema._pg_truetypid(a, t), information_schema._pg_truetypmod(a, t)) AS INT8) AS datetime_precision, CAST(information_schema._pg_interval_type(information_schema._pg_truetypid(a, t), information_schema._pg_truetypmod(a, t)) AS STRING) AS interval_type, CAST(NULL AS INT8) AS interval_precision, CAST(current_database() AS STRING) AS attribute_udt_catalog, CAST(nt.nspname AS STRING) AS attribute_udt_schema, CAST(t.typname AS STRING) AS attribute_udt_name, CAST(NULL AS STRING) AS scope_catalog, CAST(NULL AS STRING) AS scope_schema, CAST(NULL AS STRING) AS scope_name, CAST(NULL AS INT8) AS maximum_cardinality, CAST(a.attnum AS STRING) AS dtd_identifier, CAST('NO' AS STRING) AS is_derived_reference_attribute FROM (pg_attribute AS a LEFT JOIN pg_attrdef AS ad ON (attrelid = adrelid) AND (attnum = adnum)) JOIN (pg_class AS c JOIN pg_namespace AS nc ON (c.relnamespace = nc.oid)) ON a.attrelid = c.oid JOIN (pg_type AS t JOIN pg_namespace AS nt ON (t.typnamespace = nt.oid)) ON a.atttypid = t.oid LEFT JOIN (pg_collation AS co JOIN pg_namespace AS nco ON (co.collnamespace = nco.oid)) ON (a.attcollation = co.oid) AND ((nco.nspname, co.collname) != ('pg_catalog', 'default')) WHERE (((a.attnum > 0) AND (NOT a.attisdropped)) AND (c.relkind IN ('c',))) AND (pg_has_role(c.relowner, 'USAGE') OR has_type_privilege(c.reltype, 'USAGE'))"}} -4294967188 {"table": {"columns": [{"id": 1, "name": "grantee", "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "role_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "is_grantable", "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967188, "name": "applicable_roles", "nextColumnId": 4, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967189 {"table": {"columns": [{"id": 1, "name": "grantee", "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "role_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "is_grantable", "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967189, "name": "administrable_role_authorizations", "nextColumnId": 4, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, 
"replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967190, "version": "1"}} -4294967190 {"schema": {"defaultPrivileges": {"type": "SCHEMA"}, "id": 4294967190, "name": "information_schema", "privileges": {"ownerProto": "node", "users": [{"privileges": "512", "userProto": "public"}], "version": 3}, "version": "1"}} +4294966969 {"table": {"columns": [{"id": 1, "name": "srid", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 2, "name": "auth_name", "nullable": true, "type": {"family": "StringFamily", "oid": 1043, "visibleType": 7, "width": 256}}, {"id": 3, "name": "auth_srid", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 4, "name": "srtext", "nullable": true, "type": {"family": "StringFamily", "oid": 1043, "visibleType": 7, "width": 2048}}, {"id": 5, "name": "proj4text", "nullable": true, "type": {"family": "StringFamily", "oid": 1043, "visibleType": 7, "width": 2048}}], "formatVersion": 3, "id": 4294966969, "name": "spatial_ref_sys", "nextColumnId": 6, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294966972, "version": "1"}} +4294966970 {"table": {"columns": [{"id": 1, "name": "f_table_catalog", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 2, "name": "f_table_schema", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "f_table_name", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "f_geometry_column", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 5, "name": "coord_dimension", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 6, "name": "srid", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 7, "name": "type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294966970, "name": "geometry_columns", "nextColumnId": 8, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294966972, "version": "1"}} +4294966971 {"table": {"columns": [{"id": 1, "name": "f_table_catalog", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 2, "name": "f_table_schema", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "f_table_name", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "f_geography_column", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 5, "name": "coord_dimension", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 6, "name": "srid", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 7, "name": "type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294966971, "name": "geography_columns", "nextColumnId": 8, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, 
"interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294966972, "version": "1"}} +4294966972 {"schema": {"defaultPrivileges": {"type": "SCHEMA"}, "id": 4294966972, "name": "pg_extension", "privileges": {"ownerProto": "node", "users": [{"privileges": "512", "userProto": "public"}], "version": 3}, "version": "1"}} +4294966973 {"table": {"columns": [{"id": 1, "name": "schemaname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 2, "name": "viewname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "viewowner", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "definition", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294966973, "name": "pg_views", "nextColumnId": 5, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294966974 {"table": {"columns": [{"id": 1, "name": "usename", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 2, "name": "usesysid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "usecreatedb", "nullable": true, "type": {"oid": 16}}, {"id": 4, "name": "usesuper", "nullable": true, "type": {"oid": 16}}, {"id": 5, "name": "userepl", "nullable": true, "type": {"oid": 16}}, {"id": 6, "name": "usebypassrls", "nullable": true, "type": {"oid": 16}}, {"id": 7, "name": "passwd", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "valuntil", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 9, "name": "useconfig", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}], "formatVersion": 3, "id": 4294966974, "name": "pg_user", "nextColumnId": 10, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294966975 {"table": {"columns": [{"id": 1, "name": "umid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "srvid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "srvname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "umuser", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 5, "name": "usename", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 6, "name": "umoptions", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}], "formatVersion": 3, "id": 4294966975, "name": "pg_user_mappings", "nextColumnId": 7, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, 
"geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294966976 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "umuser", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "umserver", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "umoptions", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}], "formatVersion": 3, "id": 4294966976, "name": "pg_user_mapping", "nextColumnId": 5, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294966977 {"table": {"columns": [{"id": 1, "name": "oid", "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "typname", "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "typnamespace", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "typowner", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 5, "name": "typlen", "nullable": true, "type": {"family": "IntFamily", "oid": 21, "width": 16}}, {"id": 6, "name": "typbyval", "nullable": true, "type": {"oid": 16}}, {"id": 7, "name": "typtype", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 8, "name": "typcategory", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 9, "name": "typispreferred", "nullable": true, "type": {"oid": 16}}, {"id": 10, "name": "typisdefined", "nullable": true, "type": {"oid": 16}}, {"id": 11, "name": "typdelim", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 12, "name": "typrelid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 13, "name": "typelem", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 14, "name": "typarray", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 15, "name": "typinput", "nullable": true, "type": {"family": "OidFamily", "oid": 24}}, {"id": 16, "name": "typoutput", "nullable": true, "type": {"family": "OidFamily", "oid": 24}}, {"id": 17, "name": "typreceive", "nullable": true, "type": {"family": "OidFamily", "oid": 24}}, {"id": 18, "name": "typsend", "nullable": true, "type": {"family": "OidFamily", "oid": 24}}, {"id": 19, "name": "typmodin", "nullable": true, "type": {"family": "OidFamily", "oid": 24}}, {"id": 20, "name": "typmodout", "nullable": true, "type": {"family": "OidFamily", "oid": 24}}, {"id": 21, "name": "typanalyze", "nullable": true, "type": {"family": "OidFamily", "oid": 24}}, {"id": 22, "name": "typalign", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 23, "name": "typstorage", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 24, "name": "typnotnull", 
"nullable": true, "type": {"oid": 16}}, {"id": 25, "name": "typbasetype", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 26, "name": "typtypmod", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 27, "name": "typndims", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 28, "name": "typcollation", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 29, "name": "typdefaultbin", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 30, "name": "typdefault", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 31, "name": "typacl", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}], "formatVersion": 3, "id": 4294966977, "indexes": [{"foreignKey": {}, "geoConfig": {}, "id": 2, "interleave": {}, "keyColumnDirections": ["ASC"], "keyColumnIds": [1], "keyColumnNames": ["oid"], "name": "pg_type_oid_idx", "partitioning": {}, "sharded": {}, "storeColumnIds": [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31], "storeColumnNames": ["typname", "typnamespace", "typowner", "typlen", "typbyval", "typtype", "typcategory", "typispreferred", "typisdefined", "typdelim", "typrelid", "typelem", "typarray", "typinput", "typoutput", "typreceive", "typsend", "typmodin", "typmodout", "typanalyze", "typalign", "typstorage", "typnotnull", "typbasetype", "typtypmod", "typndims", "typcollation", "typdefaultbin", "typdefault", "typacl"], "version": 3}], "name": "pg_type", "nextColumnId": 32, "nextConstraintId": 2, "nextIndexId": 3, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294966978 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "tmplname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "tmplnamespace", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "tmplinit", "nullable": true, "type": {"family": "OidFamily", "oid": 24}}, {"id": 5, "name": "tmpllexize", "nullable": true, "type": {"family": "OidFamily", "oid": 24}}], "formatVersion": 3, "id": 4294966978, "name": "pg_ts_template", "nextColumnId": 6, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294966979 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "prsname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "prsnamespace", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "prsstart", "nullable": true, "type": {"family": "OidFamily", "oid": 24}}, {"id": 5, "name": "prstoken", "nullable": true, "type": {"family": "OidFamily", "oid": 24}}, {"id": 6, 
"name": "prsend", "nullable": true, "type": {"family": "OidFamily", "oid": 24}}, {"id": 7, "name": "prsheadline", "nullable": true, "type": {"family": "OidFamily", "oid": 24}}, {"id": 8, "name": "prslextype", "nullable": true, "type": {"family": "OidFamily", "oid": 24}}], "formatVersion": 3, "id": 4294966979, "name": "pg_ts_parser", "nextColumnId": 9, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294966980 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "dictname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "dictnamespace", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "dictowner", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 5, "name": "dicttemplate", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 6, "name": "dictinitoption", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294966980, "name": "pg_ts_dict", "nextColumnId": 7, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294966981 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "cfgname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "cfgnamespace", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "cfgowner", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 5, "name": "cfgparser", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}], "formatVersion": 3, "id": 4294966981, "name": "pg_ts_config", "nextColumnId": 6, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294966982 {"table": {"columns": [{"id": 1, "name": "mapcfg", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "maptokentype", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 3, "name": "mapseqno", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 4, "name": "mapdict", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}], "formatVersion": 3, "id": 4294966982, "name": "pg_ts_config_map", "nextColumnId": 5, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": 
"32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294966983 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "tgrelid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "tgname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "tgfoid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 5, "name": "tgtype", "nullable": true, "type": {"family": "IntFamily", "oid": 21, "width": 16}}, {"id": 6, "name": "tgenabled", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 7, "name": "tgisinternal", "nullable": true, "type": {"oid": 16}}, {"id": 8, "name": "tgconstrrelid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 9, "name": "tgconstrindid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 10, "name": "tgconstraint", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 11, "name": "tgdeferrable", "nullable": true, "type": {"oid": 16}}, {"id": 12, "name": "tginitdeferred", "nullable": true, "type": {"oid": 16}}, {"id": 13, "name": "tgnargs", "nullable": true, "type": {"family": "IntFamily", "oid": 21, "width": 16}}, {"id": 14, "name": "tgattr", "nullable": true, "type": {"arrayContents": {"family": "IntFamily", "oid": 21, "width": 16}, "arrayElemType": "IntFamily", "family": 200, "oid": 22, "width": 16}}, {"id": 15, "name": "tgargs", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}, {"id": 16, "name": "tgqual", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 17, "name": "tgoldtable", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 18, "name": "tgnewtable", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 19, "name": "tgparentid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}], "formatVersion": 3, "id": 4294966983, "name": "pg_trigger", "nextColumnId": 20, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294966984 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "trftype", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "trflang", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "trffromsql", "nullable": true, "type": {"family": "OidFamily", "oid": 24}}, {"id": 5, "name": "trftosql", "nullable": true, "type": {"family": "OidFamily", "oid": 24}}], "formatVersion": 3, "id": 4294966984, "name": "pg_transform", "nextColumnId": 6, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294966985 {"table": {"columns": [{"id": 1, "name": "name", "nullable": true, 
"type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "abbrev", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "utc_offset", "nullable": true, "type": {"family": "IntervalFamily", "intervalDurationField": {}, "oid": 1186}}, {"id": 4, "name": "is_dst", "nullable": true, "type": {"oid": 16}}], "formatVersion": 3, "id": 4294966985, "indexes": [{"foreignKey": {}, "geoConfig": {}, "id": 2, "interleave": {}, "keyColumnDirections": ["ASC"], "keyColumnIds": [1], "keyColumnNames": ["name"], "name": "pg_timezone_names_name_idx", "partitioning": {}, "sharded": {}, "storeColumnIds": [2, 3, 4], "storeColumnNames": ["abbrev", "utc_offset", "is_dst"], "version": 3}], "name": "pg_timezone_names", "nextColumnId": 5, "nextConstraintId": 2, "nextIndexId": 3, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294966986 {"table": {"columns": [{"id": 1, "name": "abbrev", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "utc_offset", "nullable": true, "type": {"family": "IntervalFamily", "intervalDurationField": {}, "oid": 1186}}, {"id": 3, "name": "is_dst", "nullable": true, "type": {"oid": 16}}], "formatVersion": 3, "id": 4294966986, "name": "pg_timezone_abbrevs", "nextColumnId": 4, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294966987 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "spcname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "spcowner", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "spclocation", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "spcacl", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}, {"id": 6, "name": "spcoptions", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}], "formatVersion": 3, "id": 4294966987, "name": "pg_tablespace", "nextColumnId": 7, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294966988 {"table": {"columns": [{"id": 1, "name": "schemaname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 2, "name": "tablename", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "tableowner", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "tablespace", "nullable": true, "type": 
{"family": 11, "oid": 19}}, {"id": 5, "name": "hasindexes", "nullable": true, "type": {"oid": 16}}, {"id": 6, "name": "hasrules", "nullable": true, "type": {"oid": 16}}, {"id": 7, "name": "hastriggers", "nullable": true, "type": {"oid": 16}}, {"id": 8, "name": "rowsecurity", "nullable": true, "type": {"oid": 16}}], "formatVersion": 3, "id": 4294966988, "name": "pg_tables", "nextColumnId": 9, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294966989 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "subdbid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "subname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "subowner", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 5, "name": "subenabled", "nullable": true, "type": {"oid": 16}}, {"id": 6, "name": "subconninfo", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "subslotname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 8, "name": "subsynccommit", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 9, "name": "subpublications", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}], "formatVersion": 3, "id": 4294966989, "name": "pg_subscription", "nextColumnId": 10, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294966990 {"table": {"columns": [{"id": 1, "name": "srsubid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "srrelid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "srsubstate", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 4, "name": "srsublsn", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294966990, "name": "pg_subscription_rel", "nextColumnId": 5, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294966991 {"table": {"columns": [{"id": 1, "name": "schemaname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 2, "name": "tablename", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "attname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "inherited", "nullable": true, "type": {"oid": 16}}, {"id": 5, "name": "null_frac", "nullable": true, "type": 
{"family": "FloatFamily", "oid": 700, "visibleType": 5, "width": 32}}, {"id": 6, "name": "avg_width", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 7, "name": "n_distinct", "nullable": true, "type": {"family": "FloatFamily", "oid": 700, "visibleType": 5, "width": 32}}, {"id": 8, "name": "most_common_vals", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}, {"id": 9, "name": "most_common_freqs", "nullable": true, "type": {"arrayContents": {"family": "FloatFamily", "oid": 700, "visibleType": 5, "width": 32}, "arrayElemType": "FloatFamily", "family": "ArrayFamily", "oid": 1021, "visibleType": 5, "width": 32}}, {"id": 10, "name": "histogram_bounds", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}, {"id": 11, "name": "correlation", "nullable": true, "type": {"family": "FloatFamily", "oid": 700, "visibleType": 5, "width": 32}}, {"id": 12, "name": "most_common_elems", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}, {"id": 13, "name": "most_common_elem_freqs", "nullable": true, "type": {"arrayContents": {"family": "FloatFamily", "oid": 700, "visibleType": 5, "width": 32}, "arrayElemType": "FloatFamily", "family": "ArrayFamily", "oid": 1021, "visibleType": 5, "width": 32}}, {"id": 14, "name": "elem_count_histogram", "nullable": true, "type": {"arrayContents": {"family": "FloatFamily", "oid": 700, "visibleType": 5, "width": 32}, "arrayElemType": "FloatFamily", "family": "ArrayFamily", "oid": 1021, "visibleType": 5, "width": 32}}], "formatVersion": 3, "id": 4294966991, "name": "pg_stats", "nextColumnId": 15, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294966992 {"table": {"columns": [{"id": 1, "name": "schemaname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 2, "name": "tablename", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "statistics_schemaname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "statistics_name", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 5, "name": "statistics_owner", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 6, "name": "attnames", "nullable": true, "type": {"arrayContents": {"family": 11, "oid": 19}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1003}}, {"id": 7, "name": "kinds", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1002, "visibleType": 9, "width": 1}}, {"id": 8, "name": "n_distinct", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}, {"id": 9, "name": "dependencies", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}, {"id": 10, "name": "most_common_vals", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", 
"oid": 1009}}, {"id": 11, "name": "most_common_val_nulls", "nullable": true, "type": {"arrayContents": {"oid": 16}, "arrayElemType": "BoolFamily", "family": "ArrayFamily", "oid": 1000}}, {"id": 12, "name": "most_common_freqs", "nullable": true, "type": {"arrayContents": {"family": "FloatFamily", "oid": 701, "width": 64}, "arrayElemType": "FloatFamily", "family": "ArrayFamily", "oid": 1022, "width": 64}}, {"id": 13, "name": "most_common_base_freqs", "nullable": true, "type": {"arrayContents": {"family": "FloatFamily", "oid": 701, "width": 64}, "arrayElemType": "FloatFamily", "family": "ArrayFamily", "oid": 1022, "width": 64}}], "formatVersion": 3, "id": 4294966992, "name": "pg_stats_ext", "nextColumnId": 14, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294966993 {"table": {"columns": [{"id": 1, "name": "starelid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "staattnum", "nullable": true, "type": {"family": "IntFamily", "oid": 21, "width": 16}}, {"id": 3, "name": "stainherit", "nullable": true, "type": {"oid": 16}}, {"id": 4, "name": "stanullfrac", "nullable": true, "type": {"family": "FloatFamily", "oid": 700, "visibleType": 5, "width": 32}}, {"id": 5, "name": "stawidth", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 6, "name": "stadistinct", "nullable": true, "type": {"family": "FloatFamily", "oid": 700, "visibleType": 5, "width": 32}}, {"id": 7, "name": "stakind1", "nullable": true, "type": {"family": "IntFamily", "oid": 21, "width": 16}}, {"id": 8, "name": "stakind2", "nullable": true, "type": {"family": "IntFamily", "oid": 21, "width": 16}}, {"id": 9, "name": "stakind3", "nullable": true, "type": {"family": "IntFamily", "oid": 21, "width": 16}}, {"id": 10, "name": "stakind4", "nullable": true, "type": {"family": "IntFamily", "oid": 21, "width": 16}}, {"id": 11, "name": "stakind5", "nullable": true, "type": {"family": "IntFamily", "oid": 21, "width": 16}}, {"id": 12, "name": "staop1", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 13, "name": "staop2", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 14, "name": "staop3", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 15, "name": "staop4", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 16, "name": "staop5", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 17, "name": "stacoll1", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 18, "name": "stacoll2", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 19, "name": "stacoll3", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 20, "name": "stacoll4", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 21, "name": "stacoll5", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 22, "name": "stanumbers1", "nullable": true, "type": {"arrayContents": {"family": "FloatFamily", "oid": 700, "visibleType": 5, "width": 32}, "arrayElemType": "FloatFamily", "family": "ArrayFamily", "oid": 1021, "visibleType": 5, "width": 32}}, {"id": 23, "name": "stanumbers2", "nullable": true, "type": 
{"arrayContents": {"family": "FloatFamily", "oid": 700, "visibleType": 5, "width": 32}, "arrayElemType": "FloatFamily", "family": "ArrayFamily", "oid": 1021, "visibleType": 5, "width": 32}}, {"id": 24, "name": "stanumbers3", "nullable": true, "type": {"arrayContents": {"family": "FloatFamily", "oid": 700, "visibleType": 5, "width": 32}, "arrayElemType": "FloatFamily", "family": "ArrayFamily", "oid": 1021, "visibleType": 5, "width": 32}}, {"id": 25, "name": "stanumbers4", "nullable": true, "type": {"arrayContents": {"family": "FloatFamily", "oid": 700, "visibleType": 5, "width": 32}, "arrayElemType": "FloatFamily", "family": "ArrayFamily", "oid": 1021, "visibleType": 5, "width": 32}}, {"id": 26, "name": "stanumbers5", "nullable": true, "type": {"arrayContents": {"family": "FloatFamily", "oid": 700, "visibleType": 5, "width": 32}, "arrayElemType": "FloatFamily", "family": "ArrayFamily", "oid": 1021, "visibleType": 5, "width": 32}}, {"id": 27, "name": "stavalues1", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}, {"id": 28, "name": "stavalues2", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}, {"id": 29, "name": "stavalues3", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}, {"id": 30, "name": "stavalues4", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}, {"id": 31, "name": "stavalues5", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}], "formatVersion": 3, "id": 4294966993, "name": "pg_statistic", "nextColumnId": 32, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294966994 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "stxrelid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "stxname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "stxnamespace", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 5, "name": "stxowner", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 6, "name": "stxstattarget", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 7, "name": "stxkeys", "nullable": true, "type": {"arrayContents": {"family": "IntFamily", "oid": 21, "width": 16}, "arrayElemType": "IntFamily", "family": 200, "oid": 22, "width": 16}}, {"id": 8, "name": "stxkind", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1002, "visibleType": 9, "width": 1}}], "formatVersion": 3, "id": 4294966994, "name": "pg_statistic_ext", "nextColumnId": 9, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, 
"primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294966995 {"table": {"columns": [{"id": 1, "name": "stxoid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "stxdndistinct", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}, {"id": 3, "name": "stxddependencies", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}, {"id": 4, "name": "stxdmcv", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}], "formatVersion": 3, "id": 4294966995, "name": "pg_statistic_ext_data", "nextColumnId": 5, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294966996 {"table": {"columns": [{"id": 1, "name": "relid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "schemaname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "relname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "heap_blks_read", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 5, "name": "heap_blks_hit", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 6, "name": "idx_blks_read", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 7, "name": "idx_blks_hit", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 8, "name": "toast_blks_read", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 9, "name": "toast_blks_hit", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 10, "name": "tidx_blks_read", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 11, "name": "tidx_blks_hit", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294966996, "name": "pg_statio_user_tables", "nextColumnId": 12, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294966997 {"table": {"columns": [{"id": 1, "name": "relid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "schemaname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "relname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "blks_read", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 5, "name": "blks_hit", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294966997, "name": "pg_statio_user_sequences", "nextColumnId": 6, "nextConstraintId": 2, "nextIndexId": 2, 
"nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294966998 {"table": {"columns": [{"id": 1, "name": "relid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "indexrelid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "schemaname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "relname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 5, "name": "indexrelname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 6, "name": "idx_blks_read", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 7, "name": "idx_blks_hit", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294966998, "name": "pg_statio_user_indexes", "nextColumnId": 8, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294966999 {"table": {"columns": [{"id": 1, "name": "relid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "schemaname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "relname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "heap_blks_read", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 5, "name": "heap_blks_hit", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 6, "name": "idx_blks_read", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 7, "name": "idx_blks_hit", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 8, "name": "toast_blks_read", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 9, "name": "toast_blks_hit", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 10, "name": "tidx_blks_read", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 11, "name": "tidx_blks_hit", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294966999, "name": "pg_statio_sys_tables", "nextColumnId": 12, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967000 {"table": {"columns": [{"id": 1, "name": "relid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "schemaname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "relname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "blks_read", "nullable": true, "type": 
{"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 5, "name": "blks_hit", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294967000, "name": "pg_statio_sys_sequences", "nextColumnId": 6, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967001 {"table": {"columns": [{"id": 1, "name": "relid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "indexrelid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "schemaname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "relname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 5, "name": "indexrelname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 6, "name": "idx_blks_read", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 7, "name": "idx_blks_hit", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294967001, "name": "pg_statio_sys_indexes", "nextColumnId": 8, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967002 {"table": {"columns": [{"id": 1, "name": "relid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "schemaname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "relname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "heap_blks_read", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 5, "name": "heap_blks_hit", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 6, "name": "idx_blks_read", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 7, "name": "idx_blks_hit", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 8, "name": "toast_blks_read", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 9, "name": "toast_blks_hit", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 10, "name": "tidx_blks_read", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 11, "name": "tidx_blks_hit", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294967002, "name": "pg_statio_all_tables", "nextColumnId": 12, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967003 {"table": {"columns": [{"id": 1, "name": "relid", 
"nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "schemaname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "relname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "blks_read", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 5, "name": "blks_hit", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294967003, "name": "pg_statio_all_sequences", "nextColumnId": 6, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967004 {"table": {"columns": [{"id": 1, "name": "relid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "indexrelid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "schemaname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "relname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 5, "name": "indexrelname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 6, "name": "idx_blks_read", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 7, "name": "idx_blks_hit", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294967004, "name": "pg_statio_all_indexes", "nextColumnId": 8, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967005 {"table": {"columns": [{"id": 1, "name": "relid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "schemaname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "relname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "seq_scan", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 5, "name": "seq_tup_read", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 6, "name": "idx_scan", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 7, "name": "idx_tup_fetch", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 8, "name": "n_tup_ins", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 9, "name": "n_tup_upd", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 10, "name": "n_tup_del", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 11, "name": "n_tup_hot_upd", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294967005, "name": "pg_stat_xact_user_tables", "nextColumnId": 12, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, 
"sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967006 {"table": {"columns": [{"id": 1, "name": "funcid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "schemaname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "funcname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "calls", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 5, "name": "total_time", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 6, "name": "self_time", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}], "formatVersion": 3, "id": 4294967006, "name": "pg_stat_xact_user_functions", "nextColumnId": 7, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967007 {"table": {"columns": [{"id": 1, "name": "relid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "schemaname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "relname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "seq_scan", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 5, "name": "seq_tup_read", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 6, "name": "idx_scan", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 7, "name": "idx_tup_fetch", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 8, "name": "n_tup_ins", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 9, "name": "n_tup_upd", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 10, "name": "n_tup_del", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 11, "name": "n_tup_hot_upd", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294967007, "name": "pg_stat_xact_sys_tables", "nextColumnId": 12, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967008 {"table": {"columns": [{"id": 1, "name": "relid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "schemaname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "relname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "seq_scan", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 5, "name": "seq_tup_read", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 6, "name": "idx_scan", "nullable": true, "type": {"family": "IntFamily", 
"oid": 20, "width": 64}}, {"id": 7, "name": "idx_tup_fetch", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 8, "name": "n_tup_ins", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 9, "name": "n_tup_upd", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 10, "name": "n_tup_del", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 11, "name": "n_tup_hot_upd", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294967008, "name": "pg_stat_xact_all_tables", "nextColumnId": 12, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967009 {"table": {"columns": [{"id": 1, "name": "pid", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 2, "name": "status", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "receive_start_lsn", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "receive_start_tli", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 5, "name": "written_lsn", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "flushed_lsn", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "received_tli", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 8, "name": "last_msg_send_time", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 9, "name": "last_msg_receipt_time", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 10, "name": "latest_end_lsn", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 11, "name": "latest_end_time", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 12, "name": "slot_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 13, "name": "sender_host", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 14, "name": "sender_port", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 15, "name": "conninfo", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967009, "name": "pg_stat_wal_receiver", "nextColumnId": 16, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967010 {"table": {"columns": [{"id": 1, "name": "relid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "schemaname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "relname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "seq_scan", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 5, 
"name": "seq_tup_read", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 6, "name": "idx_scan", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 7, "name": "idx_tup_fetch", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 8, "name": "n_tup_ins", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 9, "name": "n_tup_upd", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 10, "name": "n_tup_del", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 11, "name": "n_tup_hot_upd", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 12, "name": "n_live_tup", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 13, "name": "n_dead_tup", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 14, "name": "n_mod_since_analyze", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 15, "name": "n_ins_since_vacuum", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 16, "name": "last_vacuum", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 17, "name": "last_autovacuum", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 18, "name": "last_analyze", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 19, "name": "last_autoanalyze", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 20, "name": "vacuum_count", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 21, "name": "autovacuum_count", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 22, "name": "analyze_count", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 23, "name": "autoanalyze_count", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294967010, "name": "pg_stat_user_tables", "nextColumnId": 24, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967011 {"table": {"columns": [{"id": 1, "name": "relid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "indexrelid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "schemaname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "relname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 5, "name": "indexrelname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 6, "name": "idx_scan", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 7, "name": "idx_tup_read", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 8, "name": "idx_tup_fetch", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294967011, "name": "pg_stat_user_indexes", "nextColumnId": 9, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": 
{"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967012 {"table": {"columns": [{"id": 1, "name": "funcid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "schemaname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "funcname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "calls", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 5, "name": "total_time", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 6, "name": "self_time", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}], "formatVersion": 3, "id": 4294967012, "name": "pg_stat_user_functions", "nextColumnId": 7, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967013 {"table": {"columns": [{"id": 1, "name": "relid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "schemaname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "relname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "seq_scan", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 5, "name": "seq_tup_read", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 6, "name": "idx_scan", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 7, "name": "idx_tup_fetch", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 8, "name": "n_tup_ins", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 9, "name": "n_tup_upd", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 10, "name": "n_tup_del", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 11, "name": "n_tup_hot_upd", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 12, "name": "n_live_tup", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 13, "name": "n_dead_tup", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 14, "name": "n_mod_since_analyze", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 15, "name": "n_ins_since_vacuum", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 16, "name": "last_vacuum", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 17, "name": "last_autovacuum", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 18, "name": "last_analyze", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 19, "name": "last_autoanalyze", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 20, "name": "vacuum_count", "nullable": true, "type": {"family": "IntFamily", 
"oid": 20, "width": 64}}, {"id": 21, "name": "autovacuum_count", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 22, "name": "analyze_count", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 23, "name": "autoanalyze_count", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294967013, "name": "pg_stat_sys_tables", "nextColumnId": 24, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967014 {"table": {"columns": [{"id": 1, "name": "relid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "indexrelid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "schemaname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "relname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 5, "name": "indexrelname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 6, "name": "idx_scan", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 7, "name": "idx_tup_read", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 8, "name": "idx_tup_fetch", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294967014, "name": "pg_stat_sys_indexes", "nextColumnId": 9, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967015 {"table": {"columns": [{"id": 1, "name": "subid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "subname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "pid", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 4, "name": "relid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 5, "name": "received_lsn", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "last_msg_send_time", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 7, "name": "last_msg_receipt_time", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 8, "name": "latest_end_lsn", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 9, "name": "latest_end_time", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}], "formatVersion": 3, "id": 4294967015, "name": "pg_stat_subscription", "nextColumnId": 10, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} 
+4294967016 {"table": {"columns": [{"id": 1, "name": "pid", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 2, "name": "ssl", "nullable": true, "type": {"oid": 16}}, {"id": 3, "name": "version", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "cipher", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "bits", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 6, "name": "compression", "nullable": true, "type": {"oid": 16}}, {"id": 7, "name": "client_dn", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "client_serial", "nullable": true, "type": {"family": "DecimalFamily", "oid": 1700}}, {"id": 9, "name": "issuer_dn", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967016, "name": "pg_stat_ssl", "nextColumnId": 10, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967017 {"table": {"columns": [{"id": 1, "name": "name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "blks_zeroed", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 3, "name": "blks_hit", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 4, "name": "blks_read", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 5, "name": "blks_written", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 6, "name": "blks_exists", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 7, "name": "flushes", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 8, "name": "truncates", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 9, "name": "stats_reset", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}], "formatVersion": 3, "id": 4294967017, "name": "pg_stat_slru", "nextColumnId": 10, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967018 {"table": {"columns": [{"id": 1, "name": "pid", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 2, "name": "usesysid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "usename", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "application_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "client_addr", "nullable": true, "type": {"family": "INetFamily", "oid": 869}}, {"id": 6, "name": "client_hostname", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "client_port", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 8, "name": 
"backend_start", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 9, "name": "backend_xmin", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 10, "name": "state", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 11, "name": "sent_lsn", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 12, "name": "write_lsn", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 13, "name": "flush_lsn", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 14, "name": "replay_lsn", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 15, "name": "write_lag", "nullable": true, "type": {"family": "IntervalFamily", "intervalDurationField": {}, "oid": 1186}}, {"id": 16, "name": "flush_lag", "nullable": true, "type": {"family": "IntervalFamily", "intervalDurationField": {}, "oid": 1186}}, {"id": 17, "name": "replay_lag", "nullable": true, "type": {"family": "IntervalFamily", "intervalDurationField": {}, "oid": 1186}}, {"id": 18, "name": "sync_priority", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 19, "name": "sync_state", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 20, "name": "reply_time", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}], "formatVersion": 3, "id": 4294967018, "name": "pg_stat_replication", "nextColumnId": 21, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967019 {"table": {"columns": [{"id": 1, "name": "pid", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 2, "name": "datid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "datname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "relid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 5, "name": "phase", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "heap_blks_total", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 7, "name": "heap_blks_scanned", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 8, "name": "heap_blks_vacuumed", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 9, "name": "index_vacuum_count", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 10, "name": "max_dead_tuples", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 11, "name": "num_dead_tuples", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294967019, "name": "pg_stat_progress_vacuum", "nextColumnId": 12, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967020 
{"table": {"columns": [{"id": 1, "name": "pid", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 2, "name": "datid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "datname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "relid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 5, "name": "index_relid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 6, "name": "command", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "phase", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "lockers_total", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 9, "name": "lockers_done", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 10, "name": "current_locker_pid", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 11, "name": "blocks_total", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 12, "name": "blocks_done", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 13, "name": "tuples_total", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 14, "name": "tuples_done", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 15, "name": "partitions_total", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 16, "name": "partitions_done", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294967020, "name": "pg_stat_progress_create_index", "nextColumnId": 17, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967021 {"table": {"columns": [{"id": 1, "name": "pid", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 2, "name": "datid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "datname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "relid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 5, "name": "command", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "phase", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "cluster_index_relid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 8, "name": "heap_tuples_scanned", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 9, "name": "heap_tuples_written", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 10, "name": "heap_blks_total", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 11, "name": "heap_blks_scanned", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 12, "name": "index_rebuild_count", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294967021, "name": "pg_stat_progress_cluster", "nextColumnId": 13, 
"nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967022 {"table": {"columns": [{"id": 1, "name": "pid", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 2, "name": "phase", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "backup_total", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 4, "name": "backup_streamed", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 5, "name": "tablespaces_total", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 6, "name": "tablespaces_streamed", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294967022, "name": "pg_stat_progress_basebackup", "nextColumnId": 7, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967023 {"table": {"columns": [{"id": 1, "name": "pid", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 2, "name": "datid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "datname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "relid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 5, "name": "phase", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "sample_blks_total", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 7, "name": "sample_blks_scanned", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 8, "name": "ext_stats_total", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 9, "name": "ext_stats_computed", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 10, "name": "child_tables_total", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 11, "name": "child_tables_done", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 12, "name": "current_child_table_relid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}], "formatVersion": 3, "id": 4294967023, "name": "pg_stat_progress_analyze", "nextColumnId": 13, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967024 {"table": {"columns": [{"id": 1, "name": "pid", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 2, "name": "gss_authenticated", "nullable": true, "type": 
{"oid": 16}}, {"id": 3, "name": "principal", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "encrypted", "nullable": true, "type": {"oid": 16}}], "formatVersion": 3, "id": 4294967024, "name": "pg_stat_gssapi", "nextColumnId": 5, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967025 {"table": {"columns": [{"id": 1, "name": "datid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "datname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "numbackends", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 4, "name": "xact_commit", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 5, "name": "xact_rollback", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 6, "name": "blks_read", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 7, "name": "blks_hit", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 8, "name": "tup_returned", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 9, "name": "tup_fetched", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 10, "name": "tup_inserted", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 11, "name": "tup_updated", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 12, "name": "tup_deleted", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 13, "name": "conflicts", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 14, "name": "temp_files", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 15, "name": "temp_bytes", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 16, "name": "deadlocks", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 17, "name": "checksum_failures", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 18, "name": "checksum_last_failure", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 19, "name": "blk_read_time", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 20, "name": "blk_write_time", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 21, "name": "stats_reset", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}], "formatVersion": 3, "id": 4294967025, "name": "pg_stat_database", "nextColumnId": 22, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967026 {"table": {"columns": [{"id": 1, "name": "datid", "nullable": true, "type": {"family": "OidFamily", 
"oid": 26}}, {"id": 2, "name": "datname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "confl_tablespace", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 4, "name": "confl_lock", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 5, "name": "confl_snapshot", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 6, "name": "confl_bufferpin", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 7, "name": "confl_deadlock", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294967026, "name": "pg_stat_database_conflicts", "nextColumnId": 8, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967027 {"table": {"columns": [{"id": 1, "name": "checkpoints_timed", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 2, "name": "checkpoints_req", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 3, "name": "checkpoint_write_time", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 4, "name": "checkpoint_sync_time", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 5, "name": "buffers_checkpoint", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 6, "name": "buffers_clean", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 7, "name": "maxwritten_clean", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 8, "name": "buffers_backend", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 9, "name": "buffers_backend_fsync", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 10, "name": "buffers_alloc", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 11, "name": "stats_reset", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}], "formatVersion": 3, "id": 4294967027, "name": "pg_stat_bgwriter", "nextColumnId": 12, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967028 {"table": {"columns": [{"id": 1, "name": "archived_count", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 2, "name": "last_archived_wal", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "last_archived_time", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 4, "name": "failed_count", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 5, "name": "last_failed_wal", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "last_failed_time", "nullable": true, 
"type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 7, "name": "stats_reset", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}], "formatVersion": 3, "id": 4294967028, "name": "pg_stat_archiver", "nextColumnId": 8, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967029 {"table": {"columns": [{"id": 1, "name": "relid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "schemaname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "relname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "seq_scan", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 5, "name": "seq_tup_read", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 6, "name": "idx_scan", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 7, "name": "idx_tup_fetch", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 8, "name": "n_tup_ins", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 9, "name": "n_tup_upd", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 10, "name": "n_tup_del", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 11, "name": "n_tup_hot_upd", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 12, "name": "n_live_tup", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 13, "name": "n_dead_tup", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 14, "name": "n_mod_since_analyze", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 15, "name": "n_ins_since_vacuum", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 16, "name": "last_vacuum", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 17, "name": "last_autovacuum", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 18, "name": "last_analyze", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 19, "name": "last_autoanalyze", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 20, "name": "vacuum_count", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 21, "name": "autovacuum_count", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 22, "name": "analyze_count", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 23, "name": "autoanalyze_count", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294967029, "name": "pg_stat_all_tables", "nextColumnId": 24, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": 
{"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967030 {"table": {"columns": [{"id": 1, "name": "relid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "indexrelid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "schemaname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "relname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 5, "name": "indexrelname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 6, "name": "idx_scan", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 7, "name": "idx_tup_read", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 8, "name": "idx_tup_fetch", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294967030, "name": "pg_stat_all_indexes", "nextColumnId": 9, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967031 {"table": {"columns": [{"id": 1, "name": "datid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "datname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "pid", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 4, "name": "usesysid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 5, "name": "usename", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 6, "name": "application_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "client_addr", "nullable": true, "type": {"family": "INetFamily", "oid": 869}}, {"id": 8, "name": "client_hostname", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 9, "name": "client_port", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 10, "name": "backend_start", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 11, "name": "xact_start", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 12, "name": "query_start", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 13, "name": "state_change", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 14, "name": "wait_event_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 15, "name": "wait_event", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 16, "name": "state", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 17, "name": "backend_xid", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 18, "name": "backend_xmin", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 19, "name": "query", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 20, "name": "backend_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 21, "name": "leader_pid", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}], "formatVersion": 3, "id": 4294967031, "name": 
"pg_stat_activity", "nextColumnId": 22, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967032 {"table": {"columns": [{"id": 1, "name": "name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "off", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 3, "name": "size", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 4, "name": "allocated_size", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294967032, "name": "pg_shmem_allocations", "nextColumnId": 5, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967033 {"table": {"columns": [{"id": 1, "name": "dbid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "classid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "objid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "objsubid", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 5, "name": "refclassid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 6, "name": "refobjid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 7, "name": "deptype", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}], "formatVersion": 3, "id": 4294967033, "name": "pg_shdepend", "nextColumnId": 8, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967034 {"table": {"columns": [{"id": 1, "name": "objoid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "classoid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "provider", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "label", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967034, "name": "pg_shseclabel", "nextColumnId": 5, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967035 {"table": {"columns": [{"id": 1, "name": "objoid", "nullable": true, "type": {"family": "OidFamily", "oid": 
26}}, {"id": 2, "name": "classoid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "description", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967035, "name": "pg_shdescription", "nextColumnId": 4, "nextConstraintId": 1, "nextMutationId": 1, "primaryIndex": {"foreignKey": {}, "geoConfig": {}, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1", "viewQuery": "SELECT objoid, classoid, description FROM \"\".crdb_internal.kv_catalog_comments WHERE classoid = 4294967084:::OID"}} +4294967036 {"table": {"columns": [{"id": 1, "name": "usename", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 2, "name": "usesysid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "usecreatedb", "nullable": true, "type": {"oid": 16}}, {"id": 4, "name": "usesuper", "nullable": true, "type": {"oid": 16}}, {"id": 5, "name": "userepl", "nullable": true, "type": {"oid": 16}}, {"id": 6, "name": "usebypassrls", "nullable": true, "type": {"oid": 16}}, {"id": 7, "name": "passwd", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "valuntil", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 9, "name": "useconfig", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}], "formatVersion": 3, "id": 4294967036, "name": "pg_shadow", "nextColumnId": 10, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967037 {"table": {"columns": [{"id": 1, "name": "name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "setting", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "unit", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "category", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "short_desc", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "extra_desc", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "context", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "vartype", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 9, "name": "source", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 10, "name": "min_val", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 11, "name": "max_val", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 12, "name": "enumvals", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 13, "name": "boot_val", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 14, "name": "reset_val", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 15, "name": "sourcefile", "nullable": true, "type": 
{"family": "StringFamily", "oid": 25}}, {"id": 16, "name": "sourceline", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 17, "name": "pending_restart", "nullable": true, "type": {"oid": 16}}], "formatVersion": 3, "id": 4294967037, "name": "pg_settings", "nextColumnId": 18, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967038 {"table": {"columns": [{"id": 1, "name": "schemaname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 2, "name": "sequencename", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "sequenceowner", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "data_type", "nullable": true, "type": {"family": "OidFamily", "oid": 2206}}, {"id": 5, "name": "start_value", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 6, "name": "min_value", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 7, "name": "max_value", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 8, "name": "increment_by", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 9, "name": "cycle", "nullable": true, "type": {"oid": 16}}, {"id": 10, "name": "cache_size", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 11, "name": "last_value", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294967038, "name": "pg_sequences", "nextColumnId": 12, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967039 {"table": {"columns": [{"id": 1, "name": "seqrelid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "seqtypid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "seqstart", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 4, "name": "seqincrement", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 5, "name": "seqmax", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 6, "name": "seqmin", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 7, "name": "seqcache", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 8, "name": "seqcycle", "nullable": true, "type": {"oid": 16}}], "formatVersion": 3, "id": 4294967039, "name": "pg_sequence", "nextColumnId": 9, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 
4294967102, "version": "1"}} +4294967040 {"table": {"columns": [{"id": 1, "name": "objoid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "classoid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "objsubid", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 4, "name": "provider", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "label", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967040, "name": "pg_seclabel", "nextColumnId": 6, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967041 {"table": {"columns": [{"id": 1, "name": "objoid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "classoid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "objsubid", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 4, "name": "objtype", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "objnamespace", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 6, "name": "objname", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "provider", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "label", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967041, "name": "pg_seclabels", "nextColumnId": 9, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967042 {"table": {"columns": [{"id": 1, "name": "schemaname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 2, "name": "tablename", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "rulename", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "definition", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967042, "name": "pg_rules", "nextColumnId": 5, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967043 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "rolname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "rolsuper", "nullable": true, "type": {"oid": 16}}, {"id": 4, "name": "rolinherit", "nullable": true, "type": {"oid": 16}}, {"id": 5, "name": "rolcreaterole", "nullable": true, "type": 
{"oid": 16}}, {"id": 6, "name": "rolcreatedb", "nullable": true, "type": {"oid": 16}}, {"id": 7, "name": "rolcatupdate", "nullable": true, "type": {"oid": 16}}, {"id": 8, "name": "rolcanlogin", "nullable": true, "type": {"oid": 16}}, {"id": 9, "name": "rolreplication", "nullable": true, "type": {"oid": 16}}, {"id": 10, "name": "rolconnlimit", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 11, "name": "rolpassword", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 12, "name": "rolvaliduntil", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 13, "name": "rolbypassrls", "nullable": true, "type": {"oid": 16}}, {"id": 14, "name": "rolconfig", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}], "formatVersion": 3, "id": 4294967043, "name": "pg_roles", "nextColumnId": 15, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967044 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "rulename", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "ev_class", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "ev_type", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 5, "name": "ev_enabled", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 6, "name": "is_instead", "nullable": true, "type": {"oid": 16}}, {"id": 7, "name": "ev_qual", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "ev_action", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967044, "name": "pg_rewrite", "nextColumnId": 9, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967045 {"table": {"columns": [{"id": 1, "name": "slot_name", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 2, "name": "plugin", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "slot_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "datoid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 5, "name": "database", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 6, "name": "temporary", "nullable": true, "type": {"oid": 16}}, {"id": 7, "name": "active", "nullable": true, "type": {"oid": 16}}, {"id": 8, "name": "active_pid", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 9, "name": "xmin", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 10, "name": "catalog_xmin", "nullable": true, "type": {"family": 
"IntFamily", "oid": 20, "width": 64}}, {"id": 11, "name": "restart_lsn", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 12, "name": "confirmed_flush_lsn", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 13, "name": "wal_status", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 14, "name": "safe_wal_size", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294967045, "name": "pg_replication_slots", "nextColumnId": 15, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967046 {"table": {"columns": [{"id": 1, "name": "roident", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "roname", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967046, "name": "pg_replication_origin", "nextColumnId": 3, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967047 {"table": {"columns": [{"id": 1, "name": "local_id", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "external_id", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "remote_lsn", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "local_lsn", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967047, "name": "pg_replication_origin_status", "nextColumnId": 5, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967048 {"table": {"columns": [{"id": 1, "name": "rngtypid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "rngsubtype", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "rngcollation", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "rngsubopc", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 5, "name": "rngcanonical", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 6, "name": "rngsubdiff", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}], "formatVersion": 3, "id": 4294967048, "name": "pg_range", "nextColumnId": 7, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, 
"replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967049 {"table": {"columns": [{"id": 1, "name": "pubname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 2, "name": "schemaname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "tablename", "nullable": true, "type": {"family": 11, "oid": 19}}], "formatVersion": 3, "id": 4294967049, "name": "pg_publication_tables", "nextColumnId": 4, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967050 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "pubname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "pubowner", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "puballtables", "nullable": true, "type": {"oid": 16}}, {"id": 5, "name": "pubinsert", "nullable": true, "type": {"oid": 16}}, {"id": 6, "name": "pubupdate", "nullable": true, "type": {"oid": 16}}, {"id": 7, "name": "pubdelete", "nullable": true, "type": {"oid": 16}}, {"id": 8, "name": "pubtruncate", "nullable": true, "type": {"oid": 16}}, {"id": 9, "name": "pubviaroot", "nullable": true, "type": {"oid": 16}}], "formatVersion": 3, "id": 4294967050, "name": "pg_publication", "nextColumnId": 10, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967051 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "prpubid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "prrelid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}], "formatVersion": 3, "id": 4294967051, "name": "pg_publication_rel", "nextColumnId": 4, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967052 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "proname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "pronamespace", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "proowner", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 5, "name": "prolang", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 6, "name": "procost", "nullable": true, "type": {"family": "FloatFamily", "oid": 700, "visibleType": 5, "width": 32}}, {"id": 7, "name": "prorows", "nullable": true, "type": {"family": "FloatFamily", "oid": 
700, "visibleType": 5, "width": 32}}, {"id": 8, "name": "provariadic", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 9, "name": "prosupport", "nullable": true, "type": {"family": "OidFamily", "oid": 24}}, {"id": 10, "name": "prokind", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 11, "name": "prosecdef", "nullable": true, "type": {"oid": 16}}, {"id": 12, "name": "proleakproof", "nullable": true, "type": {"oid": 16}}, {"id": 13, "name": "proisstrict", "nullable": true, "type": {"oid": 16}}, {"id": 14, "name": "proretset", "nullable": true, "type": {"oid": 16}}, {"id": 15, "name": "provolatile", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 16, "name": "proparallel", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 17, "name": "pronargs", "nullable": true, "type": {"family": "IntFamily", "oid": 21, "width": 16}}, {"id": 18, "name": "pronargdefaults", "nullable": true, "type": {"family": "IntFamily", "oid": 21, "width": 16}}, {"id": 19, "name": "prorettype", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 20, "name": "proargtypes", "nullable": true, "type": {"arrayContents": {"family": "OidFamily", "oid": 26}, "arrayElemType": "OidFamily", "family": 201, "oid": 30}}, {"id": 21, "name": "proallargtypes", "nullable": true, "type": {"arrayContents": {"family": "OidFamily", "oid": 26}, "arrayElemType": "OidFamily", "family": "ArrayFamily", "oid": 1028}}, {"id": 22, "name": "proargmodes", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1002, "visibleType": 9, "width": 1}}, {"id": 23, "name": "proargnames", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}, {"id": 24, "name": "proargdefaults", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 25, "name": "protrftypes", "nullable": true, "type": {"arrayContents": {"family": "OidFamily", "oid": 26}, "arrayElemType": "OidFamily", "family": "ArrayFamily", "oid": 1028}}, {"id": 26, "name": "prosrc", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 27, "name": "probin", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 28, "name": "prosqlbody", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 29, "name": "proconfig", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}, {"id": 30, "name": "proacl", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}], "formatVersion": 3, "id": 4294967052, "indexes": [{"foreignKey": {}, "geoConfig": {}, "id": 2, "interleave": {}, "keyColumnDirections": ["ASC"], "keyColumnIds": [1], "keyColumnNames": ["oid"], "name": "pg_proc_oid_idx", "partitioning": {}, "sharded": {}, "storeColumnIds": [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30], "storeColumnNames": ["proname", "pronamespace", "proowner", "prolang", "procost", "prorows", "provariadic", "prosupport", "prokind", "prosecdef", "proleakproof", "proisstrict", "proretset", 
"provolatile", "proparallel", "pronargs", "pronargdefaults", "prorettype", "proargtypes", "proallargtypes", "proargmodes", "proargnames", "proargdefaults", "protrftypes", "prosrc", "probin", "prosqlbody", "proconfig", "proacl"], "version": 3}], "name": "pg_proc", "nextColumnId": 31, "nextConstraintId": 2, "nextIndexId": 3, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967053 {"table": {"columns": [{"id": 1, "name": "transaction", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 2, "name": "gid", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "prepared", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 4, "name": "owner", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 5, "name": "database", "nullable": true, "type": {"family": 11, "oid": 19}}], "formatVersion": 3, "id": 4294967053, "name": "pg_prepared_xacts", "nextColumnId": 6, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967054 {"table": {"columns": [{"id": 1, "name": "name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "statement", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "prepare_time", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 4, "name": "parameter_types", "nullable": true, "type": {"arrayContents": {"family": "OidFamily", "oid": 2206}, "arrayElemType": "OidFamily", "family": "ArrayFamily", "oid": 2211}}, {"id": 5, "name": "from_sql", "nullable": true, "type": {"oid": 16}}], "formatVersion": 3, "id": 4294967054, "name": "pg_prepared_statements", "nextColumnId": 6, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967055 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "polname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "polrelid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "polcmd", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 5, "name": "polpermissive", "nullable": true, "type": {"oid": 16}}, {"id": 6, "name": "polroles", "nullable": true, "type": {"arrayContents": {"family": "OidFamily", "oid": 26}, "arrayElemType": "OidFamily", "family": "ArrayFamily", "oid": 1028}}, {"id": 7, "name": "polqual", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "polwithcheck", "nullable": true, 
"type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967055, "name": "pg_policy", "nextColumnId": 9, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967056 {"table": {"columns": [{"id": 1, "name": "schemaname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 2, "name": "tablename", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "policyname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "permissive", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "roles", "nullable": true, "type": {"arrayContents": {"family": 11, "oid": 19}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1003}}, {"id": 6, "name": "cmd", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "qual", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "with_check", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967056, "name": "pg_policies", "nextColumnId": 9, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967057 {"table": {"columns": [{"id": 1, "name": "partrelid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "partstrat", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 3, "name": "partnatts", "nullable": true, "type": {"family": "IntFamily", "oid": 21, "width": 16}}, {"id": 4, "name": "partdefid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 5, "name": "partattrs", "nullable": true, "type": {"arrayContents": {"family": "IntFamily", "oid": 21, "width": 16}, "arrayElemType": "IntFamily", "family": 200, "oid": 22, "width": 16}}, {"id": 6, "name": "partclass", "nullable": true, "type": {"arrayContents": {"family": "OidFamily", "oid": 26}, "arrayElemType": "OidFamily", "family": 201, "oid": 30}}, {"id": 7, "name": "partcollation", "nullable": true, "type": {"arrayContents": {"family": "OidFamily", "oid": 26}, "arrayElemType": "OidFamily", "family": 201, "oid": 30}}, {"id": 8, "name": "partexprs", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967057, "name": "pg_partitioned_table", "nextColumnId": 9, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967058 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": 
"opfmethod", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "opfname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "opfnamespace", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 5, "name": "opfowner", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}], "formatVersion": 3, "id": 4294967058, "name": "pg_opfamily", "nextColumnId": 6, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967059 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "oprname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "oprnamespace", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "oprowner", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 5, "name": "oprkind", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 6, "name": "oprcanmerge", "nullable": true, "type": {"oid": 16}}, {"id": 7, "name": "oprcanhash", "nullable": true, "type": {"oid": 16}}, {"id": 8, "name": "oprleft", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 9, "name": "oprright", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 10, "name": "oprresult", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 11, "name": "oprcom", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 12, "name": "oprnegate", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 13, "name": "oprcode", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 14, "name": "oprrest", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 15, "name": "oprjoin", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}], "formatVersion": 3, "id": 4294967059, "name": "pg_operator", "nextColumnId": 16, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967060 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "opcmethod", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "opcname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "opcnamespace", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 5, "name": "opcowner", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 6, "name": "opcfamily", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 7, "name": "opcintype", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 8, "name": "opcdefault", "nullable": true, "type": {"oid": 16}}, {"id": 9, "name": "opckeytype", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}], 
"formatVersion": 3, "id": 4294967060, "name": "pg_opclass", "nextColumnId": 10, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967061 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "nspname", "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "nspowner", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "nspacl", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}], "formatVersion": 3, "id": 4294967061, "indexes": [{"foreignKey": {}, "geoConfig": {}, "id": 2, "interleave": {}, "keyColumnDirections": ["ASC"], "keyColumnIds": [1], "keyColumnNames": ["oid"], "name": "pg_namespace_oid_idx", "partitioning": {}, "sharded": {}, "storeColumnIds": [2, 3, 4], "storeColumnNames": ["nspname", "nspowner", "nspacl"], "version": 3}], "name": "pg_namespace", "nextColumnId": 5, "nextConstraintId": 2, "nextIndexId": 3, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967062 {"table": {"columns": [{"id": 1, "name": "schemaname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 2, "name": "matviewname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "matviewowner", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "tablespace", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 5, "name": "hasindexes", "nullable": true, "type": {"oid": 16}}, {"id": 6, "name": "ispopulated", "nullable": true, "type": {"oid": 16}}, {"id": 7, "name": "definition", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967062, "name": "pg_matviews", "nextColumnId": 8, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967063 {"table": {"columns": [{"id": 1, "name": "locktype", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "database", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "relation", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "page", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 5, "name": "tuple", "nullable": true, "type": {"family": "IntFamily", "oid": 21, "width": 16}}, {"id": 6, "name": "virtualxid", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "transactionid", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 
64}}, {"id": 8, "name": "classid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 9, "name": "objid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 10, "name": "objsubid", "nullable": true, "type": {"family": "IntFamily", "oid": 21, "width": 16}}, {"id": 11, "name": "virtualtransaction", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 12, "name": "pid", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 13, "name": "mode", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 14, "name": "granted", "nullable": true, "type": {"oid": 16}}, {"id": 15, "name": "fastpath", "nullable": true, "type": {"oid": 16}}], "formatVersion": 3, "id": 4294967063, "name": "pg_locks", "nextColumnId": 16, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967064 {"table": {"columns": [{"id": 1, "name": "loid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "pageno", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 3, "name": "data", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}], "formatVersion": 3, "id": 4294967064, "name": "pg_largeobject", "nextColumnId": 4, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967065 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "lomowner", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "lomacl", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}], "formatVersion": 3, "id": 4294967065, "name": "pg_largeobject_metadata", "nextColumnId": 4, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967066 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "lanname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "lanowner", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "lanispl", "nullable": true, "type": {"oid": 16}}, {"id": 5, "name": "lanpltrusted", "nullable": true, "type": {"oid": 16}}, {"id": 6, "name": "lanplcallfoid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 7, "name": "laninline", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 8, "name": "lanvalidator", 
"nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 9, "name": "lanacl", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}], "formatVersion": 3, "id": 4294967066, "name": "pg_language", "nextColumnId": 10, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967067 {"table": {"columns": [{"id": 1, "name": "objoid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "classoid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "objsubid", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 4, "name": "privtype", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 5, "name": "initprivs", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}], "formatVersion": 3, "id": 4294967067, "name": "pg_init_privs", "nextColumnId": 6, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967068 {"table": {"columns": [{"id": 1, "name": "inhrelid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "inhparent", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "inhseqno", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}], "formatVersion": 3, "id": 4294967068, "name": "pg_inherits", "nextColumnId": 4, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967069 {"table": {"columns": [{"id": 1, "name": "crdb_oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "schemaname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "tablename", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 4, "name": "indexname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 5, "name": "tablespace", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 6, "name": "indexdef", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967069, "name": "pg_indexes", "nextColumnId": 7, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": 
"public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967070 {"table": {"columns": [{"id": 1, "name": "indexrelid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "indrelid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "indnatts", "nullable": true, "type": {"family": "IntFamily", "oid": 21, "width": 16}}, {"id": 4, "name": "indisunique", "nullable": true, "type": {"oid": 16}}, {"id": 5, "name": "indnullsnotdistinct", "nullable": true, "type": {"oid": 16}}, {"id": 6, "name": "indisprimary", "nullable": true, "type": {"oid": 16}}, {"id": 7, "name": "indisexclusion", "nullable": true, "type": {"oid": 16}}, {"id": 8, "name": "indimmediate", "nullable": true, "type": {"oid": 16}}, {"id": 9, "name": "indisclustered", "nullable": true, "type": {"oid": 16}}, {"id": 10, "name": "indisvalid", "nullable": true, "type": {"oid": 16}}, {"id": 11, "name": "indcheckxmin", "nullable": true, "type": {"oid": 16}}, {"id": 12, "name": "indisready", "nullable": true, "type": {"oid": 16}}, {"id": 13, "name": "indislive", "nullable": true, "type": {"oid": 16}}, {"id": 14, "name": "indisreplident", "nullable": true, "type": {"oid": 16}}, {"id": 15, "name": "indkey", "nullable": true, "type": {"arrayContents": {"family": "IntFamily", "oid": 21, "width": 16}, "arrayElemType": "IntFamily", "family": 200, "oid": 22, "width": 16}}, {"id": 16, "name": "indcollation", "nullable": true, "type": {"arrayContents": {"family": "OidFamily", "oid": 26}, "arrayElemType": "OidFamily", "family": 201, "oid": 30}}, {"id": 17, "name": "indclass", "nullable": true, "type": {"arrayContents": {"family": "OidFamily", "oid": 26}, "arrayElemType": "OidFamily", "family": 201, "oid": 30}}, {"id": 18, "name": "indoption", "nullable": true, "type": {"arrayContents": {"family": "IntFamily", "oid": 21, "width": 16}, "arrayElemType": "IntFamily", "family": 200, "oid": 22, "width": 16}}, {"id": 19, "name": "indexprs", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 20, "name": "indpred", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 21, "name": "indnkeyatts", "nullable": true, "type": {"family": "IntFamily", "oid": 21, "width": 16}}], "formatVersion": 3, "id": 4294967070, "name": "pg_index", "nextColumnId": 22, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967071 {"table": {"columns": [{"id": 1, "name": "line_number", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 2, "name": "type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "database", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}, {"id": 4, "name": "user_name", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}, {"id": 5, "name": "address", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "netmask", "nullable": true, "type": {"family": 
"StringFamily", "oid": 25}}, {"id": 7, "name": "auth_method", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "options", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}, {"id": 9, "name": "error", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967071, "name": "pg_hba_file_rules", "nextColumnId": 10, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967072 {"table": {"columns": [{"id": 1, "name": "groname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 2, "name": "grosysid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "grolist", "nullable": true, "type": {"arrayContents": {"family": "OidFamily", "oid": 26}, "arrayElemType": "OidFamily", "family": "ArrayFamily", "oid": 1028}}], "formatVersion": 3, "id": 4294967072, "name": "pg_group", "nextColumnId": 4, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967073 {"table": {"columns": [{"id": 1, "name": "ftrelid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "ftserver", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "ftoptions", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}], "formatVersion": 3, "id": 4294967073, "name": "pg_foreign_table", "nextColumnId": 4, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967074 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "srvname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "srvowner", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "srvfdw", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 5, "name": "srvtype", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "srvversion", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "srvacl", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}, {"id": 8, "name": "srvoptions", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": 
"ArrayFamily", "oid": 1009}}], "formatVersion": 3, "id": 4294967074, "name": "pg_foreign_server", "nextColumnId": 9, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967075 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "fdwname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "fdwowner", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "fdwhandler", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 5, "name": "fdwvalidator", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 6, "name": "fdwacl", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}, {"id": 7, "name": "fdwoptions", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}], "formatVersion": 3, "id": 4294967075, "name": "pg_foreign_data_wrapper", "nextColumnId": 8, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967076 {"table": {"columns": [{"id": 1, "name": "sourcefile", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "sourceline", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 3, "name": "seqno", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 4, "name": "name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "setting", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "applied", "nullable": true, "type": {"oid": 16}}, {"id": 7, "name": "error", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967076, "name": "pg_file_settings", "nextColumnId": 8, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967077 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "extname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "extowner", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "extnamespace", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 5, "name": "extrelocatable", "nullable": true, "type": {"oid": 16}}, {"id": 6, "name": "extversion", "nullable": true, "type": 
{"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "extconfig", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "extcondition", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967077, "name": "pg_extension", "nextColumnId": 9, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967078 {"table": {"columns": [{"id": 1, "name": "evtname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 2, "name": "evtevent", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "evtowner", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "evtfoid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 5, "name": "evtenabled", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 6, "name": "evttags", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}, {"id": 7, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}], "formatVersion": 3, "id": 4294967078, "name": "pg_event_trigger", "nextColumnId": 8, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967079 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "enumtypid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "enumsortorder", "nullable": true, "type": {"family": "FloatFamily", "oid": 700, "visibleType": 5, "width": 32}}, {"id": 4, "name": "enumlabel", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967079, "name": "pg_enum", "nextColumnId": 5, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967080 {"table": {"columns": [{"id": 1, "name": "objoid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "classoid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "objsubid", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 4, "name": "description", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967080, "name": "pg_description", "nextColumnId": 5, "nextConstraintId": 1, "nextMutationId": 1, "primaryIndex": {"foreignKey": {}, "geoConfig": {}, "interleave": {}, "partitioning": {}, "sharded": {}}, 
"privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1", "viewQuery": "SELECT objoid, classoid, objsubid, description FROM crdb_internal.kv_catalog_comments WHERE classoid != 4294967084 UNION ALL SELECT oid AS objoid, 4294967052:::OID AS classoid, 0:::INT4 AS objsubid, description AS description FROM crdb_internal.kv_builtin_function_comments"}} +4294967081 {"table": {"columns": [{"id": 1, "name": "classid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "objid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "objsubid", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 4, "name": "refclassid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 5, "name": "refobjid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 6, "name": "refobjsubid", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 7, "name": "deptype", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}], "formatVersion": 3, "id": 4294967081, "name": "pg_depend", "nextColumnId": 8, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967082 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "defaclrole", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "defaclnamespace", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "defaclobjtype", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 5, "name": "defaclacl", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}], "formatVersion": 3, "id": 4294967082, "name": "pg_default_acl", "nextColumnId": 6, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967083 {"table": {"columns": [{"id": 1, "name": "setconfig", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}, {"id": 2, "name": "setdatabase", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "setrole", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}], "formatVersion": 3, "id": 4294967083, "name": "pg_db_role_setting", "nextColumnId": 4, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": 
[{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967084 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "datname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "datdba", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "encoding", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 5, "name": "datcollate", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "datctype", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "datistemplate", "nullable": true, "type": {"oid": 16}}, {"id": 8, "name": "datallowconn", "nullable": true, "type": {"oid": 16}}, {"id": 9, "name": "datconnlimit", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 10, "name": "datlastsysoid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 11, "name": "datfrozenxid", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 12, "name": "datminmxid", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 13, "name": "dattablespace", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 14, "name": "datacl", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}], "formatVersion": 3, "id": 4294967084, "name": "pg_database", "nextColumnId": 15, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967085 {"table": {"columns": [{"id": 1, "name": "name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "statement", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "is_holdable", "nullable": true, "type": {"oid": 16}}, {"id": 4, "name": "is_binary", "nullable": true, "type": {"oid": 16}}, {"id": 5, "name": "is_scrollable", "nullable": true, "type": {"oid": 16}}, {"id": 6, "name": "creation_time", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}], "formatVersion": 3, "id": 4294967085, "name": "pg_cursors", "nextColumnId": 7, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967086 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "conname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "connamespace", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "conowner", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 5, "name": "conforencoding", "nullable": true, 
"type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 6, "name": "contoencoding", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 7, "name": "conproc", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 8, "name": "condefault", "nullable": true, "type": {"oid": 16}}], "formatVersion": 3, "id": 4294967086, "name": "pg_conversion", "nextColumnId": 9, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967087 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "conname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "connamespace", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "contype", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 5, "name": "condeferrable", "nullable": true, "type": {"oid": 16}}, {"id": 6, "name": "condeferred", "nullable": true, "type": {"oid": 16}}, {"id": 7, "name": "convalidated", "nullable": true, "type": {"oid": 16}}, {"id": 8, "name": "conrelid", "type": {"family": "OidFamily", "oid": 26}}, {"id": 9, "name": "contypid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 10, "name": "conindid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 11, "name": "confrelid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 12, "name": "confupdtype", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 13, "name": "confdeltype", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 14, "name": "confmatchtype", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 15, "name": "conislocal", "nullable": true, "type": {"oid": 16}}, {"id": 16, "name": "coninhcount", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 17, "name": "connoinherit", "nullable": true, "type": {"oid": 16}}, {"id": 18, "name": "conkey", "nullable": true, "type": {"arrayContents": {"family": "IntFamily", "oid": 21, "width": 16}, "arrayElemType": "IntFamily", "family": "ArrayFamily", "oid": 1005, "width": 16}}, {"id": 19, "name": "confkey", "nullable": true, "type": {"arrayContents": {"family": "IntFamily", "oid": 21, "width": 16}, "arrayElemType": "IntFamily", "family": "ArrayFamily", "oid": 1005, "width": 16}}, {"id": 20, "name": "conpfeqop", "nullable": true, "type": {"arrayContents": {"family": "OidFamily", "oid": 26}, "arrayElemType": "OidFamily", "family": "ArrayFamily", "oid": 1028}}, {"id": 21, "name": "conppeqop", "nullable": true, "type": {"arrayContents": {"family": "OidFamily", "oid": 26}, "arrayElemType": "OidFamily", "family": "ArrayFamily", "oid": 1028}}, {"id": 22, "name": "conffeqop", "nullable": true, "type": {"arrayContents": {"family": "OidFamily", "oid": 26}, "arrayElemType": "OidFamily", "family": "ArrayFamily", "oid": 1028}}, {"id": 23, "name": "conexclop", "nullable": true, "type": {"arrayContents": {"family": "OidFamily", "oid": 26}, "arrayElemType": 
"OidFamily", "family": "ArrayFamily", "oid": 1028}}, {"id": 24, "name": "conbin", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 25, "name": "consrc", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 26, "name": "condef", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 27, "name": "conparentid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}], "formatVersion": 3, "id": 4294967087, "indexes": [{"foreignKey": {}, "geoConfig": {}, "id": 2, "interleave": {}, "keyColumnDirections": ["ASC"], "keyColumnIds": [8], "keyColumnNames": ["conrelid"], "name": "pg_constraint_conrelid_idx", "partitioning": {}, "sharded": {}, "storeColumnIds": [1, 2, 3, 4, 5, 6, 7, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27], "storeColumnNames": ["oid", "conname", "connamespace", "contype", "condeferrable", "condeferred", "convalidated", "contypid", "conindid", "confrelid", "confupdtype", "confdeltype", "confmatchtype", "conislocal", "coninhcount", "connoinherit", "conkey", "confkey", "conpfeqop", "conppeqop", "conffeqop", "conexclop", "conbin", "consrc", "condef", "conparentid"], "version": 3}], "name": "pg_constraint", "nextColumnId": 28, "nextConstraintId": 2, "nextIndexId": 3, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967088 {"table": {"columns": [{"id": 1, "name": "name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "setting", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967088, "name": "pg_config", "nextColumnId": 3, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967089 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "collname", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "collnamespace", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "collowner", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 5, "name": "collencoding", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 6, "name": "collcollate", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "collctype", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "collprovider", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 9, "name": "collversion", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 10, "name": "collisdeterministic", "nullable": true, "type": {"oid": 16}}], "formatVersion": 3, "id": 4294967089, "name": "pg_collation", "nextColumnId": 11, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": 
{}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967090 {"table": {"columns": [{"id": 1, "name": "oid", "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "relname", "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "relnamespace", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "reltype", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 5, "name": "reloftype", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 6, "name": "relowner", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 7, "name": "relam", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 8, "name": "relfilenode", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 9, "name": "reltablespace", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 10, "name": "relpages", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 11, "name": "reltuples", "nullable": true, "type": {"family": "FloatFamily", "oid": 700, "visibleType": 5, "width": 32}}, {"id": 12, "name": "relallvisible", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 13, "name": "reltoastrelid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 14, "name": "relhasindex", "nullable": true, "type": {"oid": 16}}, {"id": 15, "name": "relisshared", "nullable": true, "type": {"oid": 16}}, {"id": 16, "name": "relpersistence", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 17, "name": "relistemp", "nullable": true, "type": {"oid": 16}}, {"id": 18, "name": "relkind", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 19, "name": "relnatts", "nullable": true, "type": {"family": "IntFamily", "oid": 21, "width": 16}}, {"id": 20, "name": "relchecks", "nullable": true, "type": {"family": "IntFamily", "oid": 21, "width": 16}}, {"id": 21, "name": "relhasoids", "nullable": true, "type": {"oid": 16}}, {"id": 22, "name": "relhaspkey", "nullable": true, "type": {"oid": 16}}, {"id": 23, "name": "relhasrules", "nullable": true, "type": {"oid": 16}}, {"id": 24, "name": "relhastriggers", "nullable": true, "type": {"oid": 16}}, {"id": 25, "name": "relhassubclass", "nullable": true, "type": {"oid": 16}}, {"id": 26, "name": "relfrozenxid", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 27, "name": "relacl", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}, {"id": 28, "name": "reloptions", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}, {"id": 29, "name": "relforcerowsecurity", "nullable": true, "type": {"oid": 16}}, {"id": 30, "name": "relispartition", "nullable": true, "type": {"oid": 16}}, {"id": 31, "name": "relispopulated", "nullable": true, "type": {"oid": 16}}, {"id": 32, "name": "relreplident", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 33, "name": "relrewrite", "nullable": true, "type": {"family": 
"OidFamily", "oid": 26}}, {"id": 34, "name": "relrowsecurity", "nullable": true, "type": {"oid": 16}}, {"id": 35, "name": "relpartbound", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 36, "name": "relminmxid", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294967090, "indexes": [{"foreignKey": {}, "geoConfig": {}, "id": 2, "interleave": {}, "keyColumnDirections": ["ASC"], "keyColumnIds": [1], "keyColumnNames": ["oid"], "name": "pg_class_oid_idx", "partitioning": {}, "sharded": {}, "storeColumnIds": [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36], "storeColumnNames": ["relname", "relnamespace", "reltype", "reloftype", "relowner", "relam", "relfilenode", "reltablespace", "relpages", "reltuples", "relallvisible", "reltoastrelid", "relhasindex", "relisshared", "relpersistence", "relistemp", "relkind", "relnatts", "relchecks", "relhasoids", "relhaspkey", "relhasrules", "relhastriggers", "relhassubclass", "relfrozenxid", "relacl", "reloptions", "relforcerowsecurity", "relispartition", "relispopulated", "relreplident", "relrewrite", "relrowsecurity", "relpartbound", "relminmxid"], "version": 3}], "name": "pg_class", "nextColumnId": 37, "nextConstraintId": 2, "nextIndexId": 3, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967091 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "castsource", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "casttarget", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "castfunc", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 5, "name": "castcontext", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 6, "name": "castmethod", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}], "formatVersion": 3, "id": 4294967091, "name": "pg_cast", "nextColumnId": 7, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967092 {"table": {"columns": [{"id": 1, "name": "name", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 2, "name": "default_version", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "installed_version", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "comment", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967092, "name": "pg_available_extensions", "nextColumnId": 5, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, 
"privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967093 {"table": {"columns": [{"id": 1, "name": "name", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 2, "name": "version", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "installed", "nullable": true, "type": {"oid": 16}}, {"id": 4, "name": "superuser", "nullable": true, "type": {"oid": 16}}, {"id": 5, "name": "trusted", "nullable": true, "type": {"oid": 16}}, {"id": 6, "name": "relocatable", "nullable": true, "type": {"oid": 16}}, {"id": 7, "name": "schema", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 8, "name": "requires", "nullable": true, "type": {"arrayContents": {"family": 11, "oid": 19}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1003}}, {"id": 9, "name": "comment", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967093, "name": "pg_available_extension_versions", "nextColumnId": 10, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967094 {"table": {"columns": [{"id": 1, "name": "roleid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "member", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "grantor", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "admin_option", "nullable": true, "type": {"oid": 16}}], "formatVersion": 3, "id": 4294967094, "name": "pg_auth_members", "nextColumnId": 5, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967095 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "rolname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "rolsuper", "nullable": true, "type": {"oid": 16}}, {"id": 4, "name": "rolinherit", "nullable": true, "type": {"oid": 16}}, {"id": 5, "name": "rolcreaterole", "nullable": true, "type": {"oid": 16}}, {"id": 6, "name": "rolcreatedb", "nullable": true, "type": {"oid": 16}}, {"id": 7, "name": "rolcanlogin", "nullable": true, "type": {"oid": 16}}, {"id": 8, "name": "rolreplication", "nullable": true, "type": {"oid": 16}}, {"id": 9, "name": "rolbypassrls", "nullable": true, "type": {"oid": 16}}, {"id": 10, "name": "rolconnlimit", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 11, "name": "rolpassword", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 12, "name": "rolvaliduntil", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}], "formatVersion": 3, "id": 4294967095, "name": "pg_authid", "nextColumnId": 13, "nextConstraintId": 2, "nextIndexId": 2, 
"nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967096 {"table": {"columns": [{"id": 1, "name": "attrelid", "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "attname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "atttypid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "attstattarget", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 5, "name": "attlen", "nullable": true, "type": {"family": "IntFamily", "oid": 21, "width": 16}}, {"id": 6, "name": "attnum", "nullable": true, "type": {"family": "IntFamily", "oid": 21, "width": 16}}, {"id": 7, "name": "attndims", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 8, "name": "attcacheoff", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 9, "name": "atttypmod", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 10, "name": "attbyval", "nullable": true, "type": {"oid": 16}}, {"id": 11, "name": "attstorage", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 12, "name": "attalign", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 13, "name": "attnotnull", "nullable": true, "type": {"oid": 16}}, {"id": 14, "name": "atthasdef", "nullable": true, "type": {"oid": 16}}, {"id": 15, "name": "attidentity", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 16, "name": "attgenerated", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 17, "name": "attisdropped", "nullable": true, "type": {"oid": 16}}, {"id": 18, "name": "attislocal", "nullable": true, "type": {"oid": 16}}, {"id": 19, "name": "attinhcount", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 20, "name": "attcollation", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 21, "name": "attacl", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}, {"id": 22, "name": "attoptions", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}, {"id": 23, "name": "attfdwoptions", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}, {"id": 24, "name": "atthasmissing", "nullable": true, "type": {"oid": 16}}, {"id": 25, "name": "attmissingval", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}, {"id": 26, "name": "attishidden", "nullable": true, "type": {"oid": 16}}], "formatVersion": 3, "id": 4294967096, "indexes": [{"foreignKey": {}, "geoConfig": {}, "id": 2, "interleave": {}, "keyColumnDirections": ["ASC"], "keyColumnIds": [1], "keyColumnNames": ["attrelid"], "name": "pg_attribute_attrelid_idx", "partitioning": {}, "sharded": {}, 
"storeColumnIds": [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26], "storeColumnNames": ["attname", "atttypid", "attstattarget", "attlen", "attnum", "attndims", "attcacheoff", "atttypmod", "attbyval", "attstorage", "attalign", "attnotnull", "atthasdef", "attidentity", "attgenerated", "attisdropped", "attislocal", "attinhcount", "attcollation", "attacl", "attoptions", "attfdwoptions", "atthasmissing", "attmissingval", "attishidden"], "version": 3}], "name": "pg_attribute", "nextColumnId": 27, "nextConstraintId": 2, "nextIndexId": 3, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967097 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "adrelid", "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "adnum", "nullable": true, "type": {"family": "IntFamily", "oid": 21, "width": 16}}, {"id": 4, "name": "adbin", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "adsrc", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967097, "indexes": [{"foreignKey": {}, "geoConfig": {}, "id": 2, "interleave": {}, "keyColumnDirections": ["ASC"], "keyColumnIds": [2], "keyColumnNames": ["adrelid"], "name": "pg_attrdef_adrelid_idx", "partitioning": {}, "sharded": {}, "storeColumnIds": [1, 3, 4, 5], "storeColumnNames": ["oid", "adnum", "adbin", "adsrc"], "version": 3}], "name": "pg_attrdef", "nextColumnId": 6, "nextConstraintId": 2, "nextIndexId": 3, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967098 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "amprocfamily", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": "amproclefttype", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "amprocrighttype", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 5, "name": "amprocnum", "nullable": true, "type": {"family": "IntFamily", "oid": 21, "width": 16}}, {"id": 6, "name": "amproc", "nullable": true, "type": {"family": "OidFamily", "oid": 24}}], "formatVersion": 3, "id": 4294967098, "name": "pg_amproc", "nextColumnId": 7, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967099 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "amopfamily", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 3, "name": 
"amoplefttype", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 4, "name": "amoprighttype", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 5, "name": "amopstrategy", "nullable": true, "type": {"family": "IntFamily", "oid": 21, "width": 16}}, {"id": 6, "name": "amoppurpose", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 7, "name": "amopopr", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 8, "name": "amopmethod", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 9, "name": "amopsortfamily", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}], "formatVersion": 3, "id": 4294967099, "name": "pg_amop", "nextColumnId": 10, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967100 {"table": {"columns": [{"id": 1, "name": "oid", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 2, "name": "amname", "nullable": true, "type": {"family": 11, "oid": 19}}, {"id": 3, "name": "amstrategies", "nullable": true, "type": {"family": "IntFamily", "oid": 21, "width": 16}}, {"id": 4, "name": "amsupport", "nullable": true, "type": {"family": "IntFamily", "oid": 21, "width": 16}}, {"id": 5, "name": "amcanorder", "nullable": true, "type": {"oid": 16}}, {"id": 6, "name": "amcanorderbyop", "nullable": true, "type": {"oid": 16}}, {"id": 7, "name": "amcanbackward", "nullable": true, "type": {"oid": 16}}, {"id": 8, "name": "amcanunique", "nullable": true, "type": {"oid": 16}}, {"id": 9, "name": "amcanmulticol", "nullable": true, "type": {"oid": 16}}, {"id": 10, "name": "amoptionalkey", "nullable": true, "type": {"oid": 16}}, {"id": 11, "name": "amsearcharray", "nullable": true, "type": {"oid": 16}}, {"id": 12, "name": "amsearchnulls", "nullable": true, "type": {"oid": 16}}, {"id": 13, "name": "amstorage", "nullable": true, "type": {"oid": 16}}, {"id": 14, "name": "amclusterable", "nullable": true, "type": {"oid": 16}}, {"id": 15, "name": "ampredlocks", "nullable": true, "type": {"oid": 16}}, {"id": 16, "name": "amkeytype", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 17, "name": "aminsert", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 18, "name": "ambeginscan", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 19, "name": "amgettuple", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 20, "name": "amgetbitmap", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 21, "name": "amrescan", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 22, "name": "amendscan", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 23, "name": "ammarkpos", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 24, "name": "amrestrpos", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 25, "name": "ambuild", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 26, "name": "ambuildempty", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 27, "name": "ambulkdelete", "nullable": true, "type": {"family": "OidFamily", 
"oid": 26}}, {"id": 28, "name": "amvacuumcleanup", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 29, "name": "amcanreturn", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 30, "name": "amcostestimate", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 31, "name": "amoptions", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 32, "name": "amhandler", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 33, "name": "amtype", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}], "formatVersion": 3, "id": 4294967100, "name": "pg_am", "nextColumnId": 34, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967101 {"table": {"columns": [{"id": 1, "name": "aggfnoid", "nullable": true, "type": {"family": "OidFamily", "oid": 24}}, {"id": 2, "name": "aggkind", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 3, "name": "aggnumdirectargs", "nullable": true, "type": {"family": "IntFamily", "oid": 21, "width": 16}}, {"id": 4, "name": "aggtransfn", "nullable": true, "type": {"family": "OidFamily", "oid": 24}}, {"id": 5, "name": "aggfinalfn", "nullable": true, "type": {"family": "OidFamily", "oid": 24}}, {"id": 6, "name": "aggcombinefn", "nullable": true, "type": {"family": "OidFamily", "oid": 24}}, {"id": 7, "name": "aggserialfn", "nullable": true, "type": {"family": "OidFamily", "oid": 24}}, {"id": 8, "name": "aggdeserialfn", "nullable": true, "type": {"family": "OidFamily", "oid": 24}}, {"id": 9, "name": "aggmtransfn", "nullable": true, "type": {"family": "OidFamily", "oid": 24}}, {"id": 10, "name": "aggminvtransfn", "nullable": true, "type": {"family": "OidFamily", "oid": 24}}, {"id": 11, "name": "aggmfinalfn", "nullable": true, "type": {"family": "OidFamily", "oid": 24}}, {"id": 12, "name": "aggfinalextra", "nullable": true, "type": {"oid": 16}}, {"id": 13, "name": "aggmfinalextra", "nullable": true, "type": {"oid": 16}}, {"id": 14, "name": "aggsortop", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 15, "name": "aggtranstype", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 16, "name": "aggtransspace", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 17, "name": "aggmtranstype", "nullable": true, "type": {"family": "OidFamily", "oid": 26}}, {"id": 18, "name": "aggmtransspace", "nullable": true, "type": {"family": "IntFamily", "oid": 23, "width": 32}}, {"id": 19, "name": "agginitval", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 20, "name": "aggminitval", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 21, "name": "aggfinalmodify", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}, {"id": 22, "name": "aggmfinalmodify", "nullable": true, "type": {"family": "StringFamily", "oid": 18, "visibleType": 9, "width": 1}}], "formatVersion": 3, "id": 4294967101, "name": "pg_aggregate", "nextColumnId": 23, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, 
"foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967102, "version": "1"}} +4294967102 {"schema": {"defaultPrivileges": {"type": "SCHEMA"}, "id": 4294967102, "name": "pg_catalog", "privileges": {"ownerProto": "node", "users": [{"privileges": "512", "userProto": "public"}], "version": 3}, "version": "1"}} +4294967103 {"table": {"columns": [{"id": 1, "name": "table_catalog", "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "table_schema", "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "table_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "view_definition", "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "check_option", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "is_updatable", "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "is_insertable_into", "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "is_trigger_updatable", "type": {"family": "StringFamily", "oid": 25}}, {"id": 9, "name": "is_trigger_deletable", "type": {"family": "StringFamily", "oid": 25}}, {"id": 10, "name": "is_trigger_insertable_into", "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967103, "name": "views", "nextColumnId": 11, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967104 {"table": {"columns": [{"id": 1, "name": "view_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "view_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "view_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "table_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "table_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "table_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967104, "name": "view_table_usage", "nextColumnId": 7, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967105 {"table": {"columns": [{"id": 1, "name": "table_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "table_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "table_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "specific_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "specific_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, 
{"id": 6, "name": "specific_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967105, "name": "view_routine_usage", "nextColumnId": 7, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967106 {"table": {"columns": [{"id": 1, "name": "view_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "view_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "view_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "table_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "table_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "table_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "column_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967106, "name": "view_column_usage", "nextColumnId": 8, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967107 {"table": {"columns": [{"id": 1, "name": "grantee", "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "table_catalog", "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "privilege_type", "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "is_grantable", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967107, "name": "user_privileges", "nextColumnId": 5, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967108 {"table": {"columns": [{"id": 1, "name": "authorization_identifier", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "foreign_server_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "foreign_server_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967108, "name": "user_mappings", "nextColumnId": 4, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967109 {"table": {"columns": [{"id": 1, "name": 
"authorization_identifier", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "foreign_server_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "foreign_server_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "option_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "option_value", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967109, "name": "user_mapping_options", "nextColumnId": 6, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967110 {"table": {"columns": [{"id": 1, "name": "user_defined_type_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "user_defined_type_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "user_defined_type_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "user_defined_type_category", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "is_instantiable", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "is_final", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "ordering_form", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "ordering_category", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 9, "name": "ordering_routine_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 10, "name": "ordering_routine_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 11, "name": "ordering_routine_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 12, "name": "reference_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 13, "name": "data_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 14, "name": "character_maximum_length", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 15, "name": "character_octet_length", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 16, "name": "character_set_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 17, "name": "character_set_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 18, "name": "character_set_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 19, "name": "collation_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 20, "name": "collation_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 21, "name": "collation_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 22, "name": "numeric_precision", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 23, "name": "numeric_precision_radix", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 24, 
"name": "numeric_scale", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 25, "name": "datetime_precision", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 26, "name": "interval_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 27, "name": "interval_precision", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 28, "name": "source_dtd_identifier", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 29, "name": "ref_dtd_identifier", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967110, "name": "user_defined_types", "nextColumnId": 30, "nextConstraintId": 1, "nextMutationId": 1, "primaryIndex": {"foreignKey": {}, "geoConfig": {}, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1", "viewQuery": "SELECT CAST(current_database() AS STRING) AS user_defined_type_catalog, CAST(n.nspname AS STRING) AS user_defined_type_schema, CAST(c.relname AS STRING) AS user_defined_type_name, CAST('STRUCTURED' AS STRING) AS user_defined_type_category, CAST('YES' AS STRING) AS is_instantiable, CAST(NULL AS STRING) AS is_final, CAST(NULL AS STRING) AS ordering_form, CAST(NULL AS STRING) AS ordering_category, CAST(NULL AS STRING) AS ordering_routine_catalog, CAST(NULL AS STRING) AS ordering_routine_schema, CAST(NULL AS STRING) AS ordering_routine_name, CAST(NULL AS STRING) AS reference_type, CAST(NULL AS STRING) AS data_type, CAST(NULL AS INT8) AS character_maximum_length, CAST(NULL AS INT8) AS character_octet_length, CAST(NULL AS STRING) AS character_set_catalog, CAST(NULL AS STRING) AS character_set_schema, CAST(NULL AS STRING) AS character_set_name, CAST(NULL AS STRING) AS collation_catalog, CAST(NULL AS STRING) AS collation_schema, CAST(NULL AS STRING) AS collation_name, CAST(NULL AS INT8) AS numeric_precision, CAST(NULL AS INT8) AS numeric_precision_radix, CAST(NULL AS INT8) AS numeric_scale, CAST(NULL AS INT8) AS datetime_precision, CAST(NULL AS STRING) AS interval_type, CAST(NULL AS INT8) AS interval_precision, CAST(NULL AS STRING) AS source_dtd_identifier, CAST(NULL AS STRING) AS ref_dtd_identifier FROM pg_namespace AS n, pg_class AS c, pg_type AS t WHERE (((n.oid = c.relnamespace) AND (t.typrelid = c.oid)) AND (c.relkind = 'c')) AND (pg_has_role(t.typowner, 'USAGE') OR has_type_privilege(t.oid, 'USAGE'))"}} +4294967111 {"table": {"columns": [{"id": 1, "name": "attribute", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "host", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "user", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967111, "name": "user_attributes", "nextColumnId": 4, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967112 {"table": {"columns": [{"id": 1, "name": "grantor", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, 
{"id": 2, "name": "grantee", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "object_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "object_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "object_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "object_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "privilege_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "is_grantable", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967112, "name": "usage_privileges", "nextColumnId": 9, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967113 {"table": {"columns": [{"id": 1, "name": "grantor", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "grantee", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "udt_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "udt_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "udt_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "privilege_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "is_grantable", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967113, "name": "udt_privileges", "nextColumnId": 8, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967114 {"table": {"columns": [{"id": 1, "name": "grantee", "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "type_catalog", "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "type_schema", "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "type_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "privilege_type", "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "is_grantable", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967114, "name": "type_privileges", "nextColumnId": 7, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967115 {"table": {"columns": [{"id": 1, "name": "trigger_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": 
"trigger_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "trigger_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "event_manipulation", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "event_object_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "event_object_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "event_object_table", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "action_order", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 9, "name": "action_condition", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 10, "name": "action_statement", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 11, "name": "action_orientation", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 12, "name": "action_timing", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 13, "name": "action_reference_old_table", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 14, "name": "action_reference_new_table", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 15, "name": "action_reference_old_row", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 16, "name": "action_reference_new_row", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 17, "name": "created", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}], "formatVersion": 3, "id": 4294967115, "name": "triggers", "nextColumnId": 18, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967116 {"table": {"columns": [{"id": 1, "name": "trigger_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "trigger_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "trigger_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "event_object_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "event_object_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "event_object_table", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "event_object_column", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967116, "name": "triggered_update_columns", "nextColumnId": 8, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967117 {"table": {"columns": [{"id": 1, "name": "udt_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, 
{"id": 2, "name": "udt_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "udt_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "specific_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "specific_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "specific_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "group_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "transform_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967117, "name": "transforms", "nextColumnId": 9, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967118 {"table": {"columns": [{"id": 1, "name": "extent_size", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 2, "name": "logfile_group_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "nodegroup_id", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 4, "name": "tablespace_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "autoextend_size", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 6, "name": "engine", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "maximum_size", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 8, "name": "tablespace_comment", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 9, "name": "tablespace_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967118, "name": "tablespaces", "nextColumnId": 10, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967119 {"table": {"columns": [{"id": 1, "name": "engine_attribute", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "tablespace_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967119, "name": "tablespaces_extensions", "nextColumnId": 3, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967120 {"table": {"columns": [{"id": 1, "name": "table_catalog", "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "table_schema", "type": {"family": "StringFamily", "oid": 
25}}, {"id": 3, "name": "table_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "table_type", "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "is_insertable_into", "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "version", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294967120, "name": "tables", "nextColumnId": 7, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967121 {"table": {"columns": [{"id": 1, "name": "engine_attribute", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "secondary_engine_attribute", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "table_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "table_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "table_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967121, "name": "tables_extensions", "nextColumnId": 6, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967122 {"table": {"columns": [{"id": 1, "name": "grantor", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "grantee", "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "table_catalog", "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "table_schema", "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "table_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "privilege_type", "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "is_grantable", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "with_hierarchy", "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967122, "name": "table_privileges", "nextColumnId": 9, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967123 {"table": {"columns": [{"id": 1, "name": "constraint_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "constraint_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "constraint_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "engine_attribute", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "secondary_engine_attribute", 
"nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "table_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967123, "name": "table_constraints_extensions", "nextColumnId": 7, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967124 {"table": {"columns": [{"id": 1, "name": "constraint_catalog", "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "constraint_schema", "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "constraint_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "table_catalog", "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "table_schema", "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "table_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "constraint_type", "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "is_deferrable", "type": {"family": "StringFamily", "oid": 25}}, {"id": 9, "name": "initially_deferred", "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967124, "name": "table_constraints", "nextColumnId": 10, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967125 {"table": {"columns": [{"id": 1, "name": "table_catalog", "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "table_schema", "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "table_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "non_unique", "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "index_schema", "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "index_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "seq_in_index", "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 8, "name": "column_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 9, "name": "COLLATION", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 10, "name": "cardinality", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 11, "name": "direction", "type": {"family": "StringFamily", "oid": 25}}, {"id": 12, "name": "storing", "type": {"family": "StringFamily", "oid": 25}}, {"id": 13, "name": "implicit", "type": {"family": "StringFamily", "oid": 25}}, {"id": 14, "name": "is_visible", "type": {"family": "StringFamily", "oid": 25}}, {"id": 15, "name": "visibility", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}], "formatVersion": 3, "id": 4294967125, "name": "statistics", "nextColumnId": 16, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": 
{"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967126 {"table": {"columns": [{"id": 1, "name": "conversion_factor", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 2, "name": "description", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "unit_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "unit_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967126, "name": "st_units_of_measure", "nextColumnId": 5, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967127 {"table": {"columns": [{"id": 1, "name": "srs_id", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 2, "name": "srs_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "definition", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "description", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "organization", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "organization_coordsys_id", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294967127, "name": "st_spatial_reference_systems", "nextColumnId": 7, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967128 {"table": {"columns": [{"id": 1, "name": "srs_id", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 2, "name": "srs_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "table_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "table_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "table_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "column_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "geometry_type_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967128, "name": "st_geometry_columns", "nextColumnId": 8, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967129 {"table": {"columns": [{"id": 1, "name": "variable", "type": {"family": 
"StringFamily", "oid": 25}}, {"id": 2, "name": "value", "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967129, "name": "session_variables", "nextColumnId": 3, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967130 {"table": {"columns": [{"id": 1, "name": "sequence_catalog", "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "sequence_schema", "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "sequence_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "data_type", "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "numeric_precision", "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 6, "name": "numeric_precision_radix", "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 7, "name": "numeric_scale", "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 8, "name": "start_value", "type": {"family": "StringFamily", "oid": 25}}, {"id": 9, "name": "minimum_value", "type": {"family": "StringFamily", "oid": 25}}, {"id": 10, "name": "maximum_value", "type": {"family": "StringFamily", "oid": 25}}, {"id": 11, "name": "increment", "type": {"family": "StringFamily", "oid": 25}}, {"id": 12, "name": "cycle_option", "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967130, "name": "sequences", "nextColumnId": 13, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967131 {"table": {"columns": [{"id": 1, "name": "grantee", "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "table_catalog", "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "table_schema", "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "privilege_type", "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "is_grantable", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967131, "name": "schema_privileges", "nextColumnId": 6, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967132 {"table": {"columns": [{"id": 1, "name": "catalog_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "schema_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "default_character_set_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "sql_path", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "crdb_is_user_defined", "nullable": true, "type": {"family": 
"StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967132, "name": "schemata", "nextColumnId": 6, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967133 {"table": {"columns": [{"id": 1, "name": "catalog_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "options", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "schema_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967133, "name": "schemata_extensions", "nextColumnId": 4, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967134 {"table": {"columns": [{"id": 1, "name": "sizing_id", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 2, "name": "sizing_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "supported_value", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 4, "name": "comments", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967134, "name": "sql_sizing", "nextColumnId": 5, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967135 {"table": {"columns": [{"id": 1, "name": "feature_id", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "feature_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "is_supported", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "is_verified_by", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "comments", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967135, "name": "sql_parts", "nextColumnId": 6, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967136 {"table": {"columns": [{"id": 1, "name": "implementation_info_id", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "implementation_info_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "integer_value", "nullable": true, "type": 
{"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 4, "name": "character_value", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "comments", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967136, "name": "sql_implementation_info", "nextColumnId": 6, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967137 {"table": {"columns": [{"id": 1, "name": "feature_id", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "feature_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "sub_feature_id", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "sub_feature_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "is_supported", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "is_verified_by", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "comments", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967137, "name": "sql_features", "nextColumnId": 8, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967138 {"table": {"columns": [{"id": 1, "name": "specific_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "specific_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "specific_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "routine_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "routine_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "routine_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "routine_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "module_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 9, "name": "module_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 10, "name": "module_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 11, "name": "udt_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 12, "name": "udt_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 13, "name": "udt_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 14, "name": "data_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 15, "name": "character_maximum_length", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 16, "name": 
"character_octet_length", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 17, "name": "character_set_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 18, "name": "character_set_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 19, "name": "character_set_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 20, "name": "collation_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 21, "name": "collation_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 22, "name": "collation_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 23, "name": "numeric_precision", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 24, "name": "numeric_precision_radix", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 25, "name": "numeric_scale", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 26, "name": "datetime_precision", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 27, "name": "interval_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 28, "name": "interval_precision", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 29, "name": "type_udt_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 30, "name": "type_udt_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 31, "name": "type_udt_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 32, "name": "scope_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 33, "name": "scope_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 34, "name": "scope_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 35, "name": "maximum_cardinality", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 36, "name": "dtd_identifier", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 37, "name": "routine_body", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 38, "name": "routine_definition", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 39, "name": "external_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 40, "name": "external_language", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 41, "name": "parameter_style", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 42, "name": "is_deterministic", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 43, "name": "sql_data_access", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 44, "name": "is_null_call", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 45, "name": "sql_path", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 46, "name": "schema_level_routine", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 47, "name": "max_dynamic_result_sets", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 48, "name": "is_user_defined_cast", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 49, "name": 
"is_implicitly_invocable", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 50, "name": "security_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 51, "name": "to_sql_specific_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 52, "name": "to_sql_specific_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 53, "name": "to_sql_specific_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 54, "name": "as_locator", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 55, "name": "created", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 56, "name": "last_altered", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 57, "name": "new_savepoint_level", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 58, "name": "is_udt_dependent", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 59, "name": "result_cast_from_data_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 60, "name": "result_cast_as_locator", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 61, "name": "result_cast_char_max_length", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 62, "name": "result_cast_char_octet_length", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 63, "name": "result_cast_char_set_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 64, "name": "result_cast_char_set_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 65, "name": "result_cast_char_set_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 66, "name": "result_cast_collation_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 67, "name": "result_cast_collation_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 68, "name": "result_cast_collation_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 69, "name": "result_cast_numeric_precision", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 70, "name": "result_cast_numeric_precision_radix", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 71, "name": "result_cast_numeric_scale", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 72, "name": "result_cast_datetime_precision", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 73, "name": "result_cast_interval_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 74, "name": "result_cast_interval_precision", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 75, "name": "result_cast_type_udt_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 76, "name": "result_cast_type_udt_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 77, "name": "result_cast_type_udt_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 78, "name": "result_cast_scope_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 79, "name": "result_cast_scope_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 
25}}, {"id": 80, "name": "result_cast_scope_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 81, "name": "result_cast_maximum_cardinality", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 82, "name": "result_cast_dtd_identifier", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967138, "name": "routines", "nextColumnId": 83, "nextConstraintId": 1, "nextMutationId": 1, "primaryIndex": {"foreignKey": {}, "geoConfig": {}, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1", "viewQuery": "SELECT CAST(current_database() AS STRING) AS specific_catalog, CAST(n.nspname AS STRING) AS specific_schema, CAST(nameconcatoid(p.proname, p.oid) AS STRING) AS specific_name, CAST(current_database() AS STRING) AS routine_catalog, CAST(n.nspname AS STRING) AS routine_schema, CAST(p.proname AS STRING) AS routine_name, CAST(CASE p.prokind WHEN 'f' THEN 'FUNCTION' WHEN 'p' THEN 'PROCEDURE' END AS STRING) AS routine_type, CAST(NULL AS STRING) AS module_catalog, CAST(NULL AS STRING) AS module_schema, CAST(NULL AS STRING) AS module_name, CAST(NULL AS STRING) AS udt_catalog, CAST(NULL AS STRING) AS udt_schema, CAST(NULL AS STRING) AS udt_name, CAST(CASE WHEN p.prokind = 'p' THEN NULL WHEN (t.typelem != 0) AND (t.typlen = -1) THEN 'ARRAY' WHEN nt.nspname = 'pg_catalog' THEN format_type(t.oid, NULL) ELSE 'USER-DEFINED' END AS STRING) AS data_type, CAST(NULL AS INT8) AS character_maximum_length, CAST(NULL AS INT8) AS character_octet_length, CAST(NULL AS STRING) AS character_set_catalog, CAST(NULL AS STRING) AS character_set_schema, CAST(NULL AS STRING) AS character_set_name, CAST(NULL AS STRING) AS collation_catalog, CAST(NULL AS STRING) AS collation_schema, CAST(NULL AS STRING) AS collation_name, CAST(NULL AS INT8) AS numeric_precision, CAST(NULL AS INT8) AS numeric_precision_radix, CAST(NULL AS INT8) AS numeric_scale, CAST(NULL AS INT8) AS datetime_precision, CAST(NULL AS STRING) AS interval_type, CAST(NULL AS INT8) AS interval_precision, CAST(CASE WHEN nt.nspname IS NOT NULL THEN current_database() END AS STRING) AS type_udt_catalog, CAST(nt.nspname AS STRING) AS type_udt_schema, CAST(t.typname AS STRING) AS type_udt_name, CAST(NULL AS STRING) AS scope_catalog, CAST(NULL AS STRING) AS scope_schema, CAST(NULL AS STRING) AS scope_name, CAST(NULL AS INT8) AS maximum_cardinality, CAST(CASE WHEN p.prokind != 'p' THEN 0 END AS STRING) AS dtd_identifier, CAST(CASE WHEN l.lanname = 'sql' THEN 'SQL' ELSE 'EXTERNAL' END AS STRING) AS routine_body, CAST(CASE WHEN pg_has_role(p.proowner, 'USAGE') THEN p.prosrc ELSE NULL END AS STRING) AS routine_definition, CAST(CASE WHEN l.lanname = 'c' THEN p.prosrc ELSE NULL END AS STRING) AS external_name, CAST(upper(l.lanname) AS STRING) AS external_language, CAST('GENERAL' AS STRING) AS parameter_style, CAST(CASE WHEN p.provolatile = 'i' THEN 'YES' ELSE 'NO' END AS STRING) AS is_deterministic, CAST('MODIFIES' AS STRING) AS sql_data_access, CAST(CASE WHEN p.prokind != 'p' THEN CASE WHEN p.proisstrict THEN 'YES' ELSE 'NO' END END AS STRING) AS is_null_call, CAST(NULL AS STRING) AS sql_path, CAST('YES' AS STRING) AS schema_level_routine, CAST(0 AS INT8) AS max_dynamic_result_sets, CAST(NULL AS STRING) AS is_user_defined_cast, CAST(NULL AS STRING) AS is_implicitly_invocable, CAST(CASE WHEN 
p.prosecdef THEN 'DEFINER' ELSE 'INVOKER' END AS STRING) AS security_type, CAST(NULL AS STRING) AS to_sql_specific_catalog, CAST(NULL AS STRING) AS to_sql_specific_schema, CAST(NULL AS STRING) AS to_sql_specific_name, CAST('NO' AS STRING) AS as_locator, CAST(NULL AS TIMESTAMPTZ) AS created, CAST(NULL AS TIMESTAMPTZ) AS last_altered, CAST(NULL AS STRING) AS new_savepoint_level, CAST('NO' AS STRING) AS is_udt_dependent, CAST(NULL AS STRING) AS result_cast_from_data_type, CAST(NULL AS STRING) AS result_cast_as_locator, CAST(NULL AS INT8) AS result_cast_char_max_length, CAST(NULL AS INT8) AS result_cast_char_octet_length, CAST(NULL AS STRING) AS result_cast_char_set_catalog, CAST(NULL AS STRING) AS result_cast_char_set_schema, CAST(NULL AS STRING) AS result_cast_char_set_name, CAST(NULL AS STRING) AS result_cast_collation_catalog, CAST(NULL AS STRING) AS result_cast_collation_schema, CAST(NULL AS STRING) AS result_cast_collation_name, CAST(NULL AS INT8) AS result_cast_numeric_precision, CAST(NULL AS INT8) AS result_cast_numeric_precision_radix, CAST(NULL AS INT8) AS result_cast_numeric_scale, CAST(NULL AS INT8) AS result_cast_datetime_precision, CAST(NULL AS STRING) AS result_cast_interval_type, CAST(NULL AS INT8) AS result_cast_interval_precision, CAST(NULL AS STRING) AS result_cast_type_udt_catalog, CAST(NULL AS STRING) AS result_cast_type_udt_schema, CAST(NULL AS STRING) AS result_cast_type_udt_name, CAST(NULL AS STRING) AS result_cast_scope_catalog, CAST(NULL AS STRING) AS result_cast_scope_schema, CAST(NULL AS STRING) AS result_cast_scope_name, CAST(NULL AS INT8) AS result_cast_maximum_cardinality, CAST(NULL AS STRING) AS result_cast_dtd_identifier FROM (pg_catalog.pg_namespace AS n JOIN pg_catalog.pg_proc AS p ON n.oid = p.pronamespace JOIN pg_catalog.pg_language AS l ON p.prolang = l.oid) LEFT JOIN (pg_catalog.pg_type AS t JOIN pg_catalog.pg_namespace AS nt ON t.typnamespace = nt.oid) ON (p.prorettype = t.oid) AND (p.prokind != 'p') WHERE (pg_has_role(p.proowner, 'USAGE') OR has_function_privilege(p.oid, 'EXECUTE'))"}} +4294967139 {"table": {"columns": [{"id": 1, "name": "grantor", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "grantee", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "specific_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "specific_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "specific_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "routine_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "routine_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "routine_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 9, "name": "privilege_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 10, "name": "is_grantable", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967139, "name": "routine_privileges", "nextColumnId": 11, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 
4294967189, "version": "1"}} +4294967140 {"table": {"columns": [{"id": 1, "name": "grantor", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "grantee", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "object_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "object_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "object_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "object_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "privilege_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "is_grantable", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967140, "name": "role_usage_grants", "nextColumnId": 9, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967141 {"table": {"columns": [{"id": 1, "name": "grantor", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "grantee", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "udt_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "udt_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "udt_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "privilege_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "is_grantable", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967141, "name": "role_udt_grants", "nextColumnId": 8, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967142 {"table": {"columns": [{"id": 1, "name": "grantor", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "grantee", "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "table_catalog", "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "table_schema", "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "table_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "privilege_type", "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "is_grantable", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "with_hierarchy", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967142, "name": "role_table_grants", "nextColumnId": 9, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, 
"privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967143 {"table": {"columns": [{"id": 1, "name": "grantor", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "grantee", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "specific_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "specific_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "specific_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "routine_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "routine_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "routine_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 9, "name": "privilege_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 10, "name": "is_grantable", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967143, "name": "role_routine_grants", "nextColumnId": 11, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967144 {"table": {"columns": [{"id": 1, "name": "grantor", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "grantee", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "table_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "table_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "table_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "column_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "privilege_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "is_grantable", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967144, "name": "role_column_grants", "nextColumnId": 9, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967145 {"table": {"columns": [{"id": 1, "name": "resource_group_enabled", "nullable": true, "type": {"family": "IntFamily", "oid": 21, "width": 16}}, {"id": 2, "name": "resource_group_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "resource_group_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "thread_priority", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 5, "name": "vcpu_ids", "nullable": true, 
"type": {"family": "BytesFamily", "oid": 17}}], "formatVersion": 3, "id": 4294967145, "name": "resource_groups", "nextColumnId": 6, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967146 {"table": {"columns": [{"id": 1, "name": "constraint_catalog", "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "constraint_schema", "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "constraint_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "unique_constraint_catalog", "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "unique_constraint_schema", "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "unique_constraint_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "match_option", "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "update_rule", "type": {"family": "StringFamily", "oid": 25}}, {"id": 9, "name": "delete_rule", "type": {"family": "StringFamily", "oid": 25}}, {"id": 10, "name": "table_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 11, "name": "referenced_table_name", "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967146, "name": "referential_constraints", "nextColumnId": 12, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967147 {"table": {"columns": [{"id": 1, "name": "cpu_system", "nullable": true, "type": {"family": "DecimalFamily", "oid": 1700}}, {"id": 2, "name": "messages_sent", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 3, "name": "swaps", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 4, "name": "block_ops_in", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 5, "name": "block_ops_out", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 6, "name": "context_voluntary", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 7, "name": "cpu_user", "nullable": true, "type": {"family": "DecimalFamily", "oid": 1700}}, {"id": 8, "name": "query_id", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 9, "name": "source_function", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 10, "name": "context_involuntary", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 11, "name": "duration", "nullable": true, "type": {"family": "DecimalFamily", "oid": 1700}}, {"id": 12, "name": "page_faults_major", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 13, "name": "page_faults_minor", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 14, "name": "seq", "nullable": true, "type": {"family": "IntFamily", "oid": 20, 
"width": 64}}, {"id": 15, "name": "source_file", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 16, "name": "state", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 17, "name": "messages_received", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 18, "name": "source_line", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294967147, "name": "profiling", "nextColumnId": 19, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967148 {"table": {"columns": [{"id": 1, "name": "host", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "id", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 3, "name": "info", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "state", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "time", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 6, "name": "user", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "command", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "db", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967148, "name": "processlist", "nextColumnId": 9, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967149 {"table": {"columns": [{"id": 1, "name": "plugin_version", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "load_option", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "plugin_description", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "plugin_library_version", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "plugin_status", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "plugin_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "plugin_type_version", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "plugin_author", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 9, "name": "plugin_library", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 10, "name": "plugin_license", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 11, "name": "plugin_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967149, "name": "plugins", "nextColumnId": 12, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, 
"interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967150 {"table": {"columns": [{"id": 1, "name": "data_free", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 2, "name": "partition_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "subpartition_expression", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "table_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "table_rows", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 6, "name": "avg_row_length", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 7, "name": "check_time", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 8, "name": "create_time", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 9, "name": "index_length", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 10, "name": "nodegroup", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 11, "name": "partition_comment", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 12, "name": "partition_description", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 13, "name": "table_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 14, "name": "checksum", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 15, "name": "partition_expression", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 16, "name": "partition_method", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 17, "name": "subpartition_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 18, "name": "tablespace_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 19, "name": "update_time", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 20, "name": "data_length", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 21, "name": "max_data_length", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 22, "name": "partition_ordinal_position", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 23, "name": "subpartition_method", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 24, "name": "subpartition_ordinal_position", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 25, "name": "table_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967150, "name": "partitions", "nextColumnId": 26, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967151 {"table": {"columns": [{"id": 1, "name": "specific_catalog", "nullable": true, "type": 
{"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "specific_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "specific_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "ordinal_position", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 5, "name": "parameter_mode", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "is_result", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "as_locator", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "parameter_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 9, "name": "data_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 10, "name": "character_maximum_length", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 11, "name": "character_octet_length", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 12, "name": "character_set_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 13, "name": "character_set_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 14, "name": "character_set_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 15, "name": "collation_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 16, "name": "collation_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 17, "name": "collation_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 18, "name": "numeric_precision", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 19, "name": "numeric_precision_radix", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 20, "name": "numeric_scale", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 21, "name": "datetime_precision", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 22, "name": "interval_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 23, "name": "interval_precision", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 24, "name": "udt_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 25, "name": "udt_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 26, "name": "udt_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 27, "name": "scope_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 28, "name": "scope_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 29, "name": "scope_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 30, "name": "maximum_cardinality", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 31, "name": "dtd_identifier", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 32, "name": "parameter_default", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967151, "name": "parameters", "nextColumnId": 33, "nextConstraintId": 1, "nextMutationId": 1, "primaryIndex": {"foreignKey": {}, "geoConfig": {}, "interleave": {}, 
"partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1", "viewQuery": "SELECT CAST(current_database() AS STRING) AS specific_catalog, CAST(n_nspname AS STRING) AS specific_schema, CAST(nameconcatoid(proname, p_oid) AS STRING) AS specific_name, CAST((ss.x).n AS INT8) AS ordinal_position, CAST(CASE WHEN proargmodes IS NULL THEN 'IN' WHEN proargmodes[(ss.x).n] = 'i' THEN 'IN' WHEN proargmodes[(ss.x).n] = 'o' THEN 'OUT' WHEN proargmodes[(ss.x).n] = 'b' THEN 'INOUT' WHEN proargmodes[(ss.x).n] = 'v' THEN 'IN' WHEN proargmodes[(ss.x).n] = 't' THEN 'OUT' END AS STRING) AS parameter_mode, CAST('NO' AS STRING) AS is_result, CAST('NO' AS STRING) AS as_locator, CAST(NULLIF(proargnames[(ss.x).n], '') AS STRING) AS parameter_name, CAST(CASE WHEN (t.typelem != 0) AND (t.typlen = -1) THEN 'ARRAY' WHEN nt.nspname = 'pg_catalog' THEN format_type(t.oid, NULL) ELSE 'USER-DEFINED' END AS STRING) AS data_type, CAST(NULL AS INT8) AS character_maximum_length, CAST(NULL AS INT8) AS character_octet_length, CAST(NULL AS STRING) AS character_set_catalog, CAST(NULL AS STRING) AS character_set_schema, CAST(NULL AS STRING) AS character_set_name, CAST(NULL AS STRING) AS collation_catalog, CAST(NULL AS STRING) AS collation_schema, CAST(NULL AS STRING) AS collation_name, CAST(NULL AS INT8) AS numeric_precision, CAST(NULL AS INT8) AS numeric_precision_radix, CAST(NULL AS INT8) AS numeric_scale, CAST(NULL AS INT8) AS datetime_precision, CAST(NULL AS STRING) AS interval_type, CAST(NULL AS INT8) AS interval_precision, CAST(current_database() AS STRING) AS udt_catalog, CAST(nt.nspname AS STRING) AS udt_schema, CAST(t.typname AS STRING) AS udt_name, CAST(NULL AS STRING) AS scope_catalog, CAST(NULL AS STRING) AS scope_schema, CAST(NULL AS STRING) AS scope_name, CAST(NULL AS INT8) AS maximum_cardinality, CAST((ss.x).n AS STRING) AS dtd_identifier, CAST(CASE WHEN pg_has_role(proowner, 'USAGE') THEN pg_get_function_arg_default(p_oid, (ss.x).n) ELSE NULL END AS STRING) AS parameter_default FROM pg_type AS t, pg_namespace AS nt, (SELECT n.nspname AS n_nspname, p.proname, p.oid AS p_oid, p.proowner, p.proargnames, p.proargmodes, information_schema._pg_expandarray(COALESCE(p.proallargtypes, p.proargtypes::OID[])) AS x FROM pg_namespace AS n, pg_proc AS p WHERE (n.oid = p.pronamespace) AND (pg_has_role(p.proowner, 'USAGE') OR has_function_privilege(p.oid, 'EXECUTE'))) AS ss WHERE (t.oid = (ss.x).x) AND (t.typnamespace = nt.oid)"}} +4294967152 {"table": {"columns": [{"id": 1, "name": "insufficient_privileges", "nullable": true, "type": {"family": "IntFamily", "oid": 21, "width": 16}}, {"id": 2, "name": "missing_bytes_beyond_max_mem_size", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 3, "name": "query", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "trace", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967152, "name": "optimizer_trace", "nextColumnId": 5, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} 
+4294967153 {"table": {"columns": [{"id": 1, "name": "word", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "reserved", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294967153, "name": "keywords", "nextColumnId": 3, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967154 {"table": {"columns": [{"id": 1, "name": "constraint_catalog", "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "constraint_schema", "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "constraint_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "table_catalog", "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "table_schema", "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "table_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "column_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "ordinal_position", "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 9, "name": "position_in_unique_constraint", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294967154, "name": "key_column_usage", "nextColumnId": 10, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967155 {"table": {"columns": [{"id": 1, "name": "catalog_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967155, "name": "information_schema_catalog_name", "nextColumnId": 2, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967156 {"table": {"columns": [{"id": 1, "name": "foreign_table_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "foreign_table_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "foreign_table_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "foreign_server_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "foreign_server_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967156, "name": "foreign_tables", "nextColumnId": 6, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": 
[{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967157 {"table": {"columns": [{"id": 1, "name": "foreign_table_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "foreign_table_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "foreign_table_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "option_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "option_value", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967157, "name": "foreign_table_options", "nextColumnId": 6, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967158 {"table": {"columns": [{"id": 1, "name": "foreign_server_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "foreign_server_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "foreign_data_wrapper_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "foreign_data_wrapper_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "foreign_server_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "foreign_server_version", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "authorization_identifier", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967158, "name": "foreign_servers", "nextColumnId": 8, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967159 {"table": {"columns": [{"id": 1, "name": "foreign_server_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "foreign_server_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "option_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "option_value", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967159, "name": "foreign_server_options", "nextColumnId": 5, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967160 {"table": {"columns": [{"id": 1, "name": "foreign_data_wrapper_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 
25}}, {"id": 2, "name": "foreign_data_wrapper_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "authorization_identifier", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "library_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "foreign_data_wrapper_language", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967160, "name": "foreign_data_wrappers", "nextColumnId": 6, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967161 {"table": {"columns": [{"id": 1, "name": "foreign_data_wrapper_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "foreign_data_wrapper_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "option_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "option_value", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967161, "name": "foreign_data_wrapper_options", "nextColumnId": 5, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967162 {"table": {"columns": [{"id": 1, "name": "last_update_time", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}, {"id": 2, "name": "table_rows", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}, {"id": 3, "name": "autoextend_size", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 4, "name": "check_time", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}, {"id": 5, "name": "checksum", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}, {"id": 6, "name": "extra", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "file_id", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 8, "name": "table_name", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}, {"id": 9, "name": "avg_row_length", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}, {"id": 10, "name": "extent_size", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 11, "name": "file_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 12, "name": "free_extents", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 13, "name": "max_data_length", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}, {"id": 14, "name": "table_schema", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}, {"id": 15, "name": "update_time", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}, {"id": 16, "name": "data_length", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}, {"id": 17, 
"name": "tablespace_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 18, "name": "version", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 19, "name": "create_time", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}, {"id": 20, "name": "initial_size", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 21, "name": "logfile_group_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 22, "name": "maximum_size", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 23, "name": "status", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 24, "name": "update_count", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}, {"id": 25, "name": "creation_time", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}, {"id": 26, "name": "engine", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 27, "name": "fulltext_keys", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}, {"id": 28, "name": "row_format", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 29, "name": "total_extents", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 30, "name": "data_free", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 31, "name": "index_length", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}, {"id": 32, "name": "last_access_time", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}, {"id": 33, "name": "table_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 34, "name": "transaction_counter", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}, {"id": 35, "name": "file_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 36, "name": "logfile_group_number", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 37, "name": "recover_time", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}, {"id": 38, "name": "deleted_rows", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}], "formatVersion": 3, "id": 4294967162, "name": "files", "nextColumnId": 39, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967163 {"table": {"columns": [{"id": 1, "name": "definer", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "event_definition", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "event_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "interval_value", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "last_altered", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 6, "name": "on_completion", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "originator", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 8, "name": "collation_connection", "nullable": true, "type": 
{"family": "StringFamily", "oid": 25}}, {"id": 9, "name": "database_collation", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 10, "name": "event_body", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 11, "name": "event_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 12, "name": "execute_at", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 13, "name": "interval_field", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 14, "name": "starts", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 15, "name": "time_zone", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 16, "name": "character_set_client", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 17, "name": "ends", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 18, "name": "event_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 19, "name": "event_comment", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 20, "name": "event_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 21, "name": "last_executed", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 22, "name": "sql_mode", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}, {"id": 23, "name": "status", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 24, "name": "created", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}], "formatVersion": 3, "id": 4294967163, "name": "events", "nextColumnId": 25, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967164 {"table": {"columns": [{"id": 1, "name": "support", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "transactions", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "xa", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "comment", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "engine", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "savepoints", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967164, "name": "engines", "nextColumnId": 7, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967165 {"table": {"columns": [{"id": 1, "name": "role_name", "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967165, "name": "enabled_roles", "nextColumnId": 2, "nextConstraintId": 2, "nextIndexId": 2, 
"nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967166 {"table": {"columns": [{"id": 1, "name": "object_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "object_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "object_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "object_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "collection_type_identifier", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "data_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "character_maximum_length", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 8, "name": "character_octet_length", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 9, "name": "character_set_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 10, "name": "character_set_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 11, "name": "character_set_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 12, "name": "collation_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 13, "name": "collation_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 14, "name": "collation_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 15, "name": "numeric_precision", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 16, "name": "numeric_precision_radix", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 17, "name": "numeric_scale", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 18, "name": "datetime_precision", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 19, "name": "interval_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 20, "name": "interval_precision", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 21, "name": "domain_default", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 22, "name": "udt_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 23, "name": "udt_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 24, "name": "udt_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 25, "name": "scope_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 26, "name": "scope_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 27, "name": "scope_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 28, "name": "maximum_cardinality", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 29, "name": "dtd_identifier", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967166, "name": "element_types", 
"nextColumnId": 30, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967167 {"table": {"columns": [{"id": 1, "name": "domain_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "domain_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "domain_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "data_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "character_maximum_length", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 6, "name": "character_octet_length", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 7, "name": "character_set_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "character_set_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 9, "name": "character_set_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 10, "name": "collation_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 11, "name": "collation_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 12, "name": "collation_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 13, "name": "numeric_precision", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 14, "name": "numeric_precision_radix", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 15, "name": "numeric_scale", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 16, "name": "datetime_precision", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 17, "name": "interval_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 18, "name": "interval_precision", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 19, "name": "domain_default", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 20, "name": "udt_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 21, "name": "udt_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 22, "name": "udt_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 23, "name": "scope_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 24, "name": "scope_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 25, "name": "scope_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 26, "name": "maximum_cardinality", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 27, "name": "dtd_identifier", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967167, "name": "domains", "nextColumnId": 28, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, 
"interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967168 {"table": {"columns": [{"id": 1, "name": "udt_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "udt_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "udt_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "domain_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "domain_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "domain_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967168, "name": "domain_udt_usage", "nextColumnId": 7, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967169 {"table": {"columns": [{"id": 1, "name": "constraint_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "constraint_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "constraint_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "domain_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "domain_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "domain_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "is_deferrable", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "initially_deferred", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967169, "name": "domain_constraints", "nextColumnId": 9, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967170 {"table": {"columns": [{"id": 1, "name": "object_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "object_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "object_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "object_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "dtd_identifier", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967170, "name": "data_type_privileges", "nextColumnId": 6, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", 
"users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967171 {"table": {"columns": [{"id": 1, "name": "table_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "table_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "table_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "constraint_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "constraint_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "constraint_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967171, "name": "constraint_table_usage", "nextColumnId": 7, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967172 {"table": {"columns": [{"id": 1, "name": "table_catalog", "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "table_schema", "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "table_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "column_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "constraint_catalog", "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "constraint_schema", "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "constraint_name", "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967172, "name": "constraint_column_usage", "nextColumnId": 8, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967173 {"table": {"columns": [{"id": 1, "name": "table_catalog", "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "table_schema", "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "table_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "column_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "column_comment", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "ordinal_position", "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 7, "name": "column_default", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "is_nullable", "type": {"family": "StringFamily", "oid": 25}}, {"id": 9, "name": "data_type", "type": {"family": "StringFamily", "oid": 25}}, {"id": 10, "name": "character_maximum_length", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 11, "name": "character_octet_length", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 12, "name": "numeric_precision", "nullable": true, "type": {"family": "IntFamily", 
"oid": 20, "width": 64}}, {"id": 13, "name": "numeric_precision_radix", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 14, "name": "numeric_scale", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 15, "name": "datetime_precision", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 16, "name": "interval_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 17, "name": "interval_precision", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 18, "name": "character_set_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 19, "name": "character_set_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 20, "name": "character_set_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 21, "name": "collation_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 22, "name": "collation_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 23, "name": "collation_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 24, "name": "domain_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 25, "name": "domain_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 26, "name": "domain_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 27, "name": "udt_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 28, "name": "udt_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 29, "name": "udt_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 30, "name": "scope_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 31, "name": "scope_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 32, "name": "scope_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 33, "name": "maximum_cardinality", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 34, "name": "dtd_identifier", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 35, "name": "is_self_referencing", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 36, "name": "is_identity", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 37, "name": "identity_generation", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 38, "name": "identity_start", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 39, "name": "identity_increment", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 40, "name": "identity_maximum", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 41, "name": "identity_minimum", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 42, "name": "identity_cycle", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 43, "name": "is_generated", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 44, "name": "generation_expression", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 45, "name": "is_updatable", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 46, "name": "is_hidden", "type": 
{"family": "StringFamily", "oid": 25}}, {"id": 47, "name": "crdb_sql_type", "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967173, "name": "columns", "nextColumnId": 48, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967174 {"table": {"columns": [{"id": 1, "name": "engine_attribute", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "secondary_engine_attribute", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "table_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "table_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "table_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "column_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967174, "name": "columns_extensions", "nextColumnId": 7, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967175 {"table": {"columns": [{"id": 1, "name": "udt_catalog", "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "udt_schema", "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "udt_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "table_catalog", "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "table_schema", "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "table_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "column_name", "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967175, "name": "column_udt_usage", "nextColumnId": 8, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967176 {"table": {"columns": [{"id": 1, "name": "column_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "histogram", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "schema_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "table_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967176, "name": "column_statistics", "nextColumnId": 5, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", 
"users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967177 {"table": {"columns": [{"id": 1, "name": "grantor", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "grantee", "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "table_catalog", "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "table_schema", "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "table_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "column_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "privilege_type", "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "is_grantable", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967177, "name": "column_privileges", "nextColumnId": 9, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967178 {"table": {"columns": [{"id": 1, "name": "table_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "table_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "table_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "column_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "option_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "option_value", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967178, "name": "column_options", "nextColumnId": 7, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967179 {"table": {"columns": [{"id": 1, "name": "domain_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "domain_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "domain_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "table_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "table_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "table_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "column_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967179, "name": "column_domain_usage", "nextColumnId": 8, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", 
"userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967180 {"table": {"columns": [{"id": 1, "name": "table_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "table_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "table_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "column_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "dependent_column", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967180, "name": "column_column_usage", "nextColumnId": 6, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967181 {"table": {"columns": [{"id": 1, "name": "collation_catalog", "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "collation_schema", "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "collation_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "pad_attribute", "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967181, "name": "collations", "nextColumnId": 5, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967182 {"table": {"columns": [{"id": 1, "name": "collation_catalog", "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "collation_schema", "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "collation_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "character_set_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "character_set_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "character_set_name", "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967182, "name": "collation_character_set_applicability", "nextColumnId": 7, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967183 {"table": {"columns": [{"id": 1, "name": "constraint_catalog", "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "constraint_schema", "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "constraint_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "check_clause", "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967183, "name": "check_constraints", "nextColumnId": 5, 
"nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967184 {"table": {"columns": [{"id": 1, "name": "constraint_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "constraint_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "constraint_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "specific_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "specific_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "specific_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967184, "name": "check_constraint_routine_usage", "nextColumnId": 7, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967185 {"table": {"columns": [{"id": 1, "name": "character_set_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "character_set_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "character_set_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "character_repertoire", "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "form_of_use", "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "default_collate_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "default_collate_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "default_collate_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967185, "name": "character_sets", "nextColumnId": 9, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967186 {"table": {"columns": [{"id": 1, "name": "udt_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "udt_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "udt_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "attribute_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "ordinal_position", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 6, "name": "attribute_default", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "is_nullable", "nullable": true, 
"type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "data_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 9, "name": "character_maximum_length", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 10, "name": "character_octet_length", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 11, "name": "character_set_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 12, "name": "character_set_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 13, "name": "character_set_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 14, "name": "collation_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 15, "name": "collation_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 16, "name": "collation_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 17, "name": "numeric_precision", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 18, "name": "numeric_precision_radix", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 19, "name": "numeric_scale", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 20, "name": "datetime_precision", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 21, "name": "interval_type", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 22, "name": "interval_precision", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 23, "name": "attribute_udt_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 24, "name": "attribute_udt_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 25, "name": "attribute_udt_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 26, "name": "scope_catalog", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 27, "name": "scope_schema", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 28, "name": "scope_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 29, "name": "maximum_cardinality", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 30, "name": "dtd_identifier", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 31, "name": "is_derived_reference_attribute", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967186, "name": "attributes", "nextColumnId": 32, "nextConstraintId": 1, "nextMutationId": 1, "primaryIndex": {"foreignKey": {}, "geoConfig": {}, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1", "viewQuery": "SELECT CAST(current_database() AS STRING) AS udt_catalog, CAST(nc.nspname AS STRING) AS udt_schema, CAST(c.relname AS STRING) AS udt_name, CAST(a.attname AS STRING) AS attribute_name, CAST(a.attnum AS INT8) AS ordinal_position, CAST(pg_get_expr(ad.adbin, ad.adrelid) AS STRING) AS attribute_default, CAST(CASE WHEN a.attnotnull OR ((t.typtype = 'd') AND t.typnotnull) THEN 'NO' ELSE 'YES' END AS STRING) AS is_nullable, CAST(CASE 
WHEN (t.typelem != 0) AND (t.typlen = -1) THEN 'ARRAY' WHEN nt.nspname = 'pg_catalog' THEN format_type(a.atttypid, NULL) ELSE 'USER-DEFINED' END AS STRING) AS data_type, CAST(information_schema._pg_char_max_length(information_schema._pg_truetypid(a, t), information_schema._pg_truetypmod(a, t)) AS INT8) AS character_maximum_length, CAST(information_schema._pg_char_octet_length(information_schema._pg_truetypid(a, t), information_schema._pg_truetypmod(a, t)) AS INT8) AS character_octet_length, CAST(NULL AS STRING) AS character_set_catalog, CAST(NULL AS STRING) AS character_set_schema, CAST(NULL AS STRING) AS character_set_name, CAST(CASE WHEN nco.nspname IS NOT NULL THEN current_database() END AS STRING) AS collation_catalog, CAST(nco.nspname AS STRING) AS collation_schema, CAST(co.collname AS STRING) AS collation_name, CAST(information_schema._pg_numeric_precision(information_schema._pg_truetypid(a, t), information_schema._pg_truetypmod(a, t)) AS INT8) AS numeric_precision, CAST(information_schema._pg_numeric_precision_radix(information_schema._pg_truetypid(a, t), information_schema._pg_truetypmod(a, t)) AS INT8) AS numeric_precision_radix, CAST(information_schema._pg_numeric_scale(information_schema._pg_truetypid(a, t), information_schema._pg_truetypmod(a, t)) AS INT8) AS numeric_scale, CAST(information_schema._pg_datetime_precision(information_schema._pg_truetypid(a, t), information_schema._pg_truetypmod(a, t)) AS INT8) AS datetime_precision, CAST(information_schema._pg_interval_type(information_schema._pg_truetypid(a, t), information_schema._pg_truetypmod(a, t)) AS STRING) AS interval_type, CAST(NULL AS INT8) AS interval_precision, CAST(current_database() AS STRING) AS attribute_udt_catalog, CAST(nt.nspname AS STRING) AS attribute_udt_schema, CAST(t.typname AS STRING) AS attribute_udt_name, CAST(NULL AS STRING) AS scope_catalog, CAST(NULL AS STRING) AS scope_schema, CAST(NULL AS STRING) AS scope_name, CAST(NULL AS INT8) AS maximum_cardinality, CAST(a.attnum AS STRING) AS dtd_identifier, CAST('NO' AS STRING) AS is_derived_reference_attribute FROM (pg_attribute AS a LEFT JOIN pg_attrdef AS ad ON (attrelid = adrelid) AND (attnum = adnum)) JOIN (pg_class AS c JOIN pg_namespace AS nc ON (c.relnamespace = nc.oid)) ON a.attrelid = c.oid JOIN (pg_type AS t JOIN pg_namespace AS nt ON (t.typnamespace = nt.oid)) ON a.atttypid = t.oid LEFT JOIN (pg_collation AS co JOIN pg_namespace AS nco ON (co.collnamespace = nco.oid)) ON (a.attcollation = co.oid) AND ((nco.nspname, co.collname) != ('pg_catalog', 'default')) WHERE (((a.attnum > 0) AND (NOT a.attisdropped)) AND (c.relkind IN ('c',))) AND (pg_has_role(c.relowner, 'USAGE') OR has_type_privilege(c.reltype, 'USAGE'))"}} +4294967187 {"table": {"columns": [{"id": 1, "name": "grantee", "type": {"family": "StringFamily", "oid": 25}}, {"id": 2, "name": "role_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "is_grantable", "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967187, "name": "applicable_roles", "nextColumnId": 4, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967188 {"table": {"columns": [{"id": 1, "name": "grantee", "type": {"family": "StringFamily", 
"oid": 25}}, {"id": 2, "name": "role_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "is_grantable", "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967188, "name": "administrable_role_authorizations", "nextColumnId": 4, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967189, "version": "1"}} +4294967189 {"schema": {"defaultPrivileges": {"type": "SCHEMA"}, "id": 4294967189, "name": "information_schema", "privileges": {"ownerProto": "node", "users": [{"privileges": "512", "userProto": "public"}], "version": 3}, "version": "1"}} +4294967190 {"table": {"columns": [{"id": 1, "name": "object_id", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 2, "name": "schema_id", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 3, "name": "database_id", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 4, "name": "object_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "schema_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "database_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 7, "name": "fq_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967190, "name": "fully_qualified_names", "nextColumnId": 8, "nextConstraintId": 1, "nextMutationId": 1, "primaryIndex": {"foreignKey": {}, "geoConfig": {}, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967295, "version": "1", "viewQuery": "SELECT t.id, sc.id, db.id, t.name, sc.name, db.name, (((quote_ident(db.name) || '.') || quote_ident(sc.name)) || '.') || quote_ident(t.name) FROM system.namespace AS t JOIN system.namespace AS sc ON t.\"parentSchemaID\" = sc.id JOIN system.namespace AS db ON t.\"parentID\" = db.id WHERE (db.\"parentID\" = 0) AND pg_catalog.has_database_privilege(db.name, 'CONNECT')"}} 4294967191 {"table": {"columns": [{"id": 1, "name": "stream_id", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 2, "name": "consumer", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "recv_wait", "nullable": true, "type": {"family": "IntervalFamily", "intervalDurationField": {}, "oid": 1186}}, {"id": 4, "name": "last_recv_wait", "nullable": true, "type": {"family": "IntervalFamily", "intervalDurationField": {}, "oid": 1186}}, {"id": 5, "name": "flush_count", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 6, "name": "flush_time", "nullable": true, "type": {"family": "IntervalFamily", "intervalDurationField": {}, "oid": 1186}}, {"id": 7, "name": "flush_kvs", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 8, "name": "flush_bytes", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 9, "name": "flush_batches", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 
10, "name": "last_time", "nullable": true, "type": {"family": "IntervalFamily", "intervalDurationField": {}, "oid": 1186}}, {"id": 11, "name": "last_kvs", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 12, "name": "last_bytes", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 13, "name": "last_slowest", "nullable": true, "type": {"family": "IntervalFamily", "intervalDurationField": {}, "oid": 1186}}, {"id": 14, "name": "cur_time", "nullable": true, "type": {"family": "IntervalFamily", "intervalDurationField": {}, "oid": 1186}}, {"id": 15, "name": "cur_kvs_done", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 16, "name": "cur_kvs_todo", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 17, "name": "cur_batches", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 18, "name": "cur_slowest", "nullable": true, "type": {"family": "IntervalFamily", "intervalDurationField": {}, "oid": 1186}}], "formatVersion": 3, "id": 4294967191, "name": "logical_replication_node_processors", "nextColumnId": 19, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967295, "version": "1"}} 4294967192 {"table": {"columns": [{"id": 1, "name": "stream_id", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 2, "name": "consumer", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "span_start", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "span_end", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "resolved", "nullable": true, "type": {"family": "DecimalFamily", "oid": 1700}}, {"id": 6, "name": "resolved_age", "nullable": true, "type": {"family": "IntervalFamily", "intervalDurationField": {}, "oid": 1186}}], "formatVersion": 3, "id": 4294967192, "name": "cluster_replication_node_stream_checkpoints", "nextColumnId": 7, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967295, "version": "1"}} 4294967193 {"table": {"columns": [{"id": 1, "name": "stream_id", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 2, "name": "consumer", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "span_start", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "span_end", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967193, "name": "cluster_replication_node_stream_spans", "nextColumnId": 5, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": 
"public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967295, "version": "1"}} @@ -432,7 +434,7 @@ SELECT id, strip_volatile(descriptor) FROM crdb_internal.kv_catalog_descriptor O 4294967222 {"table": {"columns": [{"id": 1, "name": "node_id", "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 2, "name": "application_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "key", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "statement_ids", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}, {"id": 5, "name": "count", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 6, "name": "max_retries", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 7, "name": "service_lat_avg", "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 8, "name": "service_lat_var", "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 9, "name": "retry_lat_avg", "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 10, "name": "retry_lat_var", "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 11, "name": "commit_lat_avg", "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 12, "name": "commit_lat_var", "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 13, "name": "idle_lat_avg", "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 14, "name": "idle_lat_var", "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 15, "name": "rows_read_avg", "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 16, "name": "rows_read_var", "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 17, "name": "network_bytes_avg", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 18, "name": "network_bytes_var", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 19, "name": "network_msgs_avg", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 20, "name": "network_msgs_var", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 21, "name": "max_mem_usage_avg", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 22, "name": "max_mem_usage_var", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 23, "name": "max_disk_usage_avg", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 24, "name": "max_disk_usage_var", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 25, "name": "contention_time_avg", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 26, "name": "contention_time_var", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 27, "name": "cpu_sql_nanos_avg", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 28, "name": "cpu_sql_nanos_var", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 29, "name": "mvcc_step_avg", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 30, "name": "mvcc_step_var", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, 
{"id": 31, "name": "mvcc_step_internal_avg", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 32, "name": "mvcc_step_internal_var", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 33, "name": "mvcc_seek_avg", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 34, "name": "mvcc_seek_var", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 35, "name": "mvcc_seek_internal_avg", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 36, "name": "mvcc_seek_internal_var", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 37, "name": "mvcc_block_bytes_avg", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 38, "name": "mvcc_block_bytes_var", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 39, "name": "mvcc_block_bytes_in_cache_avg", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 40, "name": "mvcc_block_bytes_in_cache_var", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 41, "name": "mvcc_key_bytes_avg", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 42, "name": "mvcc_key_bytes_var", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 43, "name": "mvcc_value_bytes_avg", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 44, "name": "mvcc_value_bytes_var", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 45, "name": "mvcc_point_count_avg", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 46, "name": "mvcc_point_count_var", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 47, "name": "mvcc_points_covered_by_range_tombstones_avg", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 48, "name": "mvcc_points_covered_by_range_tombstones_var", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 49, "name": "mvcc_range_key_count_avg", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 50, "name": "mvcc_range_key_count_var", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 51, "name": "mvcc_range_key_contained_points_avg", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 52, "name": "mvcc_range_key_contained_points_var", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 53, "name": "mvcc_range_key_skipped_points_avg", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 54, "name": "mvcc_range_key_skipped_points_var", "nullable": true, "type": {"family": "FloatFamily", "oid": 701, "width": 64}}], "formatVersion": 3, "id": 4294967222, "name": "node_transaction_statistics", "nextColumnId": 55, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967295, "version": "1"}} 4294967223 
{"table": {"columns": [{"id": 1, "name": "table_id", "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 2, "name": "table_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "estimated_row_count", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}], "formatVersion": 3, "id": 4294967223, "name": "table_row_statistics", "nextColumnId": 4, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967295, "version": "1"}} 4294967224 {"table": {"columns": [{"id": 1, "name": "table_id", "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 2, "name": "parent_id", "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 3, "name": "name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 4, "name": "database_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "version", "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 6, "name": "mod_time", "type": {"family": "TimestampFamily", "oid": 1114}}, {"id": 7, "name": "mod_time_logical", "type": {"family": "DecimalFamily", "oid": 1700}}, {"id": 8, "name": "format_version", "type": {"family": "StringFamily", "oid": 25}}, {"id": 9, "name": "state", "type": {"family": "StringFamily", "oid": 25}}, {"id": 10, "name": "sc_lease_node_id", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 11, "name": "sc_lease_expiration_time", "nullable": true, "type": {"family": "TimestampFamily", "oid": 1114}}, {"id": 12, "name": "drop_time", "nullable": true, "type": {"family": "TimestampFamily", "oid": 1114}}, {"id": 13, "name": "audit_mode", "type": {"family": "StringFamily", "oid": 25}}, {"id": 14, "name": "schema_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 15, "name": "parent_schema_id", "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 16, "name": "locality", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967224, "indexes": [{"foreignKey": {}, "geoConfig": {}, "id": 2, "interleave": {}, "keyColumnDirections": ["ASC"], "keyColumnIds": [2], "keyColumnNames": ["parent_id"], "name": "tables_parent_id_idx", "partitioning": {}, "predicate": "drop_time IS NULL", "sharded": {}, "storeColumnIds": [1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], "storeColumnNames": ["table_id", "name", "database_name", "version", "mod_time", "mod_time_logical", "format_version", "state", "sc_lease_node_id", "sc_lease_expiration_time", "drop_time", "audit_mode", "schema_name", "parent_schema_id", "locality"], "version": 3}, {"foreignKey": {}, "geoConfig": {}, "id": 3, "interleave": {}, "keyColumnDirections": ["ASC"], "keyColumnIds": [4], "keyColumnNames": ["database_name"], "name": "tables_database_name_idx", "partitioning": {}, "predicate": "drop_time IS NULL", "sharded": {}, "storeColumnIds": [1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], "storeColumnNames": ["table_id", "parent_id", "name", "version", "mod_time", "mod_time_logical", "format_version", "state", "sc_lease_node_id", "sc_lease_expiration_time", "drop_time", "audit_mode", "schema_name", "parent_schema_id", "locality"], "version": 3}], "name": "tables", "nextColumnId": 
17, "nextConstraintId": 2, "nextIndexId": 4, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967295, "version": "1"}} -4294967225 {"table": {"columns": [{"id": 1, "name": "descriptor_id", "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 2, "name": "start_key", "type": {"family": "BytesFamily", "oid": 17}}, {"id": 3, "name": "end_key", "type": {"family": "BytesFamily", "oid": 17}}], "formatVersion": 3, "id": 4294967225, "indexes": [{"foreignKey": {}, "geoConfig": {}, "id": 2, "interleave": {}, "keyColumnDirections": ["ASC"], "keyColumnIds": [1], "keyColumnNames": ["descriptor_id"], "name": "table_spans_descriptor_id_idx", "partitioning": {}, "sharded": {}, "storeColumnIds": [2, 3], "storeColumnNames": ["start_key", "end_key"], "version": 3}], "name": "table_spans", "nextColumnId": 4, "nextConstraintId": 2, "nextIndexId": 3, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967295, "version": "1"}} +4294967225 {"table": {"columns": [{"id": 1, "name": "descriptor_id", "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 2, "name": "start_key", "type": {"family": "BytesFamily", "oid": 17}}, {"id": 3, "name": "end_key", "type": {"family": "BytesFamily", "oid": 17}}, {"id": 4, "name": "dropped", "type": {"oid": 16}}], "formatVersion": 3, "id": 4294967225, "indexes": [{"foreignKey": {}, "geoConfig": {}, "id": 2, "interleave": {}, "keyColumnDirections": ["ASC"], "keyColumnIds": [1], "keyColumnNames": ["descriptor_id"], "name": "table_spans_descriptor_id_idx", "partitioning": {}, "sharded": {}, "storeColumnIds": [2, 3, 4], "storeColumnNames": ["start_key", "end_key", "dropped"], "version": 3}], "name": "table_spans", "nextColumnId": 5, "nextConstraintId": 2, "nextIndexId": 3, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967295, "version": "1"}} 4294967226 {"table": {"columns": [{"id": 1, "name": "descriptor_id", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 2, "name": "descriptor_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "index_id", "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 4, "name": "index_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "index_type", "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "is_unique", "type": {"oid": 16}}, {"id": 7, "name": "is_inverted", "type": {"oid": 16}}, {"id": 8, "name": "is_sharded", "type": {"oid": 16}}, {"id": 9, "name": "is_visible", "type": {"oid": 16}}, {"id": 10, "name": "visibility", "type": {"family": "FloatFamily", "oid": 701, "width": 64}}, {"id": 11, "name": "shard_bucket_count", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 12, "name": 
"created_at", "nullable": true, "type": {"family": "TimestampFamily", "oid": 1114}}, {"id": 13, "name": "create_statement", "type": {"family": "StringFamily", "oid": 25}}], "formatVersion": 3, "id": 4294967226, "name": "table_indexes", "nextColumnId": 14, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967295, "version": "1"}} 4294967227 {"table": {"columns": [{"id": 1, "name": "descriptor_id", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 2, "name": "descriptor_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 3, "name": "column_id", "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 4, "name": "column_name", "type": {"family": "StringFamily", "oid": 25}}, {"id": 5, "name": "column_type", "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "nullable", "type": {"oid": 16}}, {"id": 7, "name": "default_expr", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 8, "name": "hidden", "type": {"oid": 16}}], "formatVersion": 3, "id": 4294967227, "name": "table_columns", "nextColumnId": 9, "nextConstraintId": 2, "nextIndexId": 2, "nextMutationId": 1, "primaryIndex": {"constraintId": 1, "foreignKey": {}, "geoConfig": {}, "id": 1, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967295, "version": "1"}} 4294967228 {"table": {"columns": [{"id": 1, "name": "aggregated_ts", "nullable": true, "type": {"family": "TimestampTZFamily", "oid": 1184}}, {"id": 2, "name": "fingerprint_id", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}, {"id": 3, "name": "transaction_fingerprint_id", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}, {"id": 4, "name": "plan_hash", "nullable": true, "type": {"family": "BytesFamily", "oid": 17}}, {"id": 5, "name": "app_name", "nullable": true, "type": {"family": "StringFamily", "oid": 25}}, {"id": 6, "name": "node_id", "nullable": true, "type": {"family": "IntFamily", "oid": 20, "width": 64}}, {"id": 7, "name": "agg_interval", "nullable": true, "type": {"family": "IntervalFamily", "intervalDurationField": {}, "oid": 1186}}, {"id": 8, "name": "metadata", "nullable": true, "type": {"family": "JsonFamily", "oid": 3802}}, {"id": 9, "name": "statistics", "nullable": true, "type": {"family": "JsonFamily", "oid": 3802}}, {"id": 10, "name": "plan", "nullable": true, "type": {"family": "JsonFamily", "oid": 3802}}, {"id": 11, "name": "index_recommendations", "nullable": true, "type": {"arrayContents": {"family": "StringFamily", "oid": 25}, "arrayElemType": "StringFamily", "family": "ArrayFamily", "oid": 1009}}], "formatVersion": 3, "id": 4294967228, "name": "statement_statistics_persisted_v22_2", "nextColumnId": 12, "nextConstraintId": 1, "nextMutationId": 1, "primaryIndex": {"foreignKey": {}, "geoConfig": {}, "interleave": {}, "partitioning": {}, "sharded": {}}, "privileges": {"ownerProto": "node", "users": [{"privileges": "32", "userProto": "public"}], "version": 3}, "replacementOf": {"time": {}}, "unexposedParentSchemaId": 4294967295, "version": "1", "viewQuery": "SELECT 
aggregated_ts, fingerprint_id, transaction_fingerprint_id, plan_hash, app_name, node_id, agg_interval, metadata, statistics, plan, index_recommendations FROM system.statement_statistics"}} diff --git a/pkg/sql/logictest/testdata/logic_test/create_index b/pkg/sql/logictest/testdata/logic_test/create_index index cf854162ba0e..ef7185151eff 100644 --- a/pkg/sql/logictest/testdata/logic_test/create_index +++ b/pkg/sql/logictest/testdata/logic_test/create_index @@ -159,19 +159,19 @@ subtest create_index_concurrently statement ok CREATE TABLE create_index_concurrently_tbl (a int) -onlyif config local-read-committed +onlyif config weak-iso-level-configs statement ok CREATE INDEX CONCURRENTLY create_index_concurrently_idx ON create_index_concurrently_tbl(a) -# Read committed emits extra notices, so skip it. -skipif config local-read-committed +# Weak isolation levels emit extra notices, so skip them. +skipif config weak-iso-level-configs query T noticetrace CREATE INDEX CONCURRENTLY create_index_concurrently_idx ON create_index_concurrently_tbl(a) ---- NOTICE: CONCURRENTLY is not required as all indexes are created concurrently -# Read committed emits extra notices, so skip it. -skipif config local-read-committed +# Weak isolation levels emit extra notices, so skip them. +skipif config weak-iso-level-configs query T noticetrace CREATE INDEX CONCURRENTLY IF NOT EXISTS create_index_concurrently_idx ON create_index_concurrently_tbl(a) ---- @@ -186,8 +186,8 @@ create_index_concurrently_tbl CREATE TABLE public.create_index_concurrently_tbl INDEX create_index_concurrently_idx (a ASC) ) -# Read committed emits extra notices, so skip it. -skipif config local-read-committed +# Weak isolation levels emit extra notices, so skip them. +skipif config weak-iso-level-configs query T noticetrace DROP INDEX CONCURRENTLY create_index_concurrently_idx ---- @@ -195,14 +195,14 @@ NOTICE: CONCURRENTLY is not required as all indexes are dropped concurrently NOTICE: the data for dropped indexes is reclaimed asynchronously HINT: The reclamation delay can be customized in the zone configuration for the table. -# Read committed emits extra notices, so skip it. -skipif config local-read-committed +# Weak isolation levels emit extra notices, so skip them. +skipif config weak-iso-level-configs query T noticetrace DROP INDEX CONCURRENTLY IF EXISTS create_index_concurrently_idx ---- NOTICE: CONCURRENTLY is not required as all indexes are dropped concurrently -onlyif config local-read-committed +onlyif config weak-iso-level-configs statement ok DROP INDEX CONCURRENTLY create_index_concurrently_idx diff --git a/pkg/sql/logictest/testdata/logic_test/create_statements b/pkg/sql/logictest/testdata/logic_test/create_statements index 4943c1988abc..45c7a7e8c261 100644 --- a/pkg/sql/logictest/testdata/logic_test/create_statements +++ b/pkg/sql/logictest/testdata/logic_test/create_statements @@ -1,6 +1,7 @@ -# LogicTest: !local-read-committed -# This test is skipped under READ COMMITTED since it asserts on the NOTICE -# output, and read committed schema changes always send out extra notices. +# LogicTest: !weak-iso-level-configs +# This test is skipped under READ COMMITTED and REPEATABLE READ, since it +# asserts on the NOTICE output, and weak isolation level schema changes always +# send out extra notices. 
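A note on the weak-iso-level-configs gating above: the "extra notices" are the schema-change notices that READ COMMITTED and REPEATABLE READ sessions emit, and the notice text itself is asserted verbatim in later hunks of this patch. A minimal sketch of how it surfaces in a session, assuming a cluster with READ COMMITTED enabled (the table and index names here are hypothetical):

SET default_transaction_isolation = 'read committed';
CREATE INDEX idx_a ON t_example(a);
-- NOTICE: setting transaction isolation level to SERIALIZABLE due to schema change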
statement ok CREATE TABLE t ( diff --git a/pkg/sql/logictest/testdata/logic_test/create_table b/pkg/sql/logictest/testdata/logic_test/create_table index edc1c0798308..9d8971070f1a 100644 --- a/pkg/sql/logictest/testdata/logic_test/create_table +++ b/pkg/sql/logictest/testdata/logic_test/create_table @@ -44,12 +44,18 @@ set require_explicit_primary_keys=true statement error pq: no primary key specified for table t \(require_explicit_primary_keys = true\) CREATE TABLE t (x INT, y INT) +# Make sure the setting does not block a table that _does_ have a PK. +statement ok +CREATE TABLE t (x INT PRIMARY KEY, y INT) + # Regression for #45496. statement ok reset require_explicit_primary_keys; statement ok DROP TABLE IF EXISTS t; + +statement ok CREATE TABLE t (rowid INT, rowid_1 INT, FAMILY (rowid, rowid_1)) query T rowsort @@ -657,8 +663,8 @@ FROM ( LEFT JOIN pg_catalog.pg_depend r ON l.table_id = r.objid; ---- table_id name state refobjid -161 test_serial_b_seq PUBLIC 160 -160 test_serial PUBLIC NULL +162 test_serial_b_seq PUBLIC 161 +161 test_serial PUBLIC NULL statement ok DROP TABLE test_serial; @@ -690,8 +696,8 @@ FROM ( LEFT JOIN pg_catalog.pg_depend r ON l.table_id = r.objid; ---- table_id name state refobjid -163 test_serial_b_seq PUBLIC 162 -162 test_serial PUBLIC NULL +164 test_serial_b_seq PUBLIC 163 +163 test_serial PUBLIC NULL statement ok ALTER TABLE test_serial DROP COLUMN b; @@ -706,7 +712,7 @@ FROM ( LEFT JOIN pg_catalog.pg_depend r ON l.table_id = r.objid; ---- table_id name state refobjid -162 test_serial PUBLIC NULL +163 test_serial PUBLIC NULL statement ok DROP TABLE test_serial; diff --git a/pkg/sql/logictest/testdata/logic_test/cross_version_tenant_backup b/pkg/sql/logictest/testdata/logic_test/cross_version_tenant_backup index 2a9fc0e23386..ff43786ada81 100644 --- a/pkg/sql/logictest/testdata/logic_test/cross_version_tenant_backup +++ b/pkg/sql/logictest/testdata/logic_test/cross_version_tenant_backup @@ -19,11 +19,11 @@ foo 3 statement ok BACKUP TENANT 3 INTO 'userfile:///1/example' -upgrade 0 +upgrade all -upgrade 1 - -upgrade 2 +# Wait for upgrade to finalize. +statement ok +SET CLUSTER SETTING version = crdb_internal.node_executable_version() statement ok RESTORE TENANT 3 FROM LATEST IN 'userfile:///1/example' with tenant_name = 'baz' diff --git a/pkg/sql/logictest/testdata/logic_test/dangerous_statements b/pkg/sql/logictest/testdata/logic_test/dangerous_statements index a247ec64d88e..0f1b2694b9f8 100644 --- a/pkg/sql/logictest/testdata/logic_test/dangerous_statements +++ b/pkg/sql/logictest/testdata/logic_test/dangerous_statements @@ -40,9 +40,13 @@ statement error rejected.*: SELECT FOR UPDATE without WHERE or LIMIT clause statement ok (SELECT * FROM foo WHERE x = 2) FOR UPDATE +# Skipped due to https://github.com/cockroachdb/cockroach/issues/129647. +skipif config weak-iso-level-configs statement ok SELECT * FROM (SELECT * FROM foo WHERE x = 2) FOR UPDATE +# Skipped due to https://github.com/cockroachdb/cockroach/issues/129647. +skipif config weak-iso-level-configs statement ok SELECT * FROM (SELECT * FROM (SELECT * FROM foo) WHERE x = 2) FOR UPDATE @@ -55,6 +59,8 @@ SELECT * FROM (SELECT * FROM foo WHERE x = 2 FOR UPDATE) m, (SELECT * FROM foo) statement error rejected.*: SELECT FOR SHARE without WHERE or LIMIT clause SELECT * FROM (SELECT * FROM foo FOR SHARE) m, (SELECT * FROM foo) n WHERE m.x = n.x +# Skipped due to https://github.com/cockroachdb/cockroach/issues/129647. 
+skipif config weak-iso-level-configs statement ok SELECT * FROM (SELECT * FROM (SELECT * FROM foo) WHERE x > 1) WHERE x > 2 FOR UPDATE diff --git a/pkg/sql/logictest/testdata/logic_test/datetime b/pkg/sql/logictest/testdata/logic_test/datetime index 4b359020c3d4..5faa5ed51482 100644 --- a/pkg/sql/logictest/testdata/logic_test/datetime +++ b/pkg/sql/logictest/testdata/logic_test/datetime @@ -1707,11 +1707,10 @@ SET TIME ZONE 0 subtest infinity_time -# TODO(#41564): this should display "infinity", "-infinity". query TT SELECT 'infinity'::timestamp, '-infinity'::timestamptz ---- -294276-12-31 23:59:59.999999 +0000 +0000 -4713-11-24 00:00:00 +0000 +0000 +infinity -infinity # Verify that parse_timestamp can be used in computed column expressions. statement ok diff --git a/pkg/sql/logictest/testdata/logic_test/dist_vectorize b/pkg/sql/logictest/testdata/logic_test/dist_vectorize index 9de20d4e09e3..b48dfa038518 100644 --- a/pkg/sql/logictest/testdata/logic_test/dist_vectorize +++ b/pkg/sql/logictest/testdata/logic_test/dist_vectorize @@ -30,7 +30,7 @@ query TTTI rowsort SELECT start_key, end_key, replicas, lease_holder from [SHOW RANGES FROM TABLE kv WITH DETAILS] ORDER by 1 ---- - …/1/1 {1} 1 + …/1/1 {1} 1 …/1/1 …/1/2 {1} 1 …/1/2 …/1/3 {2} 2 …/1/3 …/1/4 {3} 3 diff --git a/pkg/sql/logictest/testdata/logic_test/distsql_agg b/pkg/sql/logictest/testdata/logic_test/distsql_agg index feda662c5e5b..993a615b9b94 100644 --- a/pkg/sql/logictest/testdata/logic_test/distsql_agg +++ b/pkg/sql/logictest/testdata/logic_test/distsql_agg @@ -26,7 +26,7 @@ SELECT start_key, end_key, replicas, lease_holder FROM [SHOW RANGES FROM TABLE d ORDER BY 1 ---- start_key end_key replicas lease_holder - …/1/1 {1} 1 + …/1/1 {1} 1 …/1/1 …/1/2 {2} 2 …/1/2 …/1/3 {3} 3 …/1/3 …/1/4 {4} 4 diff --git a/pkg/sql/logictest/testdata/logic_test/distsql_automatic_partial_stats b/pkg/sql/logictest/testdata/logic_test/distsql_automatic_partial_stats new file mode 100644 index 000000000000..c77694f1a6bc --- /dev/null +++ b/pkg/sql/logictest/testdata/logic_test/distsql_automatic_partial_stats @@ -0,0 +1,177 @@ +# LogicTest: !metamorphic-batch-sizes + +# Test a simple update and insert case for partial statistics + +# Disable automatic stats +statement ok +SET CLUSTER SETTING sql.stats.automatic_collection.enabled = false + +statement ok +SET CLUSTER SETTING sql.stats.automatic_partial_collection.min_stale_rows = 5 + +statement ok +CREATE TABLE data (a INT, b INT, c FLOAT, d DECIMAL, PRIMARY KEY (a, b, c), INDEX c_idx (c), INDEX d_idx (d)) WITH (sql_stats_automatic_partial_collection_enabled = true) + +statement ok +INSERT INTO data SELECT a, b, c::FLOAT, 1 FROM + generate_series(1, 10) AS a(a), + generate_series(1, 10) AS b(b), + generate_series(1, 10) AS c(c) + +# Verify that no auto stats were collected +query TTIIIT colnames +SELECT DISTINCT ON (statistics_name, column_names) statistics_name, column_names, row_count, distinct_count, null_count, partial_predicate +FROM [SHOW STATISTICS FOR TABLE data] ORDER BY statistics_name, column_names, created DESC +---- +statistics_name column_names row_count distinct_count null_count partial_predicate + +# Simulate an auto full stats collection +statement ok +CREATE STATISTICS __auto__ FROM data + +# Set the min_stale_rows to MaxInt32 to ensure that full stat collections are +# not triggered +statement ok +SET CLUSTER SETTING sql.stats.automatic_collection.min_stale_rows = 2147483647 + +statement ok +SET CLUSTER SETTING sql.stats.automatic_collection.enabled = true + +# Change 10% of the table to 
trigger a partial stats collection. +statement ok +UPDATE DATA SET d = 2 WHERE a = 1 + +# The query uses DISTINCT ON and ORDER BY to only show the latest statistic +# available for each set of column names and statistic type. This is important +# in order to tolerate the rare case of multiple auto stats jobs running between +# two retry iterations. +query TTIIIT colnames,retry +SELECT DISTINCT ON (statistics_name, column_names) statistics_name, column_names, row_count, distinct_count, null_count, partial_predicate +FROM [SHOW STATISTICS FOR TABLE data] ORDER BY statistics_name, column_names, created DESC +---- +statistics_name column_names row_count distinct_count null_count partial_predicate +__auto__ {a} 1000 10 0 NULL +__auto__ {a,b} 1000 100 0 NULL +__auto__ {a,b,c} 1000 1000 0 NULL +__auto__ {b} 1000 10 0 NULL +__auto__ {c} 1000 10 0 NULL +__auto__ {d} 1000 1 0 NULL +__auto_partial__ {a} 0 0 0 (a IS NULL) OR ((a < 1:::INT8) OR (a > 10:::INT8)) +__auto_partial__ {c} 0 0 0 (c IS NULL) OR ((c < 1.0:::FLOAT8) OR (c > 10.0:::FLOAT8)) +__auto_partial__ {d} 100 1 0 (d IS NULL) OR ((d < 1:::DECIMAL) OR (d > 1:::DECIMAL)) + +# Disable automatic partial stats with the table setting +statement ok +ALTER TABLE data SET (sql_stats_automatic_partial_collection_enabled = false) + +# Change 20% of the table, no new partial stats should be collected. +statement ok +UPDATE DATA SET d = 3 WHERE a = 1 OR a = 2 + +query TTIIIT colnames +SELECT DISTINCT ON (statistics_name, column_names) statistics_name, column_names, row_count, distinct_count, null_count, partial_predicate +FROM [SHOW STATISTICS FOR TABLE data] ORDER BY statistics_name, column_names, created DESC +---- +statistics_name column_names row_count distinct_count null_count partial_predicate +__auto__ {a} 1000 10 0 NULL +__auto__ {a,b} 1000 100 0 NULL +__auto__ {a,b,c} 1000 1000 0 NULL +__auto__ {b} 1000 10 0 NULL +__auto__ {c} 1000 10 0 NULL +__auto__ {d} 1000 1 0 NULL +__auto_partial__ {a} 0 0 0 (a IS NULL) OR ((a < 1:::INT8) OR (a > 10:::INT8)) +__auto_partial__ {c} 0 0 0 (c IS NULL) OR ((c < 1.0:::FLOAT8) OR (c > 10.0:::FLOAT8)) +__auto_partial__ {d} 100 1 0 (d IS NULL) OR ((d < 1:::DECIMAL) OR (d > 1:::DECIMAL)) + +# Disable automatic partial stats by overriding the internal executor session +# setting instead +statement ok +SET CLUSTER SETTING sql.internal_executor.session_overrides = 'EnableCreateStatsUsingExtremes=false'; + +statement ok +ALTER TABLE data SET (sql_stats_automatic_partial_collection_enabled = true) + +# Change 20% of the table, no new partial stats should be collected. 
+statement ok +UPDATE DATA SET d = 4 WHERE a = 1 OR a = 2 + +query TTIIIT colnames +SELECT DISTINCT ON (statistics_name, column_names) statistics_name, column_names, row_count, distinct_count, null_count, partial_predicate +FROM [SHOW STATISTICS FOR TABLE data] ORDER BY statistics_name, column_names, created DESC +---- +statistics_name column_names row_count distinct_count null_count partial_predicate +__auto__ {a} 1000 10 0 NULL +__auto__ {a,b} 1000 100 0 NULL +__auto__ {a,b,c} 1000 1000 0 NULL +__auto__ {b} 1000 10 0 NULL +__auto__ {c} 1000 10 0 NULL +__auto__ {d} 1000 1 0 NULL +__auto_partial__ {a} 0 0 0 (a IS NULL) OR ((a < 1:::INT8) OR (a > 10:::INT8)) +__auto_partial__ {c} 0 0 0 (c IS NULL) OR ((c < 1.0:::FLOAT8) OR (c > 10.0:::FLOAT8)) +__auto_partial__ {d} 100 1 0 (d IS NULL) OR ((d < 1:::DECIMAL) OR (d > 1:::DECIMAL)) + +statement ok +SET CLUSTER SETTING sql.internal_executor.session_overrides = 'EnableCreateStatsUsingExtremes=true'; + +# Insert enough data to guarantee a partial stats collection. +statement ok +INSERT INTO data SELECT a, b, c FROM + generate_series(11, 14) AS a(a), + generate_series(11, 14) AS b(b), + generate_series(11, 14) AS c(c) + +query TTIIIT colnames,retry +SELECT DISTINCT ON (statistics_name, column_names) statistics_name, column_names, row_count, distinct_count, null_count, partial_predicate +FROM [SHOW STATISTICS FOR TABLE data] ORDER BY statistics_name, column_names, created DESC +---- +statistics_name column_names row_count distinct_count null_count partial_predicate +__auto__ {a} 1000 10 0 NULL +__auto__ {a,b} 1000 100 0 NULL +__auto__ {a,b,c} 1000 1000 0 NULL +__auto__ {b} 1000 10 0 NULL +__auto__ {c} 1000 10 0 NULL +__auto__ {d} 1000 1 0 NULL +__auto_partial__ {a} 64 4 0 (a IS NULL) OR ((a < 1:::INT8) OR (a > 10:::INT8)) +__auto_partial__ {c} 64 4 0 (c IS NULL) OR ((c < 1.0:::FLOAT8) OR (c > 10.0:::FLOAT8)) +__auto_partial__ {d} 264 2 64 (d IS NULL) OR ((d < 1:::DECIMAL) OR (d > 1:::DECIMAL)) + +# Upsert more than 5% of the table. +statement ok +UPSERT INTO data SELECT a, b, c::FLOAT, 5 FROM + generate_series(11, 15) AS a(a), + generate_series(11, 14) AS b(b), + generate_series(11, 13) AS c(c) + +query TTIIIT colnames,retry +SELECT DISTINCT ON (statistics_name, column_names) statistics_name, column_names, row_count, distinct_count, null_count, partial_predicate +FROM [SHOW STATISTICS FOR TABLE data] ORDER BY statistics_name, column_names, created DESC +---- +statistics_name column_names row_count distinct_count null_count partial_predicate +__auto__ {a} 1000 10 0 NULL +__auto__ {a,b} 1000 100 0 NULL +__auto__ {a,b,c} 1000 1000 0 NULL +__auto__ {b} 1000 10 0 NULL +__auto__ {c} 1000 10 0 NULL +__auto__ {d} 1000 1 0 NULL +__auto_partial__ {a} 76 5 0 (a IS NULL) OR ((a < 1:::INT8) OR (a > 10:::INT8)) +__auto_partial__ {c} 76 4 0 (c IS NULL) OR ((c < 1.0:::FLOAT8) OR (c > 10.0:::FLOAT8)) +__auto_partial__ {d} 276 3 16 (d IS NULL) OR ((d < 1:::DECIMAL) OR (d > 1:::DECIMAL)) + +# Delete more than 5% of the table. 
+statement ok +DELETE FROM data WHERE a > 11 + +query TTIIIT colnames,retry +SELECT DISTINCT ON (statistics_name, column_names) statistics_name, column_names, row_count, distinct_count, null_count, partial_predicate +FROM [SHOW STATISTICS FOR TABLE data] ORDER BY statistics_name, column_names, created DESC +---- +statistics_name column_names row_count distinct_count null_count partial_predicate +__auto__ {a} 1000 10 0 NULL +__auto__ {a,b} 1000 100 0 NULL +__auto__ {a,b,c} 1000 1000 0 NULL +__auto__ {b} 1000 10 0 NULL +__auto__ {c} 1000 10 0 NULL +__auto__ {d} 1000 1 0 NULL +__auto_partial__ {a} 16 1 0 (a IS NULL) OR ((a < 1:::INT8) OR (a > 10:::INT8)) +__auto_partial__ {c} 16 4 0 (c IS NULL) OR ((c < 1.0:::FLOAT8) OR (c > 10.0:::FLOAT8)) +__auto_partial__ {d} 216 3 4 (d IS NULL) OR ((d < 1:::DECIMAL) OR (d > 1:::DECIMAL)) diff --git a/pkg/sql/logictest/testdata/logic_test/distsql_crdb_internal b/pkg/sql/logictest/testdata/logic_test/distsql_crdb_internal index e683f9ceb9d0..8daf642f2133 100644 --- a/pkg/sql/logictest/testdata/logic_test/distsql_crdb_internal +++ b/pkg/sql/logictest/testdata/logic_test/distsql_crdb_internal @@ -26,7 +26,7 @@ SELECT start_key, end_key, replicas, lease_holder FROM [SHOW RANGES FROM TABLE d ORDER BY 1 ---- start_key end_key replicas lease_holder - …/1/1 {1} 1 + …/1/1 {1} 1 …/1/1 …/1/2 {2} 2 …/1/2 …/1/3 {3} 3 …/1/3 …/1/4 {4} 4 diff --git a/pkg/sql/logictest/testdata/logic_test/distsql_distinct_on b/pkg/sql/logictest/testdata/logic_test/distsql_distinct_on index f6cf451b19c3..5754631c8e83 100644 --- a/pkg/sql/logictest/testdata/logic_test/distsql_distinct_on +++ b/pkg/sql/logictest/testdata/logic_test/distsql_distinct_on @@ -66,7 +66,7 @@ SELECT start_key, end_key, replicas, lease_holder from [SHOW RANGES FROM TABLE x ORDER BY 1 ---- start_key end_key replicas lease_holder - …/1/2 {1} 1 + …/1/2 {1} 1 …/1/2 …/1/4 {2} 2 …/1/4 …/1/6 {3} 3 …/1/6 …/1/7 {4} 4 diff --git a/pkg/sql/logictest/testdata/logic_test/distsql_enum b/pkg/sql/logictest/testdata/logic_test/distsql_enum index aed82b29ed8b..e900fb54b8a8 100644 --- a/pkg/sql/logictest/testdata/logic_test/distsql_enum +++ b/pkg/sql/logictest/testdata/logic_test/distsql_enum @@ -48,7 +48,7 @@ query TTTI colnames SELECT start_key, end_key, replicas, lease_holder from [SHOW RANGES FROM TABLE t1 WITH DETAILS] ORDER BY lease_holder, start_key ---- start_key end_key replicas lease_holder - …/1/0 {1} 1 + …/1/0 {1} 1 …/1/0 …/1/10 {1} 1 …/1/10 …/1/20 {2} 2 …/1/20 {3} 3 diff --git a/pkg/sql/logictest/testdata/logic_test/distsql_numtables b/pkg/sql/logictest/testdata/logic_test/distsql_numtables index 9c1c1b39ad26..2d35f20656e3 100644 --- a/pkg/sql/logictest/testdata/logic_test/distsql_numtables +++ b/pkg/sql/logictest/testdata/logic_test/distsql_numtables @@ -30,14 +30,14 @@ query TTTI colnames SELECT start_key, end_key, replicas, lease_holder FROM [SHOW RANGES FROM TABLE NumToSquare WITH DETAILS] ---- start_key end_key replicas lease_holder - {1} 1 + {1} 1 query TTTI colnames,rowsort SELECT start_key, end_key, replicas, lease_holder FROM [SHOW RANGES FROM TABLE NumToStr WITH DETAILS] ORDER BY 1 ---- start_key end_key replicas lease_holder - …/1/2000 {1} 1 + …/1/2000 {1} 1 …/1/2000 …/1/4000 {2} 2 …/1/4000 …/1/6000 {3} 3 …/1/6000 …/1/8000 {4} 4 diff --git a/pkg/sql/logictest/testdata/logic_test/distsql_stats b/pkg/sql/logictest/testdata/logic_test/distsql_stats index 9321a522e4d1..7c7f8e4f646f 100644 --- a/pkg/sql/logictest/testdata/logic_test/distsql_stats +++ b/pkg/sql/logictest/testdata/logic_test/distsql_stats @@ -34,7 
+34,7 @@ query TTTI colnames,rowsort SELECT start_key, end_key, replicas, lease_holder FROM [SHOW RANGES FROM TABLE data WITH DETAILS] ---- start_key end_key replicas lease_holder - …/1/1 {1} 1 + …/1/1 {1} 1 …/1/1 …/1/2 {2} 2 …/1/2 …/1/3 {3} 3 …/1/3 …/1/4 {4} 4 diff --git a/pkg/sql/logictest/testdata/logic_test/distsql_subquery b/pkg/sql/logictest/testdata/logic_test/distsql_subquery index 6b586d43d6b2..e31c6b5a7f46 100644 --- a/pkg/sql/logictest/testdata/logic_test/distsql_subquery +++ b/pkg/sql/logictest/testdata/logic_test/distsql_subquery @@ -23,7 +23,7 @@ SELECT start_key, end_key, replicas, lease_holder from [SHOW RANGES FROM TABLE a ORDER BY 1 ---- start_key end_key replicas lease_holder - …/1/2 {1} 1 + …/1/2 {1} 1 …/1/2 {2} 2 query T diff --git a/pkg/sql/logictest/testdata/logic_test/distsql_union b/pkg/sql/logictest/testdata/logic_test/distsql_union index 8b9e9e62e54b..512399d3f08c 100644 --- a/pkg/sql/logictest/testdata/logic_test/distsql_union +++ b/pkg/sql/logictest/testdata/logic_test/distsql_union @@ -32,7 +32,7 @@ SELECT start_key, end_key, replicas, lease_holder FROM [SHOW RANGES FROM TABLE x ORDER BY 1 ---- start_key end_key replicas lease_holder - …/1/2 {1} 1 + …/1/2 {1} 1 …/1/2 …/1/3 {2} 2 …/1/3 …/1/4 {3} 3 …/1/4 …/1/5 {4} 4 diff --git a/pkg/sql/logictest/testdata/logic_test/drop_function b/pkg/sql/logictest/testdata/logic_test/drop_function index 7a57e508a289..a513811845ae 100644 --- a/pkg/sql/logictest/testdata/logic_test/drop_function +++ b/pkg/sql/logictest/testdata/logic_test/drop_function @@ -22,6 +22,7 @@ CREATE FUNCTION public.f_test_drop() NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 1; $$ @@ -31,6 +32,7 @@ CREATE FUNCTION public.f_test_drop(INT8) NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 1; $$ @@ -44,6 +46,7 @@ CREATE FUNCTION sc1.f_test_drop(INT8) NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 1; $$ @@ -94,6 +97,7 @@ CREATE FUNCTION public.f_test_drop(INT8) NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 1; $$ @@ -107,6 +111,7 @@ CREATE FUNCTION sc1.f_test_drop(INT8) NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 1; $$ @@ -128,6 +133,7 @@ CREATE FUNCTION sc1.f_test_drop(INT8) NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 1; $$ @@ -153,6 +159,7 @@ CREATE FUNCTION public.f_test_drop() NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 1; $$ @@ -166,6 +173,7 @@ CREATE FUNCTION sc1.f_test_drop() NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 1; $$ @@ -211,6 +219,7 @@ CREATE FUNCTION public.f114677(v public.t114677) NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 0; $$ @@ -220,6 +229,7 @@ CREATE FUNCTION public.f114677(v public.t114677_2) NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 1; $$ @@ -239,6 +249,7 @@ CREATE FUNCTION public.f114677(v public.t114677_2) NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 1; $$ diff --git a/pkg/sql/logictest/testdata/logic_test/drop_index b/pkg/sql/logictest/testdata/logic_test/drop_index index e8b3e1d82803..2a6ac64c520a 100644 --- a/pkg/sql/logictest/testdata/logic_test/drop_index +++ b/pkg/sql/logictest/testdata/logic_test/drop_index @@ -339,8 +339,8 @@ fk1 CREATE TABLE public.fk1 ( # test that notices are generated on index drops subtest notice_on_drop_index -# Read 
committed emits extra notices, so skip it. -skipif config local-read-committed +# Weak isolation levels emit extra notices, so skip them. +skipif config weak-iso-level-configs query T noticetrace CREATE TABLE drop_index_test(a int); CREATE INDEX drop_index_test_index ON drop_index_test(a); DROP INDEX drop_index_test_index ---- @@ -360,7 +360,7 @@ DROP INDEX t_secondary CASCADE; ALTER TABLE t DROP COLUMN b; INSERT INTO t SELECT a + 1 FROM t; -skipif config local-read-committed 112488 +skipif config weak-iso-level-configs 112488 statement error pgcode 23505 duplicate key value violates unique constraint "t_secondary"\nDETAIL: Key \(b\)=\(0\.0\) already exists UPSERT INTO t SELECT a + 1 FROM t; diff --git a/pkg/sql/logictest/testdata/logic_test/drop_procedure b/pkg/sql/logictest/testdata/logic_test/drop_procedure index 20b3642ef109..8fb221c1630f 100644 --- a/pkg/sql/logictest/testdata/logic_test/drop_procedure +++ b/pkg/sql/logictest/testdata/logic_test/drop_procedure @@ -18,11 +18,13 @@ SELECT create_statement FROM [SHOW CREATE PROCEDURE public.p_test_drop] ORDER BY ---- CREATE PROCEDURE public.p_test_drop() LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 1; $$ CREATE PROCEDURE public.p_test_drop(INT8) LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 1; $$ @@ -32,6 +34,7 @@ SELECT create_statement FROM [SHOW CREATE PROCEDURE sc1.p_test_drop] ---- CREATE PROCEDURE sc1.p_test_drop(INT8) LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 1; $$ @@ -81,6 +84,7 @@ SELECT create_statement FROM [SHOW CREATE PROCEDURE public.p_test_drop] ---- CREATE PROCEDURE public.p_test_drop(INT8) LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 1; $$ @@ -90,6 +94,7 @@ SELECT create_statement FROM [SHOW CREATE PROCEDURE sc1.p_test_drop] ---- CREATE PROCEDURE sc1.p_test_drop(INT8) LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 1; $$ @@ -110,6 +115,7 @@ SELECT create_statement FROM [SHOW CREATE PROCEDURE sc1.p_test_drop] ---- CREATE PROCEDURE sc1.p_test_drop(INT8) LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 1; $$ @@ -134,6 +140,7 @@ SELECT create_statement FROM [SHOW CREATE PROCEDURE public.p_test_drop] ---- CREATE PROCEDURE public.p_test_drop() LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 1; $$ @@ -143,6 +150,7 @@ SELECT create_statement FROM [SHOW CREATE PROCEDURE sc1.p_test_drop] ---- CREATE PROCEDURE sc1.p_test_drop() LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 1; $$ diff --git a/pkg/sql/logictest/testdata/logic_test/experimental_distsql_planning_5node b/pkg/sql/logictest/testdata/logic_test/experimental_distsql_planning_5node index 0042eeeececd..e291629a7edb 100644 --- a/pkg/sql/logictest/testdata/logic_test/experimental_distsql_planning_5node +++ b/pkg/sql/logictest/testdata/logic_test/experimental_distsql_planning_5node @@ -23,7 +23,7 @@ SELECT start_key, end_key, replicas, lease_holder from [SHOW RANGES FROM TABLE k ORDER BY 1 ---- start_key end_key replicas lease_holder - …/1/1 {1} 1 + …/1/1 {1} 1 …/1/1 …/1/2 {1} 1 …/1/2 …/1/3 {2} 2 …/1/3 …/1/4 {3} 3 diff --git a/pkg/sql/logictest/testdata/logic_test/fk b/pkg/sql/logictest/testdata/logic_test/fk index f14124572f09..15bc77890e7c 100644 --- a/pkg/sql/logictest/testdata/logic_test/fk +++ b/pkg/sql/logictest/testdata/logic_test/fk @@ -3958,7 +3958,7 @@ statement ok ALTER TABLE t65890_c ADD CONSTRAINT fk FOREIGN KEY (a, b) REFERENCES t65890_p(a, b) # Ensure that a fast path on ba_idx is used. 
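Context for the comment above: the "fast path" is the insert fast path, in which the INSERT's foreign-key check is satisfied by direct lookups into the referenced index, t65890_p@ba_idx, rather than a separately planned check; the two gated EXPLAIN variants that follow assert this plan shape for the serializable and weak-isolation configs (a reading of the plan output, not stated in the patch). A minimal sketch of the assertion, reusing the test's own statement:

EXPLAIN INSERT INTO t65890_c (k, b, a) VALUES (1, 2, 3);
-- expected to include the line: FK check: t65890_p@ba_idx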
-skipif config local-read-committed +skipif config weak-iso-level-configs query T SELECT * FROM [EXPLAIN INSERT INTO t65890_c (k, b, a) VALUES (1, 2, 3)] OFFSET 3 ---- @@ -3968,7 +3968,7 @@ SELECT * FROM [EXPLAIN INSERT INTO t65890_c (k, b, a) VALUES (1, 2, 3)] OFFSET 3 FK check: t65890_p@ba_idx size: 3 columns, 1 row -onlyif config local-read-committed +onlyif config weak-iso-level-configs query T SELECT * FROM [EXPLAIN INSERT INTO t65890_c (k, b, a) VALUES (1, 2, 3)] OFFSET 3 ---- @@ -4066,28 +4066,28 @@ CREATE DATABASE db_type_test statement ok CREATE TABLE db_type_test.public.parent (id INT8 PRIMARY KEY, name STRING NULL) -# Read committed emits extra notices, so skip it. -skipif config local-read-committed +# Weak isolation levels emit extra notices, so skip them. +skipif config weak-iso-level-configs query T noticetrace CREATE TABLE db_type_test.public.child_1 (id INT8 PRIMARY KEY, parent_id INT4 NULL REFERENCES db_type_test.public.parent(id), name STRING NULL) ---- NOTICE: type of foreign key column "parent_id" (INT4) is not identical to referenced column "parent"."id" (INT8) -onlyif config local-read-committed +onlyif config weak-iso-level-configs statement ok CREATE TABLE db_type_test.public.child_1 (id INT8 PRIMARY KEY, parent_id INT4 NULL REFERENCES db_type_test.public.parent(id), name STRING NULL) statement ok CREATE TABLE db_type_test.public.child_2 (id INT8 PRIMARY KEY, parent_id INT4 NULL, name STRING NULL) -# Read committed emits extra notices, so skip it. -skipif config local-read-committed +# Weak isolation levels emit extra notices, so skip them. +skipif config weak-iso-level-configs query T noticetrace ALTER TABLE db_type_test.public.child_2 ADD CONSTRAINT child_2_fk_parent_id FOREIGN KEY (parent_id) REFERENCES db_type_test.public.parent(id) ---- NOTICE: type of foreign key column "parent_id" (INT4) is not identical to referenced column "parent"."id" (INT8) -onlyif config local-read-committed +onlyif config weak-iso-level-configs statement ok ALTER TABLE db_type_test.public.child_2 ADD CONSTRAINT child_2_fk_parent_id FOREIGN KEY (parent_id) REFERENCES db_type_test.public.parent(id) @@ -4219,27 +4219,52 @@ subtest ensure_notice_when_fk_type_not_equal_in_alter statement ok CREATE TABLE t1_fk ( pk INT PRIMARY KEY, col1 CHAR(7), col2 INT4, UNIQUE (col1,col2), FAMILY f1 (pk,col1,col2) ); +skipif config weak-iso-level-configs query T noticetrace CREATE TABLE t2_fk ( pk INT PRIMARY KEY, t1_fk_col1 CHAR(8), t1_fk_col2 INT4, col3 INT, FOREIGN KEY (t1_fk_col1,t1_fk_col2) REFERENCES t1_fk(col1, col2), FAMILY f1 (pk,t1_fk_col1,t1_fk_col2) ); ---- NOTICE: type of foreign key column "t1_fk_col1" \(CHAR\(8\)\) is not identical to referenced column "t1_fk"."col1" \(CHAR\(7\)\) +onlyif config weak-iso-level-configs +query T noticetrace +CREATE TABLE t2_fk ( pk INT PRIMARY KEY, t1_fk_col1 CHAR(8), t1_fk_col2 INT4, col3 INT, FOREIGN KEY (t1_fk_col1,t1_fk_col2) REFERENCES t1_fk(col1, col2), FAMILY f1 (pk,t1_fk_col1,t1_fk_col2) ); +---- +NOTICE: setting transaction isolation level to SERIALIZABLE due to schema change +NOTICE: type of foreign key column "t1_fk_col1" \(CHAR\(8\)\) is not identical to referenced column "t1_fk"."col1" \(CHAR\(7\)\) + # Test trivial data type change skipif config local-legacy-schema-changer skipif config local-mixed-24.1 +skipif config weak-iso-level-configs +query T noticetrace +ALTER TABLE t2_fk ALTER COLUMN t1_fk_col2 SET DATA TYPE INT8 +---- +NOTICE: type of foreign key column "t1_fk_col2" \(INT8\) is not identical to referenced column "t1_fk"."col2" 
\(INT4\) + +onlyif config weak-iso-level-configs query T noticetrace ALTER TABLE t2_fk ALTER COLUMN t1_fk_col2 SET DATA TYPE INT8 ---- +NOTICE: setting transaction isolation level to SERIALIZABLE due to schema change NOTICE: type of foreign key column "t1_fk_col2" \(INT8\) is not identical to referenced column "t1_fk"."col2" \(INT4\) # Test validation data type change skipif config local-legacy-schema-changer skipif config local-mixed-24.1 +skipif config weak-iso-level-configs +query T noticetrace +ALTER TABLE t2_fk ALTER COLUMN t1_fk_col1 SET DATA TYPE CHAR(5) +---- +NOTICE: type of foreign key column "t1_fk_col1" \(CHAR\(5\)\) is not identical to referenced column "t1_fk"."col1" \(CHAR\(7\)\) + +onlyif config weak-iso-level-configs query T noticetrace ALTER TABLE t2_fk ALTER COLUMN t1_fk_col1 SET DATA TYPE CHAR(5) ---- +NOTICE: setting transaction isolation level to SERIALIZABLE due to schema change NOTICE: type of foreign key column "t1_fk_col1" \(CHAR\(5\)\) is not identical to referenced column "t1_fk"."col1" \(CHAR\(7\)\) + skipif config local-legacy-schema-changer skipif config local-mixed-24.1 query TT diff --git a/pkg/sql/logictest/testdata/logic_test/gen_test_objects b/pkg/sql/logictest/testdata/logic_test/gen_test_objects index 4d8231bdaf3d..562d960f4888 100644 --- a/pkg/sql/logictest/testdata/logic_test/gen_test_objects +++ b/pkg/sql/logictest/testdata/logic_test/gen_test_objects @@ -179,17 +179,18 @@ query T SELECT table_name FROM [SHOW TABLES] ORDER BY table_name ---- -protected_ts_meta -ran gelog -replicat i̾on_stats -s&qlliveness -sche"duled_j"obs -statement_diagnostics_requests -statement_statistic%vs +database_role_settings +eventLog +j ob\\x97s +locations +mvcc&_statistics +protected_ts_records +repor +ts_met😐a +span_stats_buckets +sqlliveness +statement_diagnosti cs sta😣tement_statistics -table_statistiCs -tenant_id_seq -transaction_statistics # Again, the column names are randomized. 
query TTT @@ -198,26 +199,26 @@ WHERE table_catalog = 'newdb2' AND table_schema = 'public' ORDER BY table_name, column_name LIMIT 20 ---- -protected_ts_meta "num_re cords" bigint -protected_ts_meta num_spans bigint -protected_ts_meta rowid bigint -protected_ts_meta "singleton*" boolean -protected_ts_meta "t'o tal_b,yt😫es" bigint -protected_ts_meta "vers ion" bigint -"ran gelog" "%rangeID" bigint -"ran gelog" "eventType" text -"ran gelog" i̅nfo text -"ran gelog" "otherRangeID" bigint -"ran gelog" rowid bigint -"ran gelog" "storeID" bigint -"ran gelog" "timestamp" timestamp without time zone -"ran gelog" "unique%qID" bigint -"replicat i̾on_stats" over_replic̣ated_ranges bigint -"replicat i̾on_stats" report_id bigint -"replicat i̾on_stats" rowid bigint -"replicat i̾on_stats" "s%pubzone_id" bigint -"replicat i̾on_stats" total_ranges bigint -"replicat i̾on_stats" unavailable_ranges bigint +database_role_settings database_id oid +database_role_settings "ro'le_id" oid +database_role_settings role_name text +database_role_settings rowid bigint +database_role_settings settings ARRAY +"eventLog" "\\xa7t̻%pimest̏am p" timestamp without time zone +"eventLog" "eventType" text +"eventLog" info text +"eventLog" "reportingID" bigint +"eventLog" rowid bigint +"eventLog" "targ%ve}tID" bigint +"eventLog" "uniqueID" bytea +"j ob\\x97s" "claim_instAnce_id" bigint +"j ob\\x97s" "claim_session?_ id" bytea +"j ob\\x97s" "create\nd" timestamp without time zone +"j ob\\x97s" created_by_id bigint +"j ob\\x97s" created_by_type text +"j ob\\x97s" "id.\\U000FA67C" bigint +"j ob\\x97s" "j%qob_type " text +"j ob\\x97s" l😲ast_ru😻n timestamp without time zone subtest templates/different_templates_in_each_db @@ -235,15 +236,15 @@ SELECT quote_ident(database_name), quote_ident(schema_name), quote_ident(name) FROM "".crdb_internal.tables WHERE database_name ILIKE '%d%b%t%' ORDER BY database_name, schema_name, name ---- -"d%qbt_1" public external_connections -"d%qbt_1" public "r%56ol e_mem bers" -"d%qbt_1" public task_payloads -"d%qbt_2" public locations -"d%qbt_2" public "scheduled_joBs" -"d%qbt_2" public span_stat😪s_sam͇ples -dbt_3 public "span_st%pats_unique_keys" -dbt_3 public "tenant_sETtings" -dbt_3 public t😔enants +"d%qbt_1" public statement_activity +"d%qbt_1" public ten😘ant_id_seq +"d%qbt_1" public tra😗nsaction_execution_insights +"d%qbt_2" public "mvc%pc_statistics" +"d%qbt_2" public "span_co""nfigurations" +"d%qbt_2" public state😟ment_diagnostics_requests +dbt_3 public statement_diagnostics +dbt_3 public task_payloads +dbt_3 public "tenant_id _seq" statement ok diff --git a/pkg/sql/logictest/testdata/logic_test/grant_on_all_sequences_in_schema b/pkg/sql/logictest/testdata/logic_test/grant_on_all_sequences_in_schema index a5a5c6f30ca3..a7d8ccfcb41b 100644 --- a/pkg/sql/logictest/testdata/logic_test/grant_on_all_sequences_in_schema +++ b/pkg/sql/logictest/testdata/logic_test/grant_on_all_sequences_in_schema @@ -36,11 +36,19 @@ GRANT SELECT ON ALL SEQUENCES IN SCHEMA s TO testuser # This should be a no-op, since the BACKUP privilege is not # supported on sequences.
+skipif config weak-iso-level-configs query T noticetrace GRANT BACKUP ON ALL TABLES IN SCHEMA S TO testuser ---- NOTICE: some privileges have no effect on sequences: [BACKUP] +onlyif config weak-iso-level-configs +query T noticetrace +GRANT BACKUP ON ALL TABLES IN SCHEMA S TO testuser +---- +NOTICE: setting transaction isolation level to SERIALIZABLE due to schema change +NOTICE: some privileges have no effect on sequences: [BACKUP] + statement error pgcode 0LP01 invalid privilege type BACKUP for sequence GRANT BACKUP ON ALL SEQUENCES IN SCHEMA S TO testuser diff --git a/pkg/sql/logictest/testdata/logic_test/grant_revoke_with_grant_option b/pkg/sql/logictest/testdata/logic_test/grant_revoke_with_grant_option index 407c21279184..90ed3db1d3e6 100644 --- a/pkg/sql/logictest/testdata/logic_test/grant_revoke_with_grant_option +++ b/pkg/sql/logictest/testdata/logic_test/grant_revoke_with_grant_option @@ -566,14 +566,14 @@ statement ok GRANT SELECT ON TABLE t1 TO testuser2 # Show a notice when revoking privileges from the owner. -# Read committed emits extra notices, so skip it. -skipif config local-read-committed +# Weak isolation levels emit extra notices, so skip them. +skipif config weak-iso-level-configs query T noticetrace REVOKE ALL PRIVILEGES ON TABLE t1 FROM testuser ---- NOTICE: testuser is the owner of t1 and still has all privileges implicitly -onlyif config local-read-committed +onlyif config weak-iso-level-configs statement ok REVOKE ALL PRIVILEGES ON TABLE t1 FROM testuser diff --git a/pkg/sql/logictest/testdata/logic_test/grant_table b/pkg/sql/logictest/testdata/logic_test/grant_table index 913e3ced9343..4ec5e7cfe695 100644 --- a/pkg/sql/logictest/testdata/logic_test/grant_table +++ b/pkg/sql/logictest/testdata/logic_test/grant_table @@ -60,6 +60,7 @@ test crdb_internal databases test crdb_internal default_privileges table public SELECT false test crdb_internal feature_usage table public SELECT false test crdb_internal forward_dependencies table public SELECT false +test crdb_internal fully_qualified_names table public SELECT false test crdb_internal gossip_alerts table public SELECT false test crdb_internal gossip_liveness table public SELECT false test crdb_internal gossip_network table public SELECT false @@ -1012,6 +1013,10 @@ system public statement_execution_insights table admin system public statement_execution_insights table admin INSERT true system public statement_execution_insights table admin SELECT true system public statement_execution_insights table admin UPDATE true +system public table_metadata table admin DELETE true +system public table_metadata table admin INSERT true +system public table_metadata table admin SELECT true +system public table_metadata table admin UPDATE true a public NULL schema admin ALL true defaultdb public NULL schema admin ALL true postgres public NULL schema admin ALL true @@ -1223,6 +1228,10 @@ system public statement_execution_insights table root system public statement_execution_insights table root INSERT true system public statement_execution_insights table root SELECT true system public statement_execution_insights table root UPDATE true +system public table_metadata table root DELETE true +system public table_metadata table root INSERT true +system public table_metadata table root SELECT true +system public table_metadata table root UPDATE true a pg_extension NULL schema public USAGE false a public NULL schema public CREATE false a public NULL schema public USAGE false @@ -2277,6 +2286,14 @@ system public 
statement_execution_insights table root system public statement_execution_insights table root UPDATE true system public statement_statistics table admin SELECT true system public statement_statistics table root SELECT true +system public table_metadata table admin DELETE true +system public table_metadata table admin INSERT true +system public table_metadata table admin SELECT true +system public table_metadata table admin UPDATE true +system public table_metadata table root DELETE true +system public table_metadata table root INSERT true +system public table_metadata table root SELECT true +system public table_metadata table root UPDATE true system public table_statistics table admin DELETE true system public table_statistics table admin INSERT true system public table_statistics table admin SELECT true diff --git a/pkg/sql/logictest/testdata/logic_test/information_schema b/pkg/sql/logictest/testdata/logic_test/information_schema index a60dfe8d4ca4..c0eee5c996af 100644 --- a/pkg/sql/logictest/testdata/logic_test/information_schema +++ b/pkg/sql/logictest/testdata/logic_test/information_schema @@ -425,6 +425,7 @@ crdb_internal databases crdb_internal default_privileges crdb_internal feature_usage crdb_internal forward_dependencies +crdb_internal fully_qualified_names crdb_internal gossip_alerts crdb_internal gossip_liveness crdb_internal gossip_network @@ -783,6 +784,7 @@ databases default_privileges feature_usage forward_dependencies +fully_qualified_names gossip_alerts gossip_liveness gossip_network @@ -1148,6 +1150,7 @@ table_columns # Check that the metadata is reported properly. skipif config local-mixed-24.1 +skipif config local-mixed-24.2 query TTTTT colnames SELECT table_catalog, table_schema, table_name, table_type, is_insertable_into FROM system.information_schema.tables ORDER BY table_name, table_schema @@ -1224,6 +1227,7 @@ system information_schema foreign_servers system information_schema foreign_table_options SYSTEM VIEW NO system information_schema foreign_tables SYSTEM VIEW NO system crdb_internal forward_dependencies SYSTEM VIEW NO +system crdb_internal fully_qualified_names SYSTEM VIEW NO system pg_extension geography_columns SYSTEM VIEW NO system pg_extension geometry_columns SYSTEM VIEW NO system crdb_internal gossip_alerts SYSTEM VIEW NO @@ -1485,6 +1489,7 @@ system crdb_internal table_columns system information_schema table_constraints SYSTEM VIEW NO system information_schema table_constraints_extensions SYSTEM VIEW NO system crdb_internal table_indexes SYSTEM VIEW NO +system public table_metadata BASE TABLE YES system information_schema table_privileges SYSTEM VIEW NO system crdb_internal table_row_statistics SYSTEM VIEW NO system crdb_internal table_spans SYSTEM VIEW NO @@ -1583,6 +1588,7 @@ SET DATABASE = test ## information_schema.constraint_column_usage skipif config local-mixed-24.1 +skipif config local-mixed-24.2 query TTTTTTTTT colnames SELECT * FROM system.information_schema.table_constraints @@ -1825,6 +1831,23 @@ system public 29_42_8_not_null system public 29_42_9_not_null system public statement_statistics CHECK NO NO system public check_crdb_internal_aggregated_ts_app_name_fingerprint_id_node_id_plan_hash_transaction_fingerprint_id_shard_8 system public statement_statistics CHECK NO NO system public primary system public statement_statistics PRIMARY KEY NO NO +system public 29_67_10_not_null system public table_metadata CHECK NO NO +system public 29_67_11_not_null system public table_metadata CHECK NO NO +system public 29_67_12_not_null system public 
table_metadata CHECK NO NO +system public 29_67_13_not_null system public table_metadata CHECK NO NO +system public 29_67_15_not_null system public table_metadata CHECK NO NO +system public 29_67_16_not_null system public table_metadata CHECK NO NO +system public 29_67_1_not_null system public table_metadata CHECK NO NO +system public 29_67_2_not_null system public table_metadata CHECK NO NO +system public 29_67_3_not_null system public table_metadata CHECK NO NO +system public 29_67_4_not_null system public table_metadata CHECK NO NO +system public 29_67_5_not_null system public table_metadata CHECK NO NO +system public 29_67_6_not_null system public table_metadata CHECK NO NO +system public 29_67_7_not_null system public table_metadata CHECK NO NO +system public 29_67_8_not_null system public table_metadata CHECK NO NO +system public 29_67_9_not_null system public table_metadata CHECK NO NO +system public check_crdb_internal_last_updated_table_id_shard_16 system public table_metadata CHECK NO NO +system public primary system public table_metadata PRIMARY KEY NO NO system public 29_20_10_not_null system public table_statistics CHECK NO NO system public 29_20_1_not_null system public table_statistics CHECK NO NO system public 29_20_2_not_null system public table_statistics CHECK NO NO @@ -1930,6 +1953,7 @@ NULL NULL UTF8 UCS skipif config local-mixed-24.1 +skipif config local-mixed-24.2 query TTTT colnames SELECT * FROM system.information_schema.check_constraints @@ -2179,6 +2203,20 @@ system public 29_66_2_not_null system public 29_66_3_not_null transaction_fingerprint_id IS NOT NULL system public 29_66_4_not_null statement_id IS NOT NULL system public 29_66_5_not_null statement_fingerprint_id IS NOT NULL +system public 29_67_10_not_null total_ranges IS NOT NULL +system public 29_67_11_not_null total_live_data_bytes IS NOT NULL +system public 29_67_12_not_null total_data_bytes IS NOT NULL +system public 29_67_13_not_null perc_live_data IS NOT NULL +system public 29_67_15_not_null last_updated IS NOT NULL +system public 29_67_1_not_null db_id IS NOT NULL +system public 29_67_2_not_null table_id IS NOT NULL +system public 29_67_3_not_null db_name IS NOT NULL +system public 29_67_4_not_null schema_name IS NOT NULL +system public 29_67_5_not_null table_name IS NOT NULL +system public 29_67_6_not_null total_columns IS NOT NULL +system public 29_67_7_not_null total_indexes IS NOT NULL +system public 29_67_8_not_null store_ids IS NOT NULL +system public 29_67_9_not_null replication_size_bytes IS NOT NULL system public 29_6_1_not_null name IS NOT NULL system public 29_6_2_not_null value IS NOT NULL system public 29_6_3_not_null lastUpdated IS NOT NULL @@ -2191,11 +2229,13 @@ system public check_crdb_internal_aggregated_ts_app_nam system public check_crdb_internal_created_at_database_id_index_id_table_id_shard_16 ((crdb_internal_created_at_database_id_index_id_table_id_shard_16 IN (0:::INT8, 1:::INT8, 2:::INT8, 3:::INT8, 4:::INT8, 5:::INT8, 6:::INT8, 7:::INT8, 8:::INT8, 9:::INT8, 10:::INT8, 11:::INT8, 12:::INT8, 13:::INT8, 14:::INT8, 15:::INT8))) system public check_crdb_internal_end_time_start_time_shard_16 ((crdb_internal_end_time_start_time_shard_16 IN (0:::INT8, 1:::INT8, 2:::INT8, 3:::INT8, 4:::INT8, 5:::INT8, 6:::INT8, 7:::INT8, 8:::INT8, 9:::INT8, 10:::INT8, 11:::INT8, 12:::INT8, 13:::INT8, 14:::INT8, 15:::INT8))) system public check_crdb_internal_end_time_start_time_shard_16 ((crdb_internal_end_time_start_time_shard_16 IN (0:::INT8, 1:::INT8, 2:::INT8, 3:::INT8, 4:::INT8, 5:::INT8, 
6:::INT8, 7:::INT8, 8:::INT8, 9:::INT8, 10:::INT8, 11:::INT8, 12:::INT8, 13:::INT8, 14:::INT8, 15:::INT8))) +system public check_crdb_internal_last_updated_table_id_shard_16 ((crdb_internal_last_updated_table_id_shard_16 IN (0:::INT8, 1:::INT8, 2:::INT8, 3:::INT8, 4:::INT8, 5:::INT8, 6:::INT8, 7:::INT8, 8:::INT8, 9:::INT8, 10:::INT8, 11:::INT8, 12:::INT8, 13:::INT8, 14:::INT8, 15:::INT8))) system public check_sampling_probability ((sampling_probability BETWEEN 0.0:::FLOAT8 AND 1.0:::FLOAT8)) system public check_singleton ((singleton)) system public single_row ((singleton)) skipif config local-mixed-24.1 +skipif config local-mixed-24.2 query TTTTTTT colnames SELECT * FROM system.information_schema.constraint_column_usage @@ -2304,6 +2344,9 @@ system public statement_statistics fingerprint_id system public statement_statistics node_id system public primary system public statement_statistics plan_hash system public primary system public statement_statistics transaction_fingerprint_id system public primary +system public table_metadata crdb_internal_last_updated_table_id_shard_16 system public check_crdb_internal_last_updated_table_id_shard_16 +system public table_metadata db_id system public primary +system public table_metadata table_id system public primary system public table_statistics statisticID system public primary system public table_statistics tableID system public primary system public task_payloads id system public primary @@ -2335,6 +2378,7 @@ system public web_sessions id system public zones id system public primary skipif config local-mixed-24.1 +skipif config local-mixed-24.2 query TTTTTTT colnames SELECT * FROM system.information_schema.constraint_column_usage @@ -2443,6 +2487,9 @@ system public statement_statistics fingerprint_id system public statement_statistics node_id system public primary system public statement_statistics plan_hash system public primary system public statement_statistics transaction_fingerprint_id system public primary +system public table_metadata crdb_internal_last_updated_table_id_shard_16 system public check_crdb_internal_last_updated_table_id_shard_16 +system public table_metadata db_id system public primary +system public table_metadata table_id system public primary system public table_statistics statisticID system public primary system public table_statistics tableID system public primary system public task_payloads id system public primary @@ -2474,6 +2521,7 @@ system public web_sessions id system public zones id system public primary skipif config local-mixed-24.1 +skipif config local-mixed-24.2 query TTTT colnames SELECT * FROM system.information_schema.check_constraints @@ -2723,6 +2771,20 @@ system public 29_66_2_not_null system public 29_66_3_not_null transaction_fingerprint_id IS NOT NULL system public 29_66_4_not_null statement_id IS NOT NULL system public 29_66_5_not_null statement_fingerprint_id IS NOT NULL +system public 29_67_10_not_null total_ranges IS NOT NULL +system public 29_67_11_not_null total_live_data_bytes IS NOT NULL +system public 29_67_12_not_null total_data_bytes IS NOT NULL +system public 29_67_13_not_null perc_live_data IS NOT NULL +system public 29_67_15_not_null last_updated IS NOT NULL +system public 29_67_1_not_null db_id IS NOT NULL +system public 29_67_2_not_null table_id IS NOT NULL +system public 29_67_3_not_null db_name IS NOT NULL +system public 29_67_4_not_null schema_name IS NOT NULL +system public 29_67_5_not_null table_name IS NOT NULL +system public 29_67_6_not_null total_columns IS NOT NULL +system 
public 29_67_7_not_null total_indexes IS NOT NULL +system public 29_67_8_not_null store_ids IS NOT NULL +system public 29_67_9_not_null replication_size_bytes IS NOT NULL system public 29_6_1_not_null name IS NOT NULL system public 29_6_2_not_null value IS NOT NULL system public 29_6_3_not_null lastUpdated IS NOT NULL @@ -2735,6 +2797,7 @@ system public check_crdb_internal_aggregated_ts_app_nam system public check_crdb_internal_created_at_database_id_index_id_table_id_shard_16 ((crdb_internal_created_at_database_id_index_id_table_id_shard_16 IN (0:::INT8, 1:::INT8, 2:::INT8, 3:::INT8, 4:::INT8, 5:::INT8, 6:::INT8, 7:::INT8, 8:::INT8, 9:::INT8, 10:::INT8, 11:::INT8, 12:::INT8, 13:::INT8, 14:::INT8, 15:::INT8))) system public check_crdb_internal_end_time_start_time_shard_16 ((crdb_internal_end_time_start_time_shard_16 IN (0:::INT8, 1:::INT8, 2:::INT8, 3:::INT8, 4:::INT8, 5:::INT8, 6:::INT8, 7:::INT8, 8:::INT8, 9:::INT8, 10:::INT8, 11:::INT8, 12:::INT8, 13:::INT8, 14:::INT8, 15:::INT8))) system public check_crdb_internal_end_time_start_time_shard_16 ((crdb_internal_end_time_start_time_shard_16 IN (0:::INT8, 1:::INT8, 2:::INT8, 3:::INT8, 4:::INT8, 5:::INT8, 6:::INT8, 7:::INT8, 8:::INT8, 9:::INT8, 10:::INT8, 11:::INT8, 12:::INT8, 13:::INT8, 14:::INT8, 15:::INT8))) +system public check_crdb_internal_last_updated_table_id_shard_16 ((crdb_internal_last_updated_table_id_shard_16 IN (0:::INT8, 1:::INT8, 2:::INT8, 3:::INT8, 4:::INT8, 5:::INT8, 6:::INT8, 7:::INT8, 8:::INT8, 9:::INT8, 10:::INT8, 11:::INT8, 12:::INT8, 13:::INT8, 14:::INT8, 15:::INT8))) system public check_sampling_probability ((sampling_probability BETWEEN 0.0:::FLOAT8 AND 1.0:::FLOAT8)) system public check_singleton ((singleton)) system public single_row ((singleton)) @@ -2830,6 +2893,7 @@ data_type column_default smallint 0 skipif config local-mixed-24.1 +skipif config local-mixed-24.2 query TTTTI colnames SELECT table_catalog, table_schema, table_name, column_name, ordinal_position FROM system.information_schema.columns @@ -3013,6 +3077,7 @@ system public sql_instances addr system public sql_instances binary_version 7 system public sql_instances crdb_region 6 system public sql_instances id 1 +system public sql_instances is_draining 8 system public sql_instances locality 4 system public sql_instances session_id 3 system public sql_instances sql_addr 5 @@ -3105,6 +3170,22 @@ system public statement_statistics service_latency system public statement_statistics statistics 9 system public statement_statistics total_estimated_execution_time 18 system public statement_statistics transaction_fingerprint_id 3 +system public table_metadata crdb_internal_last_updated_table_id_shard_16 16 +system public table_metadata db_id 1 +system public table_metadata db_name 3 +system public table_metadata last_update_error 14 +system public table_metadata last_updated 15 +system public table_metadata perc_live_data 13 +system public table_metadata replication_size_bytes 9 +system public table_metadata schema_name 4 +system public table_metadata store_ids 8 +system public table_metadata table_id 2 +system public table_metadata table_name 5 +system public table_metadata total_columns 6 +system public table_metadata total_data_bytes 12 +system public table_metadata total_indexes 7 +system public table_metadata total_live_data_bytes 11 +system public table_metadata total_ranges 10 system public table_statistics avgSize 10 system public table_statistics columnIDs 4 system public table_statistics createdAt 5 @@ -3828,6 +3909,7 @@ root other_db public ALL YES ## 
information_schema.table_privileges and information_schema.role_table_grants skipif config local-mixed-24.1 +skipif config local-mixed-24.2 # root can see everything query TTTTTTTT colnames,rowsort SELECT * FROM system.information_schema.table_privileges ORDER BY table_schema, table_name, table_schema, grantee, privilege_type @@ -3866,6 +3948,7 @@ NULL public system crdb_internal databases NULL public system crdb_internal default_privileges SELECT NO YES NULL public system crdb_internal feature_usage SELECT NO YES NULL public system crdb_internal forward_dependencies SELECT NO YES +NULL public system crdb_internal fully_qualified_names SELECT NO YES NULL public system crdb_internal gossip_alerts SELECT NO YES NULL public system crdb_internal gossip_liveness SELECT NO YES NULL public system crdb_internal gossip_network SELECT NO YES @@ -4448,6 +4531,14 @@ NULL root system public statement_execution_insight NULL root system public statement_execution_insights UPDATE YES NO NULL admin system public statement_statistics SELECT YES YES NULL root system public statement_statistics SELECT YES YES +NULL admin system public table_metadata DELETE YES NO +NULL admin system public table_metadata INSERT YES NO +NULL admin system public table_metadata SELECT YES YES +NULL admin system public table_metadata UPDATE YES NO +NULL root system public table_metadata DELETE YES NO +NULL root system public table_metadata INSERT YES NO +NULL root system public table_metadata SELECT YES YES +NULL root system public table_metadata UPDATE YES NO NULL admin system public table_statistics DELETE YES NO NULL admin system public table_statistics INSERT YES NO NULL admin system public table_statistics SELECT YES YES @@ -4538,6 +4629,7 @@ NULL root system public zones NULL root system public zones UPDATE YES NO skipif config local-mixed-24.1 +skipif config local-mixed-24.2 query TTTTTTTT colnames,rowsort SELECT * FROM system.information_schema.role_table_grants ---- @@ -4575,6 +4667,7 @@ NULL public system crdb_internal databases NULL public system crdb_internal default_privileges SELECT NO YES NULL public system crdb_internal feature_usage SELECT NO YES NULL public system crdb_internal forward_dependencies SELECT NO YES +NULL public system crdb_internal fully_qualified_names SELECT NO YES NULL public system crdb_internal gossip_alerts SELECT NO YES NULL public system crdb_internal gossip_liveness SELECT NO YES NULL public system crdb_internal gossip_network SELECT NO YES @@ -5245,6 +5338,14 @@ NULL root system public statement_execution_insight NULL root system public statement_execution_insights INSERT YES NO NULL root system public statement_execution_insights SELECT YES YES NULL root system public statement_execution_insights UPDATE YES NO +NULL admin system public table_metadata DELETE YES NO +NULL admin system public table_metadata INSERT YES NO +NULL admin system public table_metadata SELECT YES YES +NULL admin system public table_metadata UPDATE YES NO +NULL root system public table_metadata DELETE YES NO +NULL root system public table_metadata INSERT YES NO +NULL root system public table_metadata SELECT YES YES +NULL root system public table_metadata UPDATE YES NO statement ok USE other_db; @@ -6177,6 +6278,7 @@ optimizer_always_use_histograms on optimizer_hoist_uncorrelated_equality_subqueries on optimizer_merge_joins_enabled on optimizer_prove_implication_with_virtual_computed_columns on +optimizer_push_limit_into_project_filtered_scan on optimizer_push_offset_into_index_join on 
optimizer_use_conditional_hoist_fix on optimizer_use_forecasts on diff --git a/pkg/sql/logictest/testdata/logic_test/inverted_filter_geospatial_dist b/pkg/sql/logictest/testdata/logic_test/inverted_filter_geospatial_dist index 164623f3fcfe..524cd9fb5fbf 100644 --- a/pkg/sql/logictest/testdata/logic_test/inverted_filter_geospatial_dist +++ b/pkg/sql/logictest/testdata/logic_test/inverted_filter_geospatial_dist @@ -63,7 +63,7 @@ SELECT start_key, end_key, replicas, lease_holder FROM [SHOW RANGES FROM INDEX g ORDER BY 1 ---- start_key end_key replicas lease_holder - …/1152921574000000000 {1} 1 + …/1152921574000000000 {1} 1 …/1152921574000000000 {2} 2 # Distributed. @@ -92,7 +92,7 @@ SELECT start_key, end_key, replicas, lease_holder FROM [SHOW RANGES FROM INDEX g ORDER BY 1 ---- start_key end_key replicas lease_holder - …/1152921574000000000 {2} 2 + …/1152921574000000000 {2} 2 …/1152921574000000000 {2} 2 query I diff --git a/pkg/sql/logictest/testdata/logic_test/inverted_join_geospatial_dist b/pkg/sql/logictest/testdata/logic_test/inverted_join_geospatial_dist index c919bbfb9d28..cb72cfb44dd2 100644 --- a/pkg/sql/logictest/testdata/logic_test/inverted_join_geospatial_dist +++ b/pkg/sql/logictest/testdata/logic_test/inverted_join_geospatial_dist @@ -39,7 +39,7 @@ query TTTI colnames SELECT start_key, end_key, replicas, lease_holder from [SHOW EXPERIMENTAL_RANGES FROM TABLE ltable WITH DETAILS] ORDER BY lease_holder ---- start_key end_key replicas lease_holder - …/1/2 {1} 1 + …/1/2 {1} 1 …/1/2 …/1/3 {2} 2 …/1/3 {3} 3 diff --git a/pkg/sql/logictest/testdata/logic_test/lock_timeout b/pkg/sql/logictest/testdata/logic_test/lock_timeout index b1ec65a42fad..b2404311e42c 100644 --- a/pkg/sql/logictest/testdata/logic_test/lock_timeout +++ b/pkg/sql/logictest/testdata/logic_test/lock_timeout @@ -18,12 +18,13 @@ user testuser statement ok SET lock_timeout = '1ms' -skipif config local-read-committed +skipif config weak-iso-level-configs statement error pgcode 55P03 canceling statement due to lock timeout on row \(k\)=\(1\) in t@t_pkey SELECT * FROM t -# Under READ COMMITTED, non-locking reads don't block on writes. -onlyif config local-read-committed +# Under READ COMMITTED and REPEATABLE READ, non-locking reads don't block on +# writes. +onlyif config weak-iso-level-configs statement ok SELECT * FROM t @@ -33,12 +34,13 @@ SELECT * FROM t FOR UPDATE statement error pgcode 55P03 could not obtain lock on row \(k\)=\(1\) in t@t_pkey SELECT * FROM t FOR UPDATE NOWAIT -skipif config local-read-committed +skipif config weak-iso-level-configs statement error pgcode 55P03 canceling statement due to lock timeout on row \(k\)=\(1\) in t@t_pkey SELECT * FROM t WHERE k = 1 -# Under READ COMMITTED, non-locking reads don't block on writes. -onlyif config local-read-committed +# Under READ COMMITTED and REPEATABLE READ, non-locking reads don't block on +# writes. +onlyif config weak-iso-level-configs statement ok SELECT * FROM t WHERE k = 1 @@ -57,30 +59,33 @@ SELECT * FROM t WHERE k = 2 FOR UPDATE statement ok SELECT * FROM t WHERE k = 2 FOR UPDATE NOWAIT -skipif config local-read-committed +skipif config weak-iso-level-configs statement error pgcode 55P03 canceling statement due to lock timeout on row \(k\)=\(1\) in t@t_pkey SELECT * FROM t WHERE v = 9 -# Under READ COMMITTED, non-locking reads don't block on writes. -onlyif config local-read-committed +# Under READ COMMITTED and REPEATABLE READ, non-locking reads don't block on +# writes. 
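+# A minimal sketch of the interaction exercised here (illustrative only; the
+# second session is assumed, not part of this file): with an intent left by
+# a concurrent writer, a non-locking read under one of these isolation
+# levels returns the last committed value instead of waiting for
+# lock_timeout to expire:
+#
+#   -- session 1: leaves an intent on k = 1
+#   BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
+#   UPDATE t SET v = 10 WHERE k = 1;
+#
+#   -- session 2: reads below the intent rather than blocking
+#   SET lock_timeout = '1ms';
+#   SELECT * FROM t WHERE k = 1;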
+onlyif config weak-iso-level-configs statement ok SELECT * FROM t WHERE v = 9 -skipif config local-read-committed +skipif config weak-iso-level-configs statement error pgcode 55P03 canceling statement due to lock timeout on row \(k\)=\(1\) in t@t_pkey SELECT * FROM t WHERE v = 9 FOR UPDATE -# Under READ COMMITTED, non-locking reads don't block on writes. -onlyif config local-read-committed +# Under READ COMMITTED and REPEATABLE READ, non-locking reads don't block on +# writes. +onlyif config weak-iso-level-configs statement ok SELECT * FROM t WHERE v = 9 -skipif config local-read-committed +skipif config weak-iso-level-configs statement error pgcode 55P03 could not obtain lock on row \(k\)=\(1\) in t@t_pkey SELECT * FROM t WHERE v = 9 FOR UPDATE NOWAIT -# Under READ COMMITTED, non-locking reads don't block on writes. -onlyif config local-read-committed +# Under READ COMMITTED and REPEATABLE READ, non-locking reads don't block on +# writes. +onlyif config weak-iso-level-configs statement ok SELECT * FROM t WHERE v = 9 FOR UPDATE NOWAIT @@ -105,11 +110,11 @@ UPDATE t SET v = 4 WHERE k = 1 statement ok UPDATE t SET v = 4 WHERE k = 2 -skipif config local-read-committed +skipif config weak-iso-level-configs statement error pgcode 55P03 canceling statement due to lock timeout on row \(k\)=\(1\) in t@t_pkey UPDATE t SET v = 4 WHERE v = 9 -onlyif config local-read-committed +onlyif config weak-iso-level-configs statement ok UPDATE t SET v = 4 WHERE v = 9 @@ -122,10 +127,10 @@ DELETE FROM t WHERE k = 1 statement ok DELETE FROM t WHERE k = 2 -skipif config local-read-committed +skipif config weak-iso-level-configs statement error pgcode 55P03 canceling statement due to lock timeout on row \(k\)=\(1\) in t@t_pkey DELETE FROM t WHERE v = 9 -onlyif config local-read-committed +onlyif config weak-iso-level-configs statement ok DELETE FROM t WHERE v = 9 diff --git a/pkg/sql/logictest/testdata/logic_test/lookup_join b/pkg/sql/logictest/testdata/logic_test/lookup_join index 6b95e90bd70c..5eeb9fa63d45 100644 --- a/pkg/sql/logictest/testdata/logic_test/lookup_join +++ b/pkg/sql/logictest/testdata/logic_test/lookup_join @@ -1340,12 +1340,12 @@ xyz 4 xyz 4 query TTTT SELECT * FROM time_xy xy1 INNER LOOKUP JOIN time_xy xy2 ON xy2.x < xy1.x ORDER BY xy1.x, xy1.y, xy2.x, xy2.y ---- -0000-01-01 00:52:12.19515 +0000 UTC NULL 0000-01-01 00:00:00 +0000 UTC -4713-11-24 00:00:00 +0000 +0000 +0000-01-01 00:52:12.19515 +0000 UTC NULL 0000-01-01 00:00:00 +0000 UTC -infinity 0000-01-01 00:52:12.19515 +0000 UTC NULL 0000-01-01 00:00:00 +0000 UTC 2016-06-22 19:10:25 +0000 +0000 -0000-01-01 23:59:59.999999 +0000 UTC 1970-01-01 00:00:00 +0000 +0000 0000-01-01 00:00:00 +0000 UTC -4713-11-24 00:00:00 +0000 +0000 +0000-01-01 23:59:59.999999 +0000 UTC 1970-01-01 00:00:00 +0000 +0000 0000-01-01 00:00:00 +0000 UTC -infinity 0000-01-01 23:59:59.999999 +0000 UTC 1970-01-01 00:00:00 +0000 +0000 0000-01-01 00:00:00 +0000 UTC 2016-06-22 19:10:25 +0000 +0000 0000-01-01 23:59:59.999999 +0000 UTC 1970-01-01 00:00:00 +0000 +0000 0000-01-01 00:52:12.19515 +0000 UTC NULL -0000-01-02 00:00:00 +0000 UTC 2042-01-01 00:00:00 +0000 +0000 0000-01-01 00:00:00 +0000 UTC -4713-11-24 00:00:00 +0000 +0000 +0000-01-02 00:00:00 +0000 UTC 2042-01-01 00:00:00 +0000 +0000 0000-01-01 00:00:00 +0000 UTC -infinity 0000-01-02 00:00:00 +0000 UTC 2042-01-01 00:00:00 +0000 +0000 0000-01-01 00:00:00 +0000 UTC 2016-06-22 19:10:25 +0000 +0000 0000-01-02 00:00:00 +0000 UTC 2042-01-01 00:00:00 +0000 +0000 0000-01-01 00:52:12.19515 +0000 UTC NULL 0000-01-02 00:00:00 
+0000 UTC 2042-01-01 00:00:00 +0000 +0000 0000-01-01 23:59:59.999999 +0000 UTC 1970-01-01 00:00:00 +0000 +0000 @@ -1353,9 +1353,9 @@ SELECT * FROM time_xy xy1 INNER LOOKUP JOIN time_xy xy2 ON xy2.x < xy1.x ORDER B query TTTT SELECT * FROM time_xy xy1 INNER LOOKUP JOIN time_xy xy2 ON xy2.x > xy1.x ORDER BY xy1.x, xy1.y, xy2.x, xy2.y ---- -0000-01-01 00:00:00 +0000 UTC -4713-11-24 00:00:00 +0000 +0000 0000-01-01 00:52:12.19515 +0000 UTC NULL -0000-01-01 00:00:00 +0000 UTC -4713-11-24 00:00:00 +0000 +0000 0000-01-01 23:59:59.999999 +0000 UTC 1970-01-01 00:00:00 +0000 +0000 -0000-01-01 00:00:00 +0000 UTC -4713-11-24 00:00:00 +0000 +0000 0000-01-02 00:00:00 +0000 UTC 2042-01-01 00:00:00 +0000 +0000 +0000-01-01 00:00:00 +0000 UTC -infinity 0000-01-01 00:52:12.19515 +0000 UTC NULL +0000-01-01 00:00:00 +0000 UTC -infinity 0000-01-01 23:59:59.999999 +0000 UTC 1970-01-01 00:00:00 +0000 +0000 +0000-01-01 00:00:00 +0000 UTC -infinity 0000-01-02 00:00:00 +0000 UTC 2042-01-01 00:00:00 +0000 +0000 0000-01-01 00:00:00 +0000 UTC 2016-06-22 19:10:25 +0000 +0000 0000-01-01 00:52:12.19515 +0000 UTC NULL 0000-01-01 00:00:00 +0000 UTC 2016-06-22 19:10:25 +0000 +0000 0000-01-01 23:59:59.999999 +0000 UTC 1970-01-01 00:00:00 +0000 +0000 0000-01-01 00:00:00 +0000 UTC 2016-06-22 19:10:25 +0000 +0000 0000-01-02 00:00:00 +0000 UTC 2042-01-01 00:00:00 +0000 +0000 @@ -1366,18 +1366,18 @@ SELECT * FROM time_xy xy1 INNER LOOKUP JOIN time_xy xy2 ON xy2.x > xy1.x ORDER B query TTTT SELECT * FROM time_xy xy1 INNER LOOKUP JOIN time_xy xy2 ON xy2.x <= xy1.x ORDER BY xy1.x, xy1.y, xy2.x, xy2.y ---- -0000-01-01 00:00:00 +0000 UTC -4713-11-24 00:00:00 +0000 +0000 0000-01-01 00:00:00 +0000 UTC -4713-11-24 00:00:00 +0000 +0000 -0000-01-01 00:00:00 +0000 UTC -4713-11-24 00:00:00 +0000 +0000 0000-01-01 00:00:00 +0000 UTC 2016-06-22 19:10:25 +0000 +0000 -0000-01-01 00:00:00 +0000 UTC 2016-06-22 19:10:25 +0000 +0000 0000-01-01 00:00:00 +0000 UTC -4713-11-24 00:00:00 +0000 +0000 +0000-01-01 00:00:00 +0000 UTC -infinity 0000-01-01 00:00:00 +0000 UTC -infinity +0000-01-01 00:00:00 +0000 UTC -infinity 0000-01-01 00:00:00 +0000 UTC 2016-06-22 19:10:25 +0000 +0000 +0000-01-01 00:00:00 +0000 UTC 2016-06-22 19:10:25 +0000 +0000 0000-01-01 00:00:00 +0000 UTC -infinity 0000-01-01 00:00:00 +0000 UTC 2016-06-22 19:10:25 +0000 +0000 0000-01-01 00:00:00 +0000 UTC 2016-06-22 19:10:25 +0000 +0000 -0000-01-01 00:52:12.19515 +0000 UTC NULL 0000-01-01 00:00:00 +0000 UTC -4713-11-24 00:00:00 +0000 +0000 +0000-01-01 00:52:12.19515 +0000 UTC NULL 0000-01-01 00:00:00 +0000 UTC -infinity 0000-01-01 00:52:12.19515 +0000 UTC NULL 0000-01-01 00:00:00 +0000 UTC 2016-06-22 19:10:25 +0000 +0000 0000-01-01 00:52:12.19515 +0000 UTC NULL 0000-01-01 00:52:12.19515 +0000 UTC NULL -0000-01-01 23:59:59.999999 +0000 UTC 1970-01-01 00:00:00 +0000 +0000 0000-01-01 00:00:00 +0000 UTC -4713-11-24 00:00:00 +0000 +0000 +0000-01-01 23:59:59.999999 +0000 UTC 1970-01-01 00:00:00 +0000 +0000 0000-01-01 00:00:00 +0000 UTC -infinity 0000-01-01 23:59:59.999999 +0000 UTC 1970-01-01 00:00:00 +0000 +0000 0000-01-01 00:00:00 +0000 UTC 2016-06-22 19:10:25 +0000 +0000 0000-01-01 23:59:59.999999 +0000 UTC 1970-01-01 00:00:00 +0000 +0000 0000-01-01 00:52:12.19515 +0000 UTC NULL 0000-01-01 23:59:59.999999 +0000 UTC 1970-01-01 00:00:00 +0000 +0000 0000-01-01 23:59:59.999999 +0000 UTC 1970-01-01 00:00:00 +0000 +0000 -0000-01-02 00:00:00 +0000 UTC 2042-01-01 00:00:00 +0000 +0000 0000-01-01 00:00:00 +0000 UTC -4713-11-24 00:00:00 +0000 +0000 +0000-01-02 00:00:00 +0000 UTC 2042-01-01 00:00:00 +0000 
+0000 0000-01-01 00:00:00 +0000 UTC -infinity 0000-01-02 00:00:00 +0000 UTC 2042-01-01 00:00:00 +0000 +0000 0000-01-01 00:00:00 +0000 UTC 2016-06-22 19:10:25 +0000 +0000 0000-01-02 00:00:00 +0000 UTC 2042-01-01 00:00:00 +0000 +0000 0000-01-01 00:52:12.19515 +0000 UTC NULL 0000-01-02 00:00:00 +0000 UTC 2042-01-01 00:00:00 +0000 +0000 0000-01-01 23:59:59.999999 +0000 UTC 1970-01-01 00:00:00 +0000 +0000 @@ -1386,12 +1386,12 @@ SELECT * FROM time_xy xy1 INNER LOOKUP JOIN time_xy xy2 ON xy2.x <= xy1.x ORDER query TTTT SELECT * FROM time_xy xy1 INNER LOOKUP JOIN time_xy xy2 ON xy2.x >= xy1.x ORDER BY xy1.x, xy1.y, xy2.x, xy2.y ---- -0000-01-01 00:00:00 +0000 UTC -4713-11-24 00:00:00 +0000 +0000 0000-01-01 00:00:00 +0000 UTC -4713-11-24 00:00:00 +0000 +0000 -0000-01-01 00:00:00 +0000 UTC -4713-11-24 00:00:00 +0000 +0000 0000-01-01 00:00:00 +0000 UTC 2016-06-22 19:10:25 +0000 +0000 -0000-01-01 00:00:00 +0000 UTC -4713-11-24 00:00:00 +0000 +0000 0000-01-01 00:52:12.19515 +0000 UTC NULL -0000-01-01 00:00:00 +0000 UTC -4713-11-24 00:00:00 +0000 +0000 0000-01-01 23:59:59.999999 +0000 UTC 1970-01-01 00:00:00 +0000 +0000 -0000-01-01 00:00:00 +0000 UTC -4713-11-24 00:00:00 +0000 +0000 0000-01-02 00:00:00 +0000 UTC 2042-01-01 00:00:00 +0000 +0000 -0000-01-01 00:00:00 +0000 UTC 2016-06-22 19:10:25 +0000 +0000 0000-01-01 00:00:00 +0000 UTC -4713-11-24 00:00:00 +0000 +0000 +0000-01-01 00:00:00 +0000 UTC -infinity 0000-01-01 00:00:00 +0000 UTC -infinity +0000-01-01 00:00:00 +0000 UTC -infinity 0000-01-01 00:00:00 +0000 UTC 2016-06-22 19:10:25 +0000 +0000 +0000-01-01 00:00:00 +0000 UTC -infinity 0000-01-01 00:52:12.19515 +0000 UTC NULL +0000-01-01 00:00:00 +0000 UTC -infinity 0000-01-01 23:59:59.999999 +0000 UTC 1970-01-01 00:00:00 +0000 +0000 +0000-01-01 00:00:00 +0000 UTC -infinity 0000-01-02 00:00:00 +0000 UTC 2042-01-01 00:00:00 +0000 +0000 +0000-01-01 00:00:00 +0000 UTC 2016-06-22 19:10:25 +0000 +0000 0000-01-01 00:00:00 +0000 UTC -infinity 0000-01-01 00:00:00 +0000 UTC 2016-06-22 19:10:25 +0000 +0000 0000-01-01 00:00:00 +0000 UTC 2016-06-22 19:10:25 +0000 +0000 0000-01-01 00:00:00 +0000 UTC 2016-06-22 19:10:25 +0000 +0000 0000-01-01 00:52:12.19515 +0000 UTC NULL 0000-01-01 00:00:00 +0000 UTC 2016-06-22 19:10:25 +0000 +0000 0000-01-01 23:59:59.999999 +0000 UTC 1970-01-01 00:00:00 +0000 +0000 @@ -1406,46 +1406,46 @@ SELECT * FROM time_xy xy1 INNER LOOKUP JOIN time_xy xy2 ON xy2.x >= xy1.x ORDER query TTTT SELECT * FROM time_xy xy1 INNER LOOKUP JOIN time_xy xy2 ON xy2.y < xy1.y ORDER BY xy1.x, xy1.y, xy2.x, xy2.y ---- -NULL 294276-12-31 23:59:59.999999 +0000 +0000 0000-01-01 00:00:00 +0000 UTC -4713-11-24 00:00:00 +0000 +0000 -NULL 294276-12-31 23:59:59.999999 +0000 +0000 0000-01-01 00:00:00 +0000 UTC 2016-06-22 19:10:25 +0000 +0000 -NULL 294276-12-31 23:59:59.999999 +0000 +0000 0000-01-01 23:59:59.999999 +0000 UTC 1970-01-01 00:00:00 +0000 +0000 -NULL 294276-12-31 23:59:59.999999 +0000 +0000 0000-01-02 00:00:00 +0000 UTC 2042-01-01 00:00:00 +0000 +0000 -0000-01-01 00:00:00 +0000 UTC 2016-06-22 19:10:25 +0000 +0000 0000-01-01 00:00:00 +0000 UTC -4713-11-24 00:00:00 +0000 +0000 +NULL infinity 0000-01-01 00:00:00 +0000 UTC -infinity +NULL infinity 0000-01-01 00:00:00 +0000 UTC 2016-06-22 19:10:25 +0000 +0000 +NULL infinity 0000-01-01 23:59:59.999999 +0000 UTC 1970-01-01 00:00:00 +0000 +0000 +NULL infinity 0000-01-02 00:00:00 +0000 UTC 2042-01-01 00:00:00 +0000 +0000 +0000-01-01 00:00:00 +0000 UTC 2016-06-22 19:10:25 +0000 +0000 0000-01-01 00:00:00 +0000 UTC -infinity 0000-01-01 00:00:00 +0000 UTC 2016-06-22 
19:10:25 +0000 +0000 0000-01-01 23:59:59.999999 +0000 UTC 1970-01-01 00:00:00 +0000 +0000 -0000-01-01 23:59:59.999999 +0000 UTC 1970-01-01 00:00:00 +0000 +0000 0000-01-01 00:00:00 +0000 UTC -4713-11-24 00:00:00 +0000 +0000 -0000-01-02 00:00:00 +0000 UTC 2042-01-01 00:00:00 +0000 +0000 0000-01-01 00:00:00 +0000 UTC -4713-11-24 00:00:00 +0000 +0000 +0000-01-01 23:59:59.999999 +0000 UTC 1970-01-01 00:00:00 +0000 +0000 0000-01-01 00:00:00 +0000 UTC -infinity +0000-01-02 00:00:00 +0000 UTC 2042-01-01 00:00:00 +0000 +0000 0000-01-01 00:00:00 +0000 UTC -infinity 0000-01-02 00:00:00 +0000 UTC 2042-01-01 00:00:00 +0000 +0000 0000-01-01 00:00:00 +0000 UTC 2016-06-22 19:10:25 +0000 +0000 0000-01-02 00:00:00 +0000 UTC 2042-01-01 00:00:00 +0000 +0000 0000-01-01 23:59:59.999999 +0000 UTC 1970-01-01 00:00:00 +0000 +0000 query TTTT SELECT * FROM time_xy xy1 INNER LOOKUP JOIN time_xy xy2 ON xy2.y > xy1.y ORDER BY xy1.x, xy1.y, xy2.x, xy2.y ---- -0000-01-01 00:00:00 +0000 UTC -4713-11-24 00:00:00 +0000 +0000 NULL 294276-12-31 23:59:59.999999 +0000 +0000 -0000-01-01 00:00:00 +0000 UTC -4713-11-24 00:00:00 +0000 +0000 0000-01-01 00:00:00 +0000 UTC 2016-06-22 19:10:25 +0000 +0000 -0000-01-01 00:00:00 +0000 UTC -4713-11-24 00:00:00 +0000 +0000 0000-01-01 23:59:59.999999 +0000 UTC 1970-01-01 00:00:00 +0000 +0000 -0000-01-01 00:00:00 +0000 UTC -4713-11-24 00:00:00 +0000 +0000 0000-01-02 00:00:00 +0000 UTC 2042-01-01 00:00:00 +0000 +0000 -0000-01-01 00:00:00 +0000 UTC 2016-06-22 19:10:25 +0000 +0000 NULL 294276-12-31 23:59:59.999999 +0000 +0000 +0000-01-01 00:00:00 +0000 UTC -infinity NULL infinity +0000-01-01 00:00:00 +0000 UTC -infinity 0000-01-01 00:00:00 +0000 UTC 2016-06-22 19:10:25 +0000 +0000 +0000-01-01 00:00:00 +0000 UTC -infinity 0000-01-01 23:59:59.999999 +0000 UTC 1970-01-01 00:00:00 +0000 +0000 +0000-01-01 00:00:00 +0000 UTC -infinity 0000-01-02 00:00:00 +0000 UTC 2042-01-01 00:00:00 +0000 +0000 +0000-01-01 00:00:00 +0000 UTC 2016-06-22 19:10:25 +0000 +0000 NULL infinity 0000-01-01 00:00:00 +0000 UTC 2016-06-22 19:10:25 +0000 +0000 0000-01-02 00:00:00 +0000 UTC 2042-01-01 00:00:00 +0000 +0000 -0000-01-01 23:59:59.999999 +0000 UTC 1970-01-01 00:00:00 +0000 +0000 NULL 294276-12-31 23:59:59.999999 +0000 +0000 +0000-01-01 23:59:59.999999 +0000 UTC 1970-01-01 00:00:00 +0000 +0000 NULL infinity 0000-01-01 23:59:59.999999 +0000 UTC 1970-01-01 00:00:00 +0000 +0000 0000-01-01 00:00:00 +0000 UTC 2016-06-22 19:10:25 +0000 +0000 0000-01-01 23:59:59.999999 +0000 UTC 1970-01-01 00:00:00 +0000 +0000 0000-01-02 00:00:00 +0000 UTC 2042-01-01 00:00:00 +0000 +0000 -0000-01-02 00:00:00 +0000 UTC 2042-01-01 00:00:00 +0000 +0000 NULL 294276-12-31 23:59:59.999999 +0000 +0000 +0000-01-02 00:00:00 +0000 UTC 2042-01-01 00:00:00 +0000 +0000 NULL infinity query TTTT SELECT * FROM time_xy xy1 INNER LOOKUP JOIN time_xy xy2 ON xy2.y <= xy1.y ORDER BY xy1.x, xy1.y, xy2.x, xy2.y ---- -NULL 294276-12-31 23:59:59.999999 +0000 +0000 NULL 294276-12-31 23:59:59.999999 +0000 +0000 -NULL 294276-12-31 23:59:59.999999 +0000 +0000 0000-01-01 00:00:00 +0000 UTC -4713-11-24 00:00:00 +0000 +0000 -NULL 294276-12-31 23:59:59.999999 +0000 +0000 0000-01-01 00:00:00 +0000 UTC 2016-06-22 19:10:25 +0000 +0000 -NULL 294276-12-31 23:59:59.999999 +0000 +0000 0000-01-01 23:59:59.999999 +0000 UTC 1970-01-01 00:00:00 +0000 +0000 -NULL 294276-12-31 23:59:59.999999 +0000 +0000 0000-01-02 00:00:00 +0000 UTC 2042-01-01 00:00:00 +0000 +0000 -0000-01-01 00:00:00 +0000 UTC -4713-11-24 00:00:00 +0000 +0000 0000-01-01 00:00:00 +0000 UTC -4713-11-24 00:00:00 +0000 
+0000 -0000-01-01 00:00:00 +0000 UTC 2016-06-22 19:10:25 +0000 +0000 0000-01-01 00:00:00 +0000 UTC -4713-11-24 00:00:00 +0000 +0000 +NULL infinity NULL infinity +NULL infinity 0000-01-01 00:00:00 +0000 UTC -infinity +NULL infinity 0000-01-01 00:00:00 +0000 UTC 2016-06-22 19:10:25 +0000 +0000 +NULL infinity 0000-01-01 23:59:59.999999 +0000 UTC 1970-01-01 00:00:00 +0000 +0000 +NULL infinity 0000-01-02 00:00:00 +0000 UTC 2042-01-01 00:00:00 +0000 +0000 +0000-01-01 00:00:00 +0000 UTC -infinity 0000-01-01 00:00:00 +0000 UTC -infinity +0000-01-01 00:00:00 +0000 UTC 2016-06-22 19:10:25 +0000 +0000 0000-01-01 00:00:00 +0000 UTC -infinity 0000-01-01 00:00:00 +0000 UTC 2016-06-22 19:10:25 +0000 +0000 0000-01-01 00:00:00 +0000 UTC 2016-06-22 19:10:25 +0000 +0000 0000-01-01 00:00:00 +0000 UTC 2016-06-22 19:10:25 +0000 +0000 0000-01-01 23:59:59.999999 +0000 UTC 1970-01-01 00:00:00 +0000 +0000 -0000-01-01 23:59:59.999999 +0000 UTC 1970-01-01 00:00:00 +0000 +0000 0000-01-01 00:00:00 +0000 UTC -4713-11-24 00:00:00 +0000 +0000 +0000-01-01 23:59:59.999999 +0000 UTC 1970-01-01 00:00:00 +0000 +0000 0000-01-01 00:00:00 +0000 UTC -infinity 0000-01-01 23:59:59.999999 +0000 UTC 1970-01-01 00:00:00 +0000 +0000 0000-01-01 23:59:59.999999 +0000 UTC 1970-01-01 00:00:00 +0000 +0000 -0000-01-02 00:00:00 +0000 UTC 2042-01-01 00:00:00 +0000 +0000 0000-01-01 00:00:00 +0000 UTC -4713-11-24 00:00:00 +0000 +0000 +0000-01-02 00:00:00 +0000 UTC 2042-01-01 00:00:00 +0000 +0000 0000-01-01 00:00:00 +0000 UTC -infinity 0000-01-02 00:00:00 +0000 UTC 2042-01-01 00:00:00 +0000 +0000 0000-01-01 00:00:00 +0000 UTC 2016-06-22 19:10:25 +0000 +0000 0000-01-02 00:00:00 +0000 UTC 2042-01-01 00:00:00 +0000 +0000 0000-01-01 23:59:59.999999 +0000 UTC 1970-01-01 00:00:00 +0000 +0000 0000-01-02 00:00:00 +0000 UTC 2042-01-01 00:00:00 +0000 +0000 0000-01-02 00:00:00 +0000 UTC 2042-01-01 00:00:00 +0000 +0000 @@ -1453,20 +1453,20 @@ NULL 294276-12-31 23:59:59.999999 +0000 +0000 query TTTT SELECT * FROM time_xy xy1 INNER LOOKUP JOIN time_xy xy2 ON xy2.y >= xy1.y ORDER BY xy1.x, xy1.y, xy2.x, xy2.y ---- -NULL 294276-12-31 23:59:59.999999 +0000 +0000 NULL 294276-12-31 23:59:59.999999 +0000 +0000 -0000-01-01 00:00:00 +0000 UTC -4713-11-24 00:00:00 +0000 +0000 NULL 294276-12-31 23:59:59.999999 +0000 +0000 -0000-01-01 00:00:00 +0000 UTC -4713-11-24 00:00:00 +0000 +0000 0000-01-01 00:00:00 +0000 UTC -4713-11-24 00:00:00 +0000 +0000 -0000-01-01 00:00:00 +0000 UTC -4713-11-24 00:00:00 +0000 +0000 0000-01-01 00:00:00 +0000 UTC 2016-06-22 19:10:25 +0000 +0000 -0000-01-01 00:00:00 +0000 UTC -4713-11-24 00:00:00 +0000 +0000 0000-01-01 23:59:59.999999 +0000 UTC 1970-01-01 00:00:00 +0000 +0000 -0000-01-01 00:00:00 +0000 UTC -4713-11-24 00:00:00 +0000 +0000 0000-01-02 00:00:00 +0000 UTC 2042-01-01 00:00:00 +0000 +0000 -0000-01-01 00:00:00 +0000 UTC 2016-06-22 19:10:25 +0000 +0000 NULL 294276-12-31 23:59:59.999999 +0000 +0000 +NULL infinity NULL infinity +0000-01-01 00:00:00 +0000 UTC -infinity NULL infinity +0000-01-01 00:00:00 +0000 UTC -infinity 0000-01-01 00:00:00 +0000 UTC -infinity +0000-01-01 00:00:00 +0000 UTC -infinity 0000-01-01 00:00:00 +0000 UTC 2016-06-22 19:10:25 +0000 +0000 +0000-01-01 00:00:00 +0000 UTC -infinity 0000-01-01 23:59:59.999999 +0000 UTC 1970-01-01 00:00:00 +0000 +0000 +0000-01-01 00:00:00 +0000 UTC -infinity 0000-01-02 00:00:00 +0000 UTC 2042-01-01 00:00:00 +0000 +0000 +0000-01-01 00:00:00 +0000 UTC 2016-06-22 19:10:25 +0000 +0000 NULL infinity 0000-01-01 00:00:00 +0000 UTC 2016-06-22 19:10:25 +0000 +0000 0000-01-01 00:00:00 +0000 UTC 
2016-06-22 19:10:25 +0000 +0000 0000-01-01 00:00:00 +0000 UTC 2016-06-22 19:10:25 +0000 +0000 0000-01-02 00:00:00 +0000 UTC 2042-01-01 00:00:00 +0000 +0000 -0000-01-01 23:59:59.999999 +0000 UTC 1970-01-01 00:00:00 +0000 +0000 NULL 294276-12-31 23:59:59.999999 +0000 +0000 +0000-01-01 23:59:59.999999 +0000 UTC 1970-01-01 00:00:00 +0000 +0000 NULL infinity 0000-01-01 23:59:59.999999 +0000 UTC 1970-01-01 00:00:00 +0000 +0000 0000-01-01 00:00:00 +0000 UTC 2016-06-22 19:10:25 +0000 +0000 0000-01-01 23:59:59.999999 +0000 UTC 1970-01-01 00:00:00 +0000 +0000 0000-01-01 23:59:59.999999 +0000 UTC 1970-01-01 00:00:00 +0000 +0000 0000-01-01 23:59:59.999999 +0000 UTC 1970-01-01 00:00:00 +0000 +0000 0000-01-02 00:00:00 +0000 UTC 2042-01-01 00:00:00 +0000 +0000 -0000-01-02 00:00:00 +0000 UTC 2042-01-01 00:00:00 +0000 +0000 NULL 294276-12-31 23:59:59.999999 +0000 +0000 +0000-01-02 00:00:00 +0000 UTC 2042-01-01 00:00:00 +0000 +0000 NULL infinity 0000-01-02 00:00:00 +0000 UTC 2042-01-01 00:00:00 +0000 +0000 0000-01-02 00:00:00 +0000 UTC 2042-01-01 00:00:00 +0000 +0000 statement ok diff --git a/pkg/sql/logictest/testdata/logic_test/merge_join_dist b/pkg/sql/logictest/testdata/logic_test/merge_join_dist index c619f13da80f..f4da6e9ae6b0 100644 --- a/pkg/sql/logictest/testdata/logic_test/merge_join_dist +++ b/pkg/sql/logictest/testdata/logic_test/merge_join_dist @@ -30,7 +30,7 @@ query TTTI colnames SELECT start_key, end_key, replicas, lease_holder from [SHOW RANGES FROM TABLE l WITH DETAILS] ORDER BY lease_holder ---- start_key end_key replicas lease_holder - …/1/2 {1} 1 + …/1/2 {1} 1 …/1/2 …/1/3 {2} 2 …/1/3 {3} 3 diff --git a/pkg/sql/logictest/testdata/logic_test/notice b/pkg/sql/logictest/testdata/logic_test/notice index 385f17142cec..bb1936667c2a 100644 --- a/pkg/sql/logictest/testdata/logic_test/notice +++ b/pkg/sql/logictest/testdata/logic_test/notice @@ -26,8 +26,8 @@ statement ok CREATE DATABASE d; CREATE TABLE d.t (x int) -# Read committed emits extra notices, so skip it. -skipif config local-read-committed +# Weak isolation levels emit extra notices, so skip them. +skipif config weak-iso-level-configs query T noticetrace ALTER TABLE d.t RENAME TO d.t2 ---- @@ -41,8 +41,8 @@ CREATE TYPE color AS ENUM () statement ok ALTER TYPE color ADD VALUE 'black' -# Read committed emits extra notices, so skip it. -skipif config local-read-committed +# Weak isolation levels emit extra notices, so skip them. +skipif config weak-iso-level-configs query T noticetrace ALTER TYPE color ADD VALUE IF NOT EXISTS 'black' ---- @@ -51,8 +51,8 @@ NOTICE: enum value "black" already exists, skipping statement ok CREATE MATERIALIZED VIEW v AS SELECT 1 -# Read committed emits extra notices, so skip it. -skipif config local-read-committed +# Weak isolation levels emit extra notices, so skip them. 
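+# For context: `query T noticetrace` asserts on the notices a statement
+# emits rather than on its rows, so a config that adds isolation-level
+# notices of its own would perturb the expected output. The record shape,
+# as in the ALTER TYPE case above:
+#
+#   query T noticetrace
+#   ALTER TYPE color ADD VALUE IF NOT EXISTS 'black'
+#   ----
+#   NOTICE: enum value "black" already exists, skipping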
+skipif config weak-iso-level-configs query T noticetrace REFRESH MATERIALIZED VIEW CONCURRENTLY v ---- diff --git a/pkg/sql/logictest/testdata/logic_test/partial_index b/pkg/sql/logictest/testdata/logic_test/partial_index index 38538ef36124..6ec6b7f262fc 100644 --- a/pkg/sql/logictest/testdata/logic_test/partial_index +++ b/pkg/sql/logictest/testdata/logic_test/partial_index @@ -1809,7 +1809,7 @@ CREATE TABLE t61414_c ( FAMILY (k, a, c) ) -skipif config local-read-committed 110873 +skipif config weak-iso-level-configs 110873 statement ok UPSERT INTO t61414_c (k, a, b, d) VALUES (1, 2, 3, 4) diff --git a/pkg/sql/logictest/testdata/logic_test/partitioning b/pkg/sql/logictest/testdata/logic_test/partitioning index 0bcf009a71e2..71321890b9f6 100644 --- a/pkg/sql/logictest/testdata/logic_test/partitioning +++ b/pkg/sql/logictest/testdata/logic_test/partitioning @@ -1,6 +1,6 @@ -# 3node-tenant is blocked from running this file because the config runs with +# enterprise-configs are blocked from running this file because they run with # a CCL binary, so the expected failures from using a non-CCL binary don't occur. -# LogicTest: !3node-tenant-default-configs +# LogicTest: !enterprise-configs statement error pgcode XXC01 creating or manipulating partitions requires a CCL binary CREATE TABLE t (a INT, b INT, c INT, PRIMARY KEY (a, b)) PARTITION BY LIST (a) ( diff --git a/pkg/sql/logictest/testdata/logic_test/pg_builtins b/pkg/sql/logictest/testdata/logic_test/pg_builtins index 6444bdb7e765..4f3fbc153585 100644 --- a/pkg/sql/logictest/testdata/logic_test/pg_builtins +++ b/pkg/sql/logictest/testdata/logic_test/pg_builtins @@ -212,42 +212,42 @@ is_updatable b 123 2 28 is_updatable c 123 3 28 false is_updatable_view a 124 1 0 false is_updatable_view b 124 2 0 false -pg_class oid 4294967091 1 0 false -pg_class relname 4294967091 2 0 false -pg_class relnamespace 4294967091 3 0 false -pg_class reltype 4294967091 4 0 false -pg_class reloftype 4294967091 5 0 false -pg_class relowner 4294967091 6 0 false -pg_class relam 4294967091 7 0 false -pg_class relfilenode 4294967091 8 0 false -pg_class reltablespace 4294967091 9 0 false -pg_class relpages 4294967091 10 0 false -pg_class reltuples 4294967091 11 0 false -pg_class relallvisible 4294967091 12 0 false -pg_class reltoastrelid 4294967091 13 0 false -pg_class relhasindex 4294967091 14 0 false -pg_class relisshared 4294967091 15 0 false -pg_class relpersistence 4294967091 16 0 false -pg_class relistemp 4294967091 17 0 false -pg_class relkind 4294967091 18 0 false -pg_class relnatts 4294967091 19 0 false -pg_class relchecks 4294967091 20 0 false -pg_class relhasoids 4294967091 21 0 false -pg_class relhaspkey 4294967091 22 0 false -pg_class relhasrules 4294967091 23 0 false -pg_class relhastriggers 4294967091 24 0 false -pg_class relhassubclass 4294967091 25 0 false -pg_class relfrozenxid 4294967091 26 0 false -pg_class relacl 4294967091 27 0 false -pg_class reloptions 4294967091 28 0 false -pg_class relforcerowsecurity 4294967091 29 0 false -pg_class relispartition 4294967091 30 0 false -pg_class relispopulated 4294967091 31 0 false -pg_class relreplident 4294967091 32 0 false -pg_class relrewrite 4294967091 33 0 false -pg_class relrowsecurity 4294967091 34 0 false -pg_class relpartbound 4294967091 35 0 false -pg_class relminmxid 4294967091 36 0 false +pg_class oid 4294967090 1 0 false +pg_class relname 4294967090 2 0 false +pg_class relnamespace 4294967090 3 0 false +pg_class reltype 4294967090 4 0 false +pg_class reloftype 4294967090 5 0 false +pg_class 
relowner 4294967090 6 0 false +pg_class relam 4294967090 7 0 false +pg_class relfilenode 4294967090 8 0 false +pg_class reltablespace 4294967090 9 0 false +pg_class relpages 4294967090 10 0 false +pg_class reltuples 4294967090 11 0 false +pg_class relallvisible 4294967090 12 0 false +pg_class reltoastrelid 4294967090 13 0 false +pg_class relhasindex 4294967090 14 0 false +pg_class relisshared 4294967090 15 0 false +pg_class relpersistence 4294967090 16 0 false +pg_class relistemp 4294967090 17 0 false +pg_class relkind 4294967090 18 0 false +pg_class relnatts 4294967090 19 0 false +pg_class relchecks 4294967090 20 0 false +pg_class relhasoids 4294967090 21 0 false +pg_class relhaspkey 4294967090 22 0 false +pg_class relhasrules 4294967090 23 0 false +pg_class relhastriggers 4294967090 24 0 false +pg_class relhassubclass 4294967090 25 0 false +pg_class relfrozenxid 4294967090 26 0 false +pg_class relacl 4294967090 27 0 false +pg_class reloptions 4294967090 28 0 false +pg_class relforcerowsecurity 4294967090 29 0 false +pg_class relispartition 4294967090 30 0 false +pg_class relispopulated 4294967090 31 0 false +pg_class relreplident 4294967090 32 0 false +pg_class relrewrite 4294967090 33 0 false +pg_class relrowsecurity 4294967090 34 0 false +pg_class relpartbound 4294967090 35 0 false +pg_class relminmxid 4294967090 36 0 false # Check that the oid does not exist. If this test fail, change the oid here and in diff --git a/pkg/sql/logictest/testdata/logic_test/pg_catalog b/pkg/sql/logictest/testdata/logic_test/pg_catalog index d79e9a607f31..b0327c337ee8 100644 --- a/pkg/sql/logictest/testdata/logic_test/pg_catalog +++ b/pkg/sql/logictest/testdata/logic_test/pg_catalog @@ -428,9 +428,9 @@ SELECT * FROM pg_catalog.pg_namespace ---- oid nspname nspowner nspacl 4294967295 crdb_internal 3233629770 NULL -4294967190 information_schema 3233629770 NULL -4294967103 pg_catalog 3233629770 NULL -4294966973 pg_extension 3233629770 NULL +4294967189 information_schema 3233629770 NULL +4294967102 pg_catalog 3233629770 NULL +4294966972 pg_extension 3233629770 NULL 105 public 1546506610 NULL # Verify that we can still see the schemas even if we don't have any privilege @@ -447,9 +447,9 @@ SELECT * FROM pg_catalog.pg_namespace ---- oid nspname nspowner nspacl 4294967295 crdb_internal 3233629770 NULL -4294967190 information_schema 3233629770 NULL -4294967103 pg_catalog 3233629770 NULL -4294966973 pg_extension 3233629770 NULL +4294967189 information_schema 3233629770 NULL +4294967102 pg_catalog 3233629770 NULL +4294966972 pg_extension 3233629770 NULL 105 public 1546506610 NULL user root @@ -1161,6 +1161,17 @@ indexrelid indrelid indnatts indisunique indnullsnotdistinct indisprimary 1229708768 60 3 true false true false true false true false false true false 1 2 3 0 3403232968 0 0 0 0 2 2 2 NULL NULL 3 1276104432 12 2 true false true false true false true false false true false 1 6 0 0 0 0 2 2 NULL NULL 2 1322500096 28 1 true false true false true false true false false true false 1 0 0 2 NULL NULL 1 +1396653700 67 1 false false false false false false true false false true false 4 3403232968 0 2 NULL NULL 1 +1396653701 67 1 false false false false false false true false false true false 8 0 0 2 NULL NULL 1 +1396653702 67 1 false false false false false false true false false true false 3 3403232968 0 2 NULL NULL 1 +1396653703 67 1 false false false false false false true false false true false 5 3403232968 0 2 NULL NULL 1 +1396653704 67 2 false false false false false false true false false true false 13 2 0 0 0 
0 1 2 NULL NULL 2 +1396653705 67 3 false false false false false false true false false true false 16 15 2 0 0 0 0 0 0 2 1 2 NULL NULL 3 +1396653706 67 2 false false false false false false true false false true false 6 2 0 0 0 0 1 2 NULL NULL 2 +1396653707 67 2 false false false false false false true false false true false 7 2 0 0 0 0 1 2 NULL NULL 2 +1396653708 67 2 false false false false false false true false false true false 9 2 0 0 0 0 1 2 NULL NULL 2 +1396653709 67 2 false false false false false false true false false true false 10 2 0 0 0 0 1 2 NULL NULL 2 +1396653711 67 2 true false true false true false true false false true false 1 2 0 0 0 0 2 2 NULL NULL 2 1489445036 35 9 false false false false false false true false false true false 2 1 3 6 7 8 9 10 11 0 0 0 0 2 2 NULL NULL 2 1489445039 35 1 true false true false true false true false false true false 1 0 0 2 NULL NULL 1 1535840703 51 1 true false true false true false true false false true false 1 0 0 2 NULL NULL 1 @@ -1324,6 +1335,25 @@ indexrelid operator_argument_type_oid operator_argument_position 1276104432 0 1 1276104432 0 2 1322500096 0 1 +1396653700 0 1 +1396653701 0 1 +1396653702 0 1 +1396653703 0 1 +1396653704 0 1 +1396653704 0 2 +1396653705 0 1 +1396653705 0 2 +1396653705 0 3 +1396653706 0 1 +1396653706 0 2 +1396653707 0 1 +1396653707 0 2 +1396653708 0 1 +1396653708 0 2 +1396653709 0 1 +1396653709 0 2 +1396653711 0 1 +1396653711 0 2 1489445036 0 1 1489445036 0 2 1489445039 0 1 @@ -1489,7 +1519,7 @@ SELECT * FROM pg_collation WHERE collname='en-US' ---- oid collname collnamespace collowner collencoding collcollate collctype collprovider collversion collisdeterministic -3903121477 en-US 4294967103 NULL 6 NULL NULL NULL NULL NULL +3903121477 en-US 4294967102 NULL 6 NULL NULL NULL NULL NULL user testuser @@ -1688,16 +1718,16 @@ FROM pg_catalog.pg_depend ORDER BY objid, refobjid, refobjsubid ---- classid objid objsubid refclassid refobjid refobjsubid deptype -4294967091 111 0 4294967091 110 14 a -4294967091 112 0 4294967091 110 15 a -4294967045 842401391 0 4294967091 110 1 n -4294967045 842401391 0 4294967091 110 2 n -4294967045 842401391 0 4294967091 110 3 n -4294967045 842401391 0 4294967091 110 4 n -4294967088 1179276562 0 4294967091 3687884464 0 n -4294967088 3935750373 0 4294967091 3687884465 0 n -4294967088 4072017905 0 4294967091 0 0 n -4294967088 4170826110 0 4294967091 0 0 n +4294967090 111 0 4294967090 110 14 a +4294967090 112 0 4294967090 110 15 a +4294967044 842401391 0 4294967090 110 1 n +4294967044 842401391 0 4294967090 110 2 n +4294967044 842401391 0 4294967090 110 3 n +4294967044 842401391 0 4294967090 110 4 n +4294967087 1179276562 0 4294967090 3687884464 0 n +4294967087 3935750373 0 4294967090 3687884465 0 n +4294967087 4072017905 0 4294967090 0 0 n +4294967087 4170826110 0 4294967090 0 0 n statement ok CREATE TABLE t_with_pk_seq (a INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, b INT); @@ -1735,9 +1765,9 @@ JOIN pg_class cla ON classid=cla.oid JOIN pg_class refcla ON refclassid=refcla.oid ---- classid refclassid tablename reftablename -4294967045 4294967091 pg_rewrite pg_class -4294967091 4294967091 pg_class pg_class -4294967088 4294967091 pg_constraint pg_class +4294967044 4294967090 pg_rewrite pg_class +4294967090 4294967090 pg_class pg_class +4294967087 4294967090 pg_constraint pg_class # Some entries in pg_depend are foreign key constraints that reference an index # in pg_class. 
Other entries are table-view dependencies @@ -1837,95 +1867,95 @@ WHERE oid < 4194967002 -- exclude implicit types for virtual tables ORDER BY oid ---- oid typname typnamespace typowner typlen typbyval typtype -16 bool 4294967103 NULL 1 true b -17 bytea 4294967103 NULL -1 false b -18 char 4294967103 NULL 1 true b -19 name 4294967103 NULL -1 false b -20 int8 4294967103 NULL 8 true b -21 int2 4294967103 NULL 2 true b -22 int2vector 4294967103 NULL -1 false b -23 int4 4294967103 NULL 4 true b -24 regproc 4294967103 NULL 4 true b -25 text 4294967103 NULL -1 false b -26 oid 4294967103 NULL 4 true b -30 oidvector 4294967103 NULL -1 false b -700 float4 4294967103 NULL 4 true b -701 float8 4294967103 NULL 8 true b -705 unknown 4294967103 NULL 0 true b -869 inet 4294967103 NULL 24 true b -1000 _bool 4294967103 NULL -1 false b -1001 _bytea 4294967103 NULL -1 false b -1002 _char 4294967103 NULL -1 false b -1003 _name 4294967103 NULL -1 false b -1005 _int2 4294967103 NULL -1 false b -1006 _int2vector 4294967103 NULL -1 false b -1007 _int4 4294967103 NULL -1 false b -1008 _regproc 4294967103 NULL -1 false b -1009 _text 4294967103 NULL -1 false b -1013 _oidvector 4294967103 NULL -1 false b -1014 _bpchar 4294967103 NULL -1 false b -1015 _varchar 4294967103 NULL -1 false b -1016 _int8 4294967103 NULL -1 false b -1021 _float4 4294967103 NULL -1 false b -1022 _float8 4294967103 NULL -1 false b -1028 _oid 4294967103 NULL -1 false b -1041 _inet 4294967103 NULL -1 false b -1042 bpchar 4294967103 NULL -1 false b -1043 varchar 4294967103 NULL -1 false b -1082 date 4294967103 NULL 4 true b -1083 time 4294967103 NULL 8 true b -1114 timestamp 4294967103 NULL 8 true b -1115 _timestamp 4294967103 NULL -1 false b -1182 _date 4294967103 NULL -1 false b -1183 _time 4294967103 NULL -1 false b -1184 timestamptz 4294967103 NULL 8 true b -1185 _timestamptz 4294967103 NULL -1 false b -1186 interval 4294967103 NULL 24 true b -1187 _interval 4294967103 NULL -1 false b -1231 _numeric 4294967103 NULL -1 false b -1266 timetz 4294967103 NULL 12 true b -1270 _timetz 4294967103 NULL -1 false b -1560 bit 4294967103 NULL -1 false b -1561 _bit 4294967103 NULL -1 false b -1562 varbit 4294967103 NULL -1 false b -1563 _varbit 4294967103 NULL -1 false b -1700 numeric 4294967103 NULL -1 false b -1790 refcursor 4294967103 NULL -1 false b -2201 _refcursor 4294967103 NULL -1 false b -2202 regprocedure 4294967103 NULL 4 true b -2205 regclass 4294967103 NULL 4 true b -2206 regtype 4294967103 NULL 4 true b -2207 _regprocedure 4294967103 NULL -1 false b -2210 _regclass 4294967103 NULL -1 false b -2211 _regtype 4294967103 NULL -1 false b -2249 record 4294967103 NULL 0 true p -2277 anyarray 4294967103 NULL -1 false p -2278 void 4294967103 NULL 0 true p -2279 trigger 4294967103 NULL 4 true p -2283 anyelement 4294967103 NULL -1 false p -2287 _record 4294967103 NULL -1 false b -2950 uuid 4294967103 NULL 16 true b -2951 _uuid 4294967103 NULL -1 false b -3220 pg_lsn 4294967103 NULL 8 true b -3221 _pg_lsn 4294967103 NULL -1 false b -3614 tsvector 4294967103 NULL -1 false b -3615 tsquery 4294967103 NULL -1 false b -3643 _tsvector 4294967103 NULL -1 false b -3645 _tsquery 4294967103 NULL -1 false b -3802 jsonb 4294967103 NULL -1 false b -3807 _jsonb 4294967103 NULL -1 false b -4089 regnamespace 4294967103 NULL 4 true b -4090 _regnamespace 4294967103 NULL -1 false b -4096 regrole 4294967103 NULL 4 true b -4097 _regrole 4294967103 NULL -1 false b -90000 geometry 4294967103 NULL -1 false b -90001 _geometry 4294967103 NULL -1 false b -90002 geography 4294967103 
NULL -1 false b -90003 _geography 4294967103 NULL -1 false b -90004 box2d 4294967103 NULL 32 true b -90005 _box2d 4294967103 NULL -1 false b -90006 vector 4294967103 NULL -1 false b -90007 _vector 4294967103 NULL -1 false b +16 bool 4294967102 NULL 1 true b +17 bytea 4294967102 NULL -1 false b +18 char 4294967102 NULL 1 true b +19 name 4294967102 NULL -1 false b +20 int8 4294967102 NULL 8 true b +21 int2 4294967102 NULL 2 true b +22 int2vector 4294967102 NULL -1 false b +23 int4 4294967102 NULL 4 true b +24 regproc 4294967102 NULL 4 true b +25 text 4294967102 NULL -1 false b +26 oid 4294967102 NULL 4 true b +30 oidvector 4294967102 NULL -1 false b +700 float4 4294967102 NULL 4 true b +701 float8 4294967102 NULL 8 true b +705 unknown 4294967102 NULL 0 true b +869 inet 4294967102 NULL 24 true b +1000 _bool 4294967102 NULL -1 false b +1001 _bytea 4294967102 NULL -1 false b +1002 _char 4294967102 NULL -1 false b +1003 _name 4294967102 NULL -1 false b +1005 _int2 4294967102 NULL -1 false b +1006 _int2vector 4294967102 NULL -1 false b +1007 _int4 4294967102 NULL -1 false b +1008 _regproc 4294967102 NULL -1 false b +1009 _text 4294967102 NULL -1 false b +1013 _oidvector 4294967102 NULL -1 false b +1014 _bpchar 4294967102 NULL -1 false b +1015 _varchar 4294967102 NULL -1 false b +1016 _int8 4294967102 NULL -1 false b +1021 _float4 4294967102 NULL -1 false b +1022 _float8 4294967102 NULL -1 false b +1028 _oid 4294967102 NULL -1 false b +1041 _inet 4294967102 NULL -1 false b +1042 bpchar 4294967102 NULL -1 false b +1043 varchar 4294967102 NULL -1 false b +1082 date 4294967102 NULL 4 true b +1083 time 4294967102 NULL 8 true b +1114 timestamp 4294967102 NULL 8 true b +1115 _timestamp 4294967102 NULL -1 false b +1182 _date 4294967102 NULL -1 false b +1183 _time 4294967102 NULL -1 false b +1184 timestamptz 4294967102 NULL 8 true b +1185 _timestamptz 4294967102 NULL -1 false b +1186 interval 4294967102 NULL 24 true b +1187 _interval 4294967102 NULL -1 false b +1231 _numeric 4294967102 NULL -1 false b +1266 timetz 4294967102 NULL 12 true b +1270 _timetz 4294967102 NULL -1 false b +1560 bit 4294967102 NULL -1 false b +1561 _bit 4294967102 NULL -1 false b +1562 varbit 4294967102 NULL -1 false b +1563 _varbit 4294967102 NULL -1 false b +1700 numeric 4294967102 NULL -1 false b +1790 refcursor 4294967102 NULL -1 false b +2201 _refcursor 4294967102 NULL -1 false b +2202 regprocedure 4294967102 NULL 4 true b +2205 regclass 4294967102 NULL 4 true b +2206 regtype 4294967102 NULL 4 true b +2207 _regprocedure 4294967102 NULL -1 false b +2210 _regclass 4294967102 NULL -1 false b +2211 _regtype 4294967102 NULL -1 false b +2249 record 4294967102 NULL 0 true p +2277 anyarray 4294967102 NULL -1 false p +2278 void 4294967102 NULL 0 true p +2279 trigger 4294967102 NULL 4 true p +2283 anyelement 4294967102 NULL -1 false p +2287 _record 4294967102 NULL -1 false b +2950 uuid 4294967102 NULL 16 true b +2951 _uuid 4294967102 NULL -1 false b +3220 pg_lsn 4294967102 NULL 8 true b +3221 _pg_lsn 4294967102 NULL -1 false b +3614 tsvector 4294967102 NULL -1 false b +3615 tsquery 4294967102 NULL -1 false b +3643 _tsvector 4294967102 NULL -1 false b +3645 _tsquery 4294967102 NULL -1 false b +3802 jsonb 4294967102 NULL -1 false b +3807 _jsonb 4294967102 NULL -1 false b +4089 regnamespace 4294967102 NULL 4 true b +4090 _regnamespace 4294967102 NULL -1 false b +4096 regrole 4294967102 NULL 4 true b +4097 _regrole 4294967102 NULL -1 false b +90000 geometry 4294967102 NULL -1 false b +90001 _geometry 4294967102 NULL -1 false b +90002 
geography 4294967102 NULL -1 false b +90003 _geography 4294967102 NULL -1 false b +90004 box2d 4294967102 NULL 32 true b +90005 _box2d 4294967102 NULL -1 false b +90006 vector 4294967102 NULL -1 false b +90007 _vector 4294967102 NULL -1 false b 100110 t1 109 1546506610 -1 false c 100111 t1_m_seq 109 1546506610 -1 false c 100112 t1_n_seq 109 1546506610 -1 false c @@ -2439,7 +2469,7 @@ FROM pg_catalog.pg_type WHERE oid = 1000 ---- oid typname typnamespace typowner typlen typbyval typtype -1000 _bool 4294967103 NULL -1 false b +1000 _bool 4294967102 NULL -1 false b query OTOOIBT colnames SELECT oid, typname, typnamespace, typowner, typlen, typbyval, typtype @@ -2497,7 +2527,7 @@ FROM pg_catalog.pg_type WHERE oid = $vtableSourceId ---- oid typname typnamespace typowner typlen typbyval typtype -4294967053 pg_proc 4294967103 3233629770 -1 false c +4294967052 pg_proc 4294967102 3233629770 -1 false c ## pg_catalog.pg_proc @@ -2508,14 +2538,14 @@ JOIN pg_catalog.pg_namespace n ON n.oid = p.pronamespace WHERE proname='substring' ---- proname pronamespace nspname proowner prolang procost prorows provariadic -substring 4294967103 pg_catalog NULL 12 NULL NULL 0 -substring 4294967103 pg_catalog NULL 12 NULL NULL 0 -substring 4294967103 pg_catalog NULL 12 NULL NULL 0 -substring 4294967103 pg_catalog NULL 12 NULL NULL 0 -substring 4294967103 pg_catalog NULL 12 NULL NULL 0 -substring 4294967103 pg_catalog NULL 12 NULL NULL 0 -substring 4294967103 pg_catalog NULL 12 NULL NULL 0 -substring 4294967103 pg_catalog NULL 12 NULL NULL 0 +substring 4294967102 pg_catalog NULL 12 NULL NULL 0 +substring 4294967102 pg_catalog NULL 12 NULL NULL 0 +substring 4294967102 pg_catalog NULL 12 NULL NULL 0 +substring 4294967102 pg_catalog NULL 12 NULL NULL 0 +substring 4294967102 pg_catalog NULL 12 NULL NULL 0 +substring 4294967102 pg_catalog NULL 12 NULL NULL 0 +substring 4294967102 pg_catalog NULL 12 NULL NULL 0 +substring 4294967102 pg_catalog NULL 12 NULL NULL 0 query TTBB colnames,rowsort SELECT proname, prokind, prosecdef, proleakproof @@ -2598,7 +2628,7 @@ WHERE proname='_pg_char_max_length' ORDER BY p.oid ---- proname prosrc pronamespace nspname prorettype proargtypes -_pg_char_max_length _pg_char_max_length 4294967190 information_schema 20 26 23 +_pg_char_max_length _pg_char_max_length 4294967189 information_schema 20 26 23 query TOIOTTB colnames SELECT proname, provariadic, pronargs, prorettype, proargtypes, proargmodes, proisstrict @@ -2725,14 +2755,14 @@ OR (c.relname = 'pg_namespace' AND d.objoid IN (SELECT oid FROM pg_catalog.pg_na ORDER BY d.objoid, description ---- relname objoid classoid objsubid description -pg_class 138 4294967091 0 mycomment1 -pg_class 138 4294967091 1 mycomment2 -pg_namespace 139 4294967062 0 mycomment4 -pg_proc 738 4294967053 0 Calculates the absolute value of `val`. -pg_proc 739 4294967053 0 Calculates the absolute value of `val`. -pg_proc 740 4294967053 0 Calculates the absolute value of `val`. -pg_class 385466581 4294967091 0 mycomment3 -pg_class 4294966975 4294967091 0 database users +pg_class 138 4294967090 0 mycomment1 +pg_class 138 4294967090 1 mycomment2 +pg_namespace 139 4294967061 0 mycomment4 +pg_proc 738 4294967052 0 Calculates the absolute value of `val`. +pg_proc 739 4294967052 0 Calculates the absolute value of `val`. +pg_proc 740 4294967052 0 Calculates the absolute value of `val`. 
+pg_class 385466581 4294967090 0 mycomment3 +pg_class 4294966974 4294967090 0 database users ## pg_catalog.pg_shdescription @@ -2743,7 +2773,7 @@ query OOT colnames SELECT objoid, classoid, description FROM pg_catalog.pg_shdescription ---- objoid classoid description -100 4294967085 mydbcomment +100 4294967084 mydbcomment ## pg_catalog.pg_event_trigger @@ -2911,6 +2941,7 @@ optimizer_always_use_histograms on N optimizer_hoist_uncorrelated_equality_subqueries on NULL NULL NULL string optimizer_merge_joins_enabled on NULL NULL NULL string optimizer_prove_implication_with_virtual_computed_columns on NULL NULL NULL string +optimizer_push_limit_into_project_filtered_scan on NULL NULL NULL string optimizer_push_offset_into_index_join on NULL NULL NULL string optimizer_use_conditional_hoist_fix on NULL NULL NULL string optimizer_use_forecasts on NULL NULL NULL string @@ -3102,6 +3133,7 @@ optimizer_always_use_histograms on N optimizer_hoist_uncorrelated_equality_subqueries on NULL user NULL on on optimizer_merge_joins_enabled on NULL user NULL on on optimizer_prove_implication_with_virtual_computed_columns on NULL user NULL on on +optimizer_push_limit_into_project_filtered_scan on NULL user NULL on on optimizer_push_offset_into_index_join on NULL user NULL on on optimizer_use_conditional_hoist_fix on NULL user NULL on on optimizer_use_forecasts on NULL user NULL on on @@ -3292,6 +3324,7 @@ optimizer_always_use_histograms NULL NULL NULL optimizer_hoist_uncorrelated_equality_subqueries NULL NULL NULL NULL NULL optimizer_merge_joins_enabled NULL NULL NULL NULL NULL optimizer_prove_implication_with_virtual_computed_columns NULL NULL NULL NULL NULL +optimizer_push_limit_into_project_filtered_scan NULL NULL NULL NULL NULL optimizer_push_offset_into_index_join NULL NULL NULL NULL NULL optimizer_use_conditional_hoist_fix NULL NULL NULL NULL NULL optimizer_use_forecasts NULL NULL NULL NULL NULL @@ -3414,7 +3447,7 @@ query OTOOTBBOOOOOOOO colnames SELECT * FROM pg_catalog.pg_operator where oprname='+' and oprleft='float8'::regtype ---- oid oprname oprnamespace oprowner oprkind oprcanmerge oprcanhash oprleft oprright oprresult oprcom oprnegate oprcode oprrest oprjoin -74817020 + 4294967103 NULL b false false 701 701 701 NULL NULL NULL NULL NULL +74817020 + 4294967102 NULL b false false 701 701 701 NULL NULL NULL NULL NULL # Verify proper functionality of system information functions. @@ -4318,7 +4351,7 @@ subtest end query TTI SELECT database_name, descriptor_name, descriptor_id from test.crdb_internal.create_statements where descriptor_name = 'pg_views' ---- -test pg_views 4294966974 +test pg_views 4294966973 # Verify INCLUDED columns appear in pg_index. 
See issue #59563 statement ok diff --git a/pkg/sql/logictest/testdata/logic_test/pgcrypto_builtins b/pkg/sql/logictest/testdata/logic_test/pgcrypto_builtins index 4a7363557981..dda9f719b725 100644 --- a/pkg/sql/logictest/testdata/logic_test/pgcrypto_builtins +++ b/pkg/sql/logictest/testdata/logic_test/pgcrypto_builtins @@ -291,19 +291,19 @@ subtest end subtest ccl_functions -skipif config 3node-tenant-default-configs +skipif config enterprise-configs query error pgcode XXC01 encrypt can only be used with a CCL distribution SELECT encrypt('abc', 'key', 'aes') -skipif config 3node-tenant-default-configs +skipif config enterprise-configs query error pgcode XXC01 encrypt_iv can only be used with a CCL distribution SELECT encrypt_iv('abc', 'key', '123', 'aes') -skipif config 3node-tenant-default-configs +skipif config enterprise-configs query error pgcode XXC01 decrypt can only be used with a CCL distribution SELECT decrypt('\xdb5f149a7caf0cd275ca18c203a212c9', 'key', 'aes') -skipif config 3node-tenant-default-configs +skipif config enterprise-configs query error pgcode XXC01 decrypt_iv can only be used with a CCL distribution SELECT decrypt_iv('\x91b4ef63852013c8da53829da662b871', 'key', '123', 'aes') diff --git a/pkg/sql/logictest/testdata/logic_test/poison_after_push b/pkg/sql/logictest/testdata/logic_test/poison_after_push index 2205d8984d15..bc29c59f36c3 100644 --- a/pkg/sql/logictest/testdata/logic_test/poison_after_push +++ b/pkg/sql/logictest/testdata/logic_test/poison_after_push @@ -2,8 +2,8 @@ # not immediately poisoned when it revisits a Range on which one of # its intents has had its timestamp pushed. This allows it to continue # laying down intents in a single pass, despite the possibility that it -# will restart on commit. A SNAPSHOT transaction can always proceed and -# commit with its new timestamp. +# will restart on commit. A REPEATABLE READ transaction can always +# proceed and commit with its new timestamp. # # Note that ORDER BY id is done on selects which expect more than a # single result, to account for the distsql config, which randomly @@ -65,9 +65,10 @@ SELECT * FROM t ORDER BY id statement ok COMMIT -# The same type of session for a SNAPSHOT transaction shouldn't be poisoned. +# The same type of session for a REPEATABLE READ transaction shouldn't be +# poisoned. 
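+# A minimal sketch of the scenario (illustrative; the pushing reader runs in
+# another, assumed session):
+#
+#   BEGIN TRANSACTION ISOLATION LEVEL REPEATABLE READ, PRIORITY LOW;
+#   INSERT INTO t VALUES (3);
+#   -- a high-priority read from another session pushes the intent's timestamp
+#   COMMIT;  -- still succeeds, at the pushed (higher) timestamp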
statement ok -BEGIN TRANSACTION ISOLATION LEVEL SNAPSHOT, PRIORITY LOW +BEGIN TRANSACTION ISOLATION LEVEL REPEATABLE READ, PRIORITY LOW statement ok INSERT INTO t VALUES (3) diff --git a/pkg/sql/logictest/testdata/logic_test/procedure_params b/pkg/sql/logictest/testdata/logic_test/procedure_params index d33f69b45396..35d818aa366e 100644 --- a/pkg/sql/logictest/testdata/logic_test/procedure_params +++ b/pkg/sql/logictest/testdata/logic_test/procedure_params @@ -223,6 +223,7 @@ SELECT create_statement FROM [SHOW CREATE PROCEDURE p]; ---- CREATE PROCEDURE public.p(IN p1 INT8, INOUT p2 INT8, INOUT p3 INT8, OUT p4 INT8) LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT p2, p3, p1; $$ @@ -240,6 +241,7 @@ SELECT create_statement FROM [SHOW CREATE PROCEDURE p]; ---- CREATE PROCEDURE public.p(OUT param INT8) LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 1; $$ @@ -368,6 +370,7 @@ SELECT create_statement FROM [SHOW CREATE PROCEDURE p_3_in_2_out]; ---- CREATE PROCEDURE public.p_3_in_2_out(IN param1 INT8, OUT param1 INT8, IN param2 INT8, IN param3 INT8, OUT param2 INT8) LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT (param1, param2 + param3); $$ @@ -390,6 +393,7 @@ SELECT create_statement FROM [SHOW CREATE PROCEDURE p_3_in_2_out]; ---- CREATE PROCEDURE public.p_3_in_2_out(INOUT param1 INT8, INOUT param2 INT8, IN param3 INT8) LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT (param1, param2 + param3); $$ @@ -415,6 +419,7 @@ SELECT create_statement FROM [SHOW CREATE PROCEDURE p_default_names]; ---- CREATE PROCEDURE public.p_default_names(OUT INT8, OUT param2 INT8, IN INT8, OUT INT8) LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT (1:::INT8, 2:::INT8, 3:::INT8); $$ @@ -439,6 +444,7 @@ SELECT create_statement FROM [SHOW CREATE PROCEDURE p_default_names]; ---- CREATE PROCEDURE public.p_default_names(OUT INT8, OUT param2 INT8, IN INT8, OUT column3 INT8) LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT (1:::INT8, 2:::INT8, 3:::INT8); $$ @@ -458,6 +464,7 @@ SELECT create_statement FROM [SHOW CREATE PROCEDURE p_default_names]; ---- CREATE PROCEDURE public.p_default_names(OUT INT8, OUT param2 INT8, IN INT8, OUT INT8) LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT (1:::INT8, 2:::INT8, 3:::INT8); $$ @@ -477,6 +484,7 @@ SELECT create_statement FROM [SHOW CREATE PROCEDURE p_default_names]; ---- CREATE PROCEDURE public.p_default_names(OUT INT8, OUT param2 INT8, IN in_param INT8, OUT INT8) LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT (in_param, 2, 3); $$ diff --git a/pkg/sql/logictest/testdata/logic_test/procedure_schema_change b/pkg/sql/logictest/testdata/logic_test/procedure_schema_change index d37390ad2898..84c82f1ac580 100644 --- a/pkg/sql/logictest/testdata/logic_test/procedure_schema_change +++ b/pkg/sql/logictest/testdata/logic_test/procedure_schema_change @@ -35,6 +35,7 @@ SELECT create_statement FROM [SHOW CREATE PROCEDURE p_new]; ---- CREATE PROCEDURE public.p_new() LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 1; $$ @@ -52,11 +53,13 @@ SELECT create_statement FROM [SHOW CREATE PROCEDURE p_int] ORDER BY 1 ---- CREATE PROCEDURE public.p_int() LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 1; $$ CREATE PROCEDURE public.p_int(INT8) LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 1; $$ @@ -71,6 +74,7 @@ SELECT create_statement FROM [SHOW CREATE PROCEDURE p_int] ORDER BY 1 ---- CREATE PROCEDURE public.p_int() LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 1; $$ @@ -80,6 +84,7 @@ SELECT create_statement FROM [SHOW CREATE PROCEDURE p_func] ---- CREATE PROCEDURE public.p_func(INT8) LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 1; $$ @@ -176,11 
+181,13 @@ SELECT create_statement FROM [SHOW CREATE PROCEDURE public.p] ORDER BY 1 ---- CREATE PROCEDURE public.p() LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 1; $$ CREATE PROCEDURE public.p(INT8) LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 2; $$ @@ -207,6 +214,7 @@ SELECT create_statement FROM [SHOW CREATE PROCEDURE public.p]; ---- CREATE PROCEDURE public.p() LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 1; $$ @@ -216,11 +224,13 @@ SELECT create_statement FROM [SHOW CREATE PROCEDURE sc.p] ORDER BY 1 ---- CREATE PROCEDURE sc.p() LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 3; $$ CREATE PROCEDURE sc.p(INT8) LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 2; $$ diff --git a/pkg/sql/logictest/testdata/logic_test/ranges b/pkg/sql/logictest/testdata/logic_test/ranges index 660e9d477560..0e2cf7bd91d3 100644 --- a/pkg/sql/logictest/testdata/logic_test/ranges +++ b/pkg/sql/logictest/testdata/logic_test/ranges @@ -14,7 +14,7 @@ SELECT start_key, end_key, replicas, lease_holder FROM [SHOW RANGES FROM TABLE t ORDER BY 1 ---- start_key end_key replicas lease_holder - {1} 1 + {1} 1 statement ok ALTER TABLE t SPLIT AT VALUES (1), (10) @@ -24,7 +24,7 @@ SELECT start_key, end_key, replicas, lease_holder FROM [SHOW RANGES FROM TABLE t ORDER BY 1 ---- start_key end_key replicas lease_holder - …/1/1 {1} 1 + …/1/1 {1} 1 …/1/1 …/1/10 {1} 1 …/1/10 {1} 1 @@ -39,7 +39,7 @@ SELECT start_key, end_key, replicas, lease_holder FROM [SHOW RANGES FROM TABLE t ORDER BY 1 ---- start_key end_key replicas lease_holder - …/1/1 {1} 1 + …/1/1 {1} 1 …/1/1 …/1/10 {4} 4 …/1/10 {1} 1 @@ -51,7 +51,7 @@ SELECT start_key, end_key, replicas, lease_holder FROM [SHOW RANGES FROM TABLE t ORDER BY 1 ---- start_key end_key replicas lease_holder - …/1/1 {1} 1 + …/1/1 {1} 1 …/1/1 …/1/5/1 {4} 4 …/1/10 {1} 1 …/1/5/1 …/1/5/2 {4} 4 @@ -72,7 +72,7 @@ SELECT start_key, end_key, replicas, lease_holder FROM [SHOW RANGES FROM TABLE t ORDER BY 1 ---- start_key end_key replicas lease_holder - …/1/1 {1} 1 + …/1/1 {1} 1 …/1/1 …/1/5/1 {3,4} 3 …/1/10 {1} 1 …/1/5/1 …/1/5/2 {1,2,3} 1 @@ -230,7 +230,8 @@ c6 /Table/62 c7 /Table/ c7 /Table/63 c8 /Table/64 {1} 1 c8 /Table/64 c9 /Table/65 {1} 1 c9 /Table/65 ca /Table/66 {1} 1 -ca /Table/66 f28989 /Table/106/1/1 {1} 1 +ca /Table/66 cb /Table/67 {1} 1 +cb /Table/67 f28989 /Table/106/1/1 {1} 1 f28989 /Table/106/1/1 f2898d89 /Table/106/1/5/1 {3,4} 3 f2898d89 /Table/106/1/5/1 f2898d8a /Table/106/1/5/2 {1,2,3} 1 f2898d8a /Table/106/1/5/2 f2898d8b /Table/106/1/5/3 {2,3,5} 5 @@ -319,7 +320,8 @@ c6 /Table/62 c7 /Table/ c7 /Table/63 c8 /Table/64 {1} 1 c8 /Table/64 c9 /Table/65 {1} 1 c9 /Table/65 ca /Table/66 {1} 1 -ca /Table/66 f28989 /Table/106/1/1 {1} 1 +ca /Table/66 cb /Table/67 {1} 1 +cb /Table/67 f28989 /Table/106/1/1 {1} 1 f28989 /Table/106/1/1 f2898d89 /Table/106/1/5/1 {3,4} 3 f2898d89 /Table/106/1/5/1 f2898d8a /Table/106/1/5/2 {1,2,3} 1 f2898d8a /Table/106/1/5/2 f2898d8b /Table/106/1/5/3 {2,3,5} 5 diff --git a/pkg/sql/logictest/testdata/logic_test/row_level_ttl b/pkg/sql/logictest/testdata/logic_test/row_level_ttl index 8f3a684480f8..1c6c1090fe9f 100644 --- a/pkg/sql/logictest/testdata/logic_test/row_level_ttl +++ b/pkg/sql/logictest/testdata/logic_test/row_level_ttl @@ -56,15 +56,15 @@ subtest end subtest ttl_automatic_column_notice -# Read committed emits extra notices, so skip it. -skipif config local-read-committed +# Weak isolation levels emit extra notices, so skip them. 
+skipif config weak-iso-level-configs query T noticetrace CREATE TABLE tbl_ttl_automatic_column (id INT PRIMARY KEY) WITH (ttl_automatic_column = 'on') ---- NOTICE: ttl_automatic_column is no longer used. Setting ttl_expire_after automatically creates a TTL column. Resetting ttl_expire_after removes the automatically created column. -# Read committed emits extra notices, so skip it. -skipif config local-read-committed +# Weak isolation levels emit extra notices, so skip them. +skipif config weak-iso-level-configs query T noticetrace ALTER TABLE tbl_ttl_automatic_column RESET (ttl_automatic_column) ---- @@ -74,15 +74,15 @@ subtest end subtest ttl_range_concurrency_notice -# Read committed emits extra notices, so skip it. -skipif config local-read-committed +# Weak isolation levels emit extra notices, so skip them. +skipif config weak-iso-level-configs query T noticetrace CREATE TABLE tbl_ttl_range_concurrency (id INT PRIMARY KEY) WITH (ttl_range_concurrency = 2) ---- NOTICE: ttl_range_concurrency is no longer configurable. -# Read committed emits extra notices, so skip it. -skipif config local-read-committed +# Weak isolation levels emit extra notices, so skip them. +skipif config weak-iso-level-configs query T noticetrace ALTER TABLE tbl_ttl_range_concurrency RESET (ttl_range_concurrency) ---- diff --git a/pkg/sql/logictest/testdata/logic_test/schema_change_in_txn b/pkg/sql/logictest/testdata/logic_test/schema_change_in_txn index dd8eb7ae5f46..ab3529bf946f 100644 --- a/pkg/sql/logictest/testdata/logic_test/schema_change_in_txn +++ b/pkg/sql/logictest/testdata/logic_test/schema_change_in_txn @@ -1,7 +1,3 @@ -# This test is skipped under local-read-committed, since it uses the -# autocommit_before_ddl setting, which makes it impossible to test schema -# changes in transactions. - # Skip the rest of the test if a retry occurs. They can happen and are fine # but there's no way to encapsulate that in logictests. 
skip_on_retry @@ -747,7 +743,7 @@ SELECT status, failed ALTER TABLE test.public.customers ADD COLUMN i INT8 DEFAULT … ers (n) query BT -SELECT status % '(running)|(succeeded)', +SELECT status ~ '(running)|(succeeded)', regexp_replace(description, 'ROLL BACK JOB \d+.*', 'ROLL BACK JOB') as descr FROM [SHOW JOBS] WHERE job_type = 'SCHEMA CHANGE GC' AND description LIKE 'GC for ROLL%' ORDER BY job_id DESC LIMIT 1 ---- @@ -2014,13 +2010,13 @@ statement ok BEGIN; SELECT 1; -skipif config local-read-committed +skipif config weak-iso-level-configs query T noticetrace ALTER TABLE t1 ADD COLUMN c INT DEFAULT 1 ---- NOTICE: auto-committing transaction before processing DDL due to autocommit_before_ddl setting -onlyif config local-read-committed +onlyif config weak-iso-level-configs query T noticetrace ALTER TABLE t1 ADD COLUMN c INT DEFAULT 1 ---- diff --git a/pkg/sql/logictest/testdata/logic_test/schema_locked b/pkg/sql/logictest/testdata/logic_test/schema_locked index 79f4bc6b3c44..2c348bfa0a94 100644 --- a/pkg/sql/logictest/testdata/logic_test/schema_locked +++ b/pkg/sql/logictest/testdata/logic_test/schema_locked @@ -69,7 +69,7 @@ DROP TABLE t subtest disallow_schema_changes_when_schema_is_locked statement ok -CREATE TABLE t (i INT PRIMARY KEY, j INT, INDEX idx (j)) WITH (schema_locked = t); +CREATE TABLE t (i INT PRIMARY KEY, j INT, UNIQUE INDEX idx (j)) WITH (schema_locked = t); statement ok INSERT INTO t SELECT i, i+1 FROM generate_series(1,10) AS tmp(i); @@ -98,9 +98,20 @@ DROP INDEX idx; statement error pgcode 57000 schema changes are disallowed on table "t" because it is locked CREATE INDEX idx2 ON t(j); +statement ok +CREATE TABLE ref (a INT PRIMARY KEY, b INT) + +# Locked tables cannot be referenced by foreign keys. +statement error pgcode 57000 schema changes are disallowed on table "t" because it is locked +ALTER TABLE ref ADD CONSTRAINT fk FOREIGN KEY (b) REFERENCES t(j); + +# GRANT statements are allowed on the table, as they only affect the +# table's privilege descriptor. statement ok GRANT DELETE ON TABLE t TO testuser WITH GRANT OPTION; +# COMMENT statements are allowed on the table, as they don't actually +# touch the descriptor. 
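# For illustration, a hedged sketch of how one of the schema changes rejected
# above could be performed instead, assuming schema_locked can be toggled like
# other storage parameters via ALTER TABLE ... SET (not exercised here):
#
#   ALTER TABLE t SET (schema_locked = false);
#   CREATE INDEX idx2 ON t (j);
#   ALTER TABLE t SET (schema_locked = true);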
statement ok COMMENT ON TABLE t IS 't is a table'; COMMENT ON INDEX t@idx IS 'idx is an index'; diff --git a/pkg/sql/logictest/testdata/logic_test/select_for_share b/pkg/sql/logictest/testdata/logic_test/select_for_share index 9d0889242bef..17940c23c7e1 100644 --- a/pkg/sql/logictest/testdata/logic_test/select_for_share +++ b/pkg/sql/logictest/testdata/logic_test/select_for_share @@ -40,7 +40,7 @@ user testuser2 statement async writeReq count 1 UPDATE t SET a = 2 WHERE a = 1 -skipif config local-read-committed +skipif config weak-iso-level-configs query TTTTTTTBB colnames,retry,rowsort SELECT database_name, schema_name, table_name, lock_key_pretty, lock_strength, durability, isolation_level, granted, contended FROM crdb_internal.cluster_locks ---- @@ -58,6 +58,15 @@ test public t /Table/106/1/1/0 Shared Replica test public t /Table/106/1/1/0 Exclusive Replicated READ_COMMITTED false true test public t /Table/106/1/1/0 Shared Replicated READ_COMMITTED true true +onlyif config local-repeatable-read +query TTTTTTTBB colnames,retry,rowsort +SELECT database_name, schema_name, table_name, lock_key_pretty, lock_strength, durability, replace(isolation_level, ' ', '_') AS isolation_level, granted, contended FROM crdb_internal.cluster_locks +---- +database_name schema_name table_name lock_key_pretty lock_strength durability isolation_level granted contended +test public t /Table/106/1/1/0 Shared Replicated REPEATABLE_READ true true +test public t /Table/106/1/1/0 Exclusive Replicated REPEATABLE_READ false true +test public t /Table/106/1/1/0 Shared Replicated REPEATABLE_READ true true + # Commit the first transaction and rollback the second. user testuser @@ -128,14 +137,14 @@ user testuser2 statement ok SET enable_shared_locking_for_serializable = true -skipif config local-read-committed +skipif config weak-iso-level-configs query TTTTTTTBB colnames,retry,rowsort SELECT database_name, schema_name, table_name, lock_key_pretty, lock_strength, durability, isolation_level, granted, contended FROM crdb_internal.cluster_locks ---- database_name schema_name table_name lock_key_pretty lock_strength durability isolation_level granted contended test public t /Table/106/1/2/0 Shared Unreplicated SERIALIZABLE true false -onlyif config local-read-committed +onlyif config weak-iso-level-configs query TTTTTTTBB colnames,retry,rowsort SELECT database_name, schema_name, table_name, lock_key_pretty, lock_strength, durability, isolation_level, granted, contended FROM crdb_internal.cluster_locks ---- @@ -151,7 +160,7 @@ SELECT * FROM t FOR SHARE SKIP LOCKED user root -skipif config local-read-committed +skipif config weak-iso-level-configs query TTTTTTTBB colnames,retry,rowsort SELECT database_name, schema_name, table_name, lock_key_pretty, lock_strength, durability, isolation_level, granted, contended FROM crdb_internal.cluster_locks ---- @@ -159,7 +168,7 @@ database_name schema_name table_name lock_key_pretty lock_strength durabil test public t /Table/106/1/2/0 Shared Unreplicated SERIALIZABLE true false test public t /Table/106/1/2/0 Shared Unreplicated SERIALIZABLE true false -onlyif config local-read-committed +onlyif config weak-iso-level-configs query TTTTTTTBB colnames,retry,rowsort SELECT database_name, schema_name, table_name, lock_key_pretty, lock_strength, durability, isolation_level, granted, contended FROM crdb_internal.cluster_locks ---- diff --git a/pkg/sql/logictest/testdata/logic_test/select_for_update b/pkg/sql/logictest/testdata/logic_test/select_for_update index a02554cc860a..7fb2013ddcd4 100644 
--- a/pkg/sql/logictest/testdata/logic_test/select_for_update +++ b/pkg/sql/logictest/testdata/logic_test/select_for_update @@ -1,4 +1,4 @@ -# LogicTest: !local-read-committed +# LogicTest: !weak-iso-level-configs # This test assumes that SERIALIZABLE isolation is being used. See # select_for_update_read_committed for READ COMMITTED testing. diff --git a/pkg/sql/logictest/testdata/logic_test/show_commit_timestamp b/pkg/sql/logictest/testdata/logic_test/show_commit_timestamp index f9263ca20317..bf879bea59dc 100644 --- a/pkg/sql/logictest/testdata/logic_test/show_commit_timestamp +++ b/pkg/sql/logictest/testdata/logic_test/show_commit_timestamp @@ -327,8 +327,8 @@ show commit timestamp statement ok commit -# The timestamps are different under READ COMMITTED. -skipif config local-read-committed +# The timestamps are different under READ COMMITTED and REPEATABLE READ. +skipif config weak-iso-level-configs query IT SELECT i, CASE @@ -343,7 +343,7 @@ ORDER BY i ASC; 3 ts1 4 ts2 -onlyif config local-read-committed +onlyif config weak-iso-level-configs query IT SELECT i, CASE diff --git a/pkg/sql/logictest/testdata/logic_test/show_create_all_schemas b/pkg/sql/logictest/testdata/logic_test/show_create_all_schemas index 7adbe4854f49..32afbbbb25bb 100644 --- a/pkg/sql/logictest/testdata/logic_test/show_create_all_schemas +++ b/pkg/sql/logictest/testdata/logic_test/show_create_all_schemas @@ -41,6 +41,17 @@ create_statement CREATE SCHEMA public; CREATE SCHEMA test2; +statement ok +COMMENT ON SCHEMA public IS 'test comment'; + +query T colnames,nosort +SHOW CREATE ALL SCHEMAS +---- +create_statement +CREATE SCHEMA public; +COMMENT ON SCHEMA public IS 'test comment'; +CREATE SCHEMA test2; + # Make sure database names with hyphens work well. statement ok CREATE DATABASE "d-d"; diff --git a/pkg/sql/logictest/testdata/logic_test/show_fingerprints b/pkg/sql/logictest/testdata/logic_test/show_fingerprints index 0886643118e0..a8448f694f97 100644 --- a/pkg/sql/logictest/testdata/logic_test/show_fingerprints +++ b/pkg/sql/logictest/testdata/logic_test/show_fingerprints @@ -18,6 +18,28 @@ SHOW EXPERIMENTAL_FINGERPRINTS FROM TABLE t t_pkey -7903300865687235210 t_b_idx -5073888452016928166 +# Test excluded columns +query TT rowsort +SHOW EXPERIMENTAL_FINGERPRINTS FROM TABLE t WITH EXCLUDE COLUMNS = ('c') +---- +t_pkey -2938394162542358272 +t_b_idx -5073888452016928166 + +# Test multiple excluded columns +query TT rowsort +SHOW EXPERIMENTAL_FINGERPRINTS FROM TABLE t WITH EXCLUDE COLUMNS = ('a', 'b') +---- +t_pkey -3539648437866042702 +t_b_idx 590700560494856532 + +# START TIMESTAMP is only for VIRTUAL CLUSTERS +query error pgcode 22023 cannot use the START TIMESTAMP option when fingerprinting a table. +SHOW EXPERIMENTAL_FINGERPRINTS FROM TABLE t WITH START TIMESTAMP = '132412341234.000000' + +# EXCLUDE COLUMNS is only for tables +query error pgcode 22023 cannot use the EXCLUDE COLUMNS option when fingerprinting a tenant. +SHOW EXPERIMENTAL_FINGERPRINTS FROM VIRTUAL CLUSTER t WITH EXCLUDE COLUMNS = ('a', 'b') + # Test a partial index. We expect this index to have the same fingerprint # as t_b_idx since the predicate covers all values. 
statement ok diff --git a/pkg/sql/logictest/testdata/logic_test/show_ranges b/pkg/sql/logictest/testdata/logic_test/show_ranges index 046ad8911d2a..308ed0e0e21b 100644 --- a/pkg/sql/logictest/testdata/logic_test/show_ranges +++ b/pkg/sql/logictest/testdata/logic_test/show_ranges @@ -197,11 +197,11 @@ SELECT start_key, end_key, range_id, split_enforced_until FROM [SHOW RANGES] ORDER BY range_id ---- start_key end_key range_id split_enforced_until -/Table/66 /Table/106/1/10 68 NULL -/Table/106/1/10 /Table/106/2/20 69 2262-04-11 23:47:16.854776 +0000 +0000 -/Table/106/2/20 /Table/106/2/30 70 2262-04-11 23:47:16.854776 +0000 +0000 -/Table/106/2/30 /Table/107/1/42 71 2262-04-11 23:47:16.854776 +0000 +0000 -/Table/107/1/42 /Max 72 2262-04-11 23:47:16.854776 +0000 +0000 +/Table/67 /Table/106/1/10 69 NULL +/Table/106/1/10 /Table/106/2/20 70 2262-04-11 23:47:16.854776 +0000 +0000 +/Table/106/2/20 /Table/106/2/30 71 2262-04-11 23:47:16.854776 +0000 +0000 +/Table/106/2/30 /Table/107/1/42 72 2262-04-11 23:47:16.854776 +0000 +0000 +/Table/107/1/42 /Max 73 2262-04-11 23:47:16.854776 +0000 +0000 # Ditto, verbose form. query TTIIT colnames @@ -209,11 +209,11 @@ SELECT start_key, end_key, range_id, lease_holder, split_enforced_until FROM [SH ORDER BY range_id ---- start_key end_key range_id lease_holder split_enforced_until -/Table/66 /Table/106/1/10 68 1 NULL -/Table/106/1/10 /Table/106/2/20 69 1 2262-04-11 23:47:16.854776 +0000 +0000 -/Table/106/2/20 /Table/106/2/30 70 1 2262-04-11 23:47:16.854776 +0000 +0000 -/Table/106/2/30 /Table/107/1/42 71 1 2262-04-11 23:47:16.854776 +0000 +0000 -/Table/107/1/42 /Max 72 1 2262-04-11 23:47:16.854776 +0000 +0000 +/Table/67 /Table/106/1/10 69 1 NULL +/Table/106/1/10 /Table/106/2/20 70 1 2262-04-11 23:47:16.854776 +0000 +0000 +/Table/106/2/20 /Table/106/2/30 71 1 2262-04-11 23:47:16.854776 +0000 +0000 +/Table/106/2/30 /Table/107/1/42 72 1 2262-04-11 23:47:16.854776 +0000 +0000 +/Table/107/1/42 /Max 73 1 2262-04-11 23:47:16.854776 +0000 +0000 # Show that the new tables show up in the full range list.
query TTITTTITITT colnames @@ -227,13 +227,13 @@ start_key end_key range_id database_name schema_name table_na /Table/25 /Table/26 28 system public replication_constraint_stats 25 primary 1 /Table/25/1 /Table/25/2 /Table/26 /Table/27 29 system public replication_critical_localities 26 primary 1 /Table/26/1 /Table/26/2 /Table/27 /Table/28 30 system public replication_stats 27 primary 1 /Table/27/1 /Table/27/2 -/Table/66 /Table/106/1/10 68 test public t 106 t_pkey 1 /Table/106/1 /Table/106/1/10 -/Table/106/1/10 /Table/106/2/20 69 test public t 106 t_pkey 1 /Table/106/1/10 /Table/106/2 -/Table/106/1/10 /Table/106/2/20 69 test public t 106 idx 2 /Table/106/2 /Table/106/2/20 -/Table/106/2/20 /Table/106/2/30 70 test public t 106 idx 2 /Table/106/2/20 /Table/106/2/30 -/Table/106/2/30 /Table/107/1/42 71 test public t 106 idx 2 /Table/106/2/30 /Table/106/3 -/Table/106/2/30 /Table/107/1/42 71 test public u 107 u_pkey 1 /Table/107/1 /Table/107/1/42 -/Table/107/1/42 /Max 72 test public u 107 u_pkey 1 /Table/107/1/42 /Table/107/2 +/Table/67 /Table/106/1/10 69 test public t 106 t_pkey 1 /Table/106/1 /Table/106/1/10 +/Table/106/1/10 /Table/106/2/20 70 test public t 106 t_pkey 1 /Table/106/1/10 /Table/106/2 +/Table/106/1/10 /Table/106/2/20 70 test public t 106 idx 2 /Table/106/2 /Table/106/2/20 +/Table/106/2/20 /Table/106/2/30 71 test public t 106 idx 2 /Table/106/2/20 /Table/106/2/30 +/Table/106/2/30 /Table/107/1/42 72 test public t 106 idx 2 /Table/106/2/30 /Table/106/3 +/Table/106/2/30 /Table/107/1/42 72 test public u 107 u_pkey 1 /Table/107/1 /Table/107/1/42 +/Table/107/1/42 /Max 73 test public u 107 u_pkey 1 /Table/107/1/42 /Table/107/2 subtest show_ranges_from_database/with_tables @@ -265,12 +265,12 @@ FROM [SHOW RANGES WITH TABLES] ORDER BY range_id ---- start_key end_key range_id schema_name table_name table_id table_start_key table_end_key -/Table/66 /Table/106/1/10 68 public t 106 /Table/106 /Table/106/1/10 -/Table/106/1/10 /Table/106/2/20 69 public t 106 /Table/106/1/10 /Table/106/2/20 -/Table/106/2/20 /Table/106/2/30 70 public t 106 /Table/106/2/20 /Table/106/2/30 -/Table/106/2/30 /Table/107/1/42 71 public t 106 /Table/106/2/30 /Table/107 -/Table/106/2/30 /Table/107/1/42 71 public u 107 /Table/107 /Table/107/1/42 -/Table/107/1/42 /Max 72 public u 107 /Table/107/1/42 /Table/108 +/Table/67 /Table/106/1/10 69 public t 106 /Table/106 /Table/106/1/10 +/Table/106/1/10 /Table/106/2/20 70 public t 106 /Table/106/1/10 /Table/106/2/20 +/Table/106/2/20 /Table/106/2/30 71 public t 106 /Table/106/2/20 /Table/106/2/30 +/Table/106/2/30 /Table/107/1/42 72 public t 106 /Table/106/2/30 /Table/107 +/Table/106/2/30 /Table/107/1/42 72 public u 107 /Table/107 /Table/107/1/42 +/Table/107/1/42 /Max 73 public u 107 /Table/107/1/42 /Table/108 subtest show_ranges_from_database/with_indexes @@ -303,13 +303,13 @@ FROM [SHOW RANGES WITH INDEXES] ORDER BY range_id, table_id, index_id ---- start_key end_key range_id schema_name table_name table_id index_name index_id index_start_key index_end_key -/Table/66 /Table/106/1/10 68 public t 106 t_pkey 1 /Table/106/1 /Table/106/1/10 -/Table/106/1/10 /Table/106/2/20 69 public t 106 t_pkey 1 /Table/106/1/10 /Table/106/2 -/Table/106/1/10 /Table/106/2/20 69 public t 106 idx 2 /Table/106/2 /Table/106/2/20 -/Table/106/2/20 /Table/106/2/30 70 public t 106 idx 2 /Table/106/2/20 /Table/106/2/30 -/Table/106/2/30 /Table/107/1/42 71 public t 106 idx 2 /Table/106/2/30 /Table/106/3 -/Table/106/2/30 /Table/107/1/42 71 public u 107 u_pkey 1 /Table/107/1 /Table/107/1/42 -/Table/107/1/42 /Max 72 
public u 107 u_pkey 1 /Table/107/1/42 /Table/107/2 +/Table/67 /Table/106/1/10 69 public t 106 t_pkey 1 /Table/106/1 /Table/106/1/10 +/Table/106/1/10 /Table/106/2/20 70 public t 106 t_pkey 1 /Table/106/1/10 /Table/106/2 +/Table/106/1/10 /Table/106/2/20 70 public t 106 idx 2 /Table/106/2 /Table/106/2/20 +/Table/106/2/20 /Table/106/2/30 71 public t 106 idx 2 /Table/106/2/20 /Table/106/2/30 +/Table/106/2/30 /Table/107/1/42 72 public t 106 idx 2 /Table/106/2/30 /Table/106/3 +/Table/106/2/30 /Table/107/1/42 72 public u 107 u_pkey 1 /Table/107/1 /Table/107/1/42 +/Table/107/1/42 /Max 73 public u 107 u_pkey 1 /Table/107/1/42 /Table/107/2 subtest show_ranges_from_table @@ -341,10 +341,10 @@ SELECT start_key, end_key, range_id, split_enforced_until FROM [SHOW RANGES FROM ORDER BY range_id ---- start_key end_key range_id split_enforced_until - …/1/10 68 NULL -…/1/10 …/2/20 69 2262-04-11 23:47:16.854776 +0000 +0000 -…/2/20 …/2/30 70 2262-04-11 23:47:16.854776 +0000 +0000 -…/2/30 71 2262-04-11 23:47:16.854776 +0000 +0000 + …/1/10 69 NULL +…/1/10 …/2/20 70 2262-04-11 23:47:16.854776 +0000 +0000 +…/2/20 …/2/30 71 2262-04-11 23:47:16.854776 +0000 +0000 +…/2/30 72 2262-04-11 23:47:16.854776 +0000 +0000 # Ditto, verbose form. query TTIIT colnames @@ -352,10 +352,10 @@ SELECT start_key, end_key, range_id, lease_holder, split_enforced_until FROM [SH ORDER BY range_id ---- start_key end_key range_id lease_holder split_enforced_until - …/1/10 68 1 NULL -…/1/10 …/2/20 69 1 2262-04-11 23:47:16.854776 +0000 +0000 -…/2/20 …/2/30 70 1 2262-04-11 23:47:16.854776 +0000 +0000 -…/2/30 71 1 2262-04-11 23:47:16.854776 +0000 +0000 + …/1/10 69 1 NULL +…/1/10 …/2/20 70 1 2262-04-11 23:47:16.854776 +0000 +0000 +…/2/20 …/2/30 71 1 2262-04-11 23:47:16.854776 +0000 +0000 +…/2/30 72 1 2262-04-11 23:47:16.854776 +0000 +0000 # Let's inspect the other table for comparison. query TTIT colnames @@ -363,8 +363,8 @@ SELECT start_key, end_key, range_id, split_enforced_until FROM [SHOW RANGES FROM ORDER BY range_id ---- start_key end_key range_id split_enforced_until - …/1/42 71 2262-04-11 23:47:16.854776 +0000 +0000 -…/1/42 72 2262-04-11 23:47:16.854776 +0000 +0000 + …/1/42 72 2262-04-11 23:47:16.854776 +0000 +0000 +…/1/42 73 2262-04-11 23:47:16.854776 +0000 +0000 @@ -397,11 +397,11 @@ SELECT start_key, end_key, range_id, index_name, index_id, index_start_key, inde ORDER BY range_id, index_id ---- start_key end_key range_id index_name index_id index_start_key index_end_key - …/1/10 68 t_pkey 1 …/1 …/1/10 -…/1/10 …/2/20 69 t_pkey 1 …/1/10 …/2 -…/1/10 …/2/20 69 idx 2 …/2 …/2/20 -…/2/20 …/2/30 70 idx 2 …/2/20 …/2/30 -…/2/30 71 idx 2 …/2/30 …/3 + …/1/10 69 t_pkey 1 …/1 …/1/10 +…/1/10 …/2/20 70 t_pkey 1 …/1/10 …/2 +…/1/10 …/2/20 70 idx 2 …/2 …/2/20 +…/2/20 …/2/30 71 idx 2 …/2/20 …/2/30 +…/2/30 72 idx 2 …/2/30 …/3 @@ -433,18 +433,18 @@ query TTIT colnames SELECT start_key, end_key, range_id, split_enforced_until FROM [SHOW RANGES FROM INDEX t@idx] ORDER BY start_key ---- start_key end_key range_id split_enforced_until - …/20 69 2262-04-11 23:47:16.854776 +0000 +0000 -…/20 …/30 70 2262-04-11 23:47:16.854776 +0000 +0000 -…/30 71 2262-04-11 23:47:16.854776 +0000 +0000 + …/20 70 2262-04-11 23:47:16.854776 +0000 +0000 +…/20 …/30 71 2262-04-11 23:47:16.854776 +0000 +0000 +…/30 72 2262-04-11 23:47:16.854776 +0000 +0000 # Ditto, verbose form. 
query TTIIT colnames SELECT start_key, end_key, range_id, lease_holder, split_enforced_until FROM [SHOW RANGES FROM INDEX t@idx WITH DETAILS] ORDER BY start_key ---- start_key end_key range_id lease_holder split_enforced_until - …/20 69 1 2262-04-11 23:47:16.854776 +0000 +0000 -…/20 …/30 70 1 2262-04-11 23:47:16.854776 +0000 +0000 -…/30 71 1 2262-04-11 23:47:16.854776 +0000 +0000 + …/20 70 1 2262-04-11 23:47:16.854776 +0000 +0000 +…/20 …/30 71 1 2262-04-11 23:47:16.854776 +0000 +0000 +…/30 72 1 2262-04-11 23:47:16.854776 +0000 +0000 subtest cast_error diff --git a/pkg/sql/logictest/testdata/logic_test/show_source b/pkg/sql/logictest/testdata/logic_test/show_source index 358e3d0c2792..5c419ec90171 100644 --- a/pkg/sql/logictest/testdata/logic_test/show_source +++ b/pkg/sql/logictest/testdata/logic_test/show_source @@ -128,6 +128,7 @@ optimizer_always_use_histograms on optimizer_hoist_uncorrelated_equality_subqueries on optimizer_merge_joins_enabled on optimizer_prove_implication_with_virtual_computed_columns on +optimizer_push_limit_into_project_filtered_scan on optimizer_push_offset_into_index_join on optimizer_use_conditional_hoist_fix on optimizer_use_forecasts on @@ -352,6 +353,7 @@ public statement_diagnostics table node NULL public statement_diagnostics_requests table node NULL public statement_execution_insights table node NULL public statement_statistics table node NULL +public table_metadata table node NULL public table_statistics table node NULL public task_payloads table node NULL public tenant_id_seq sequence node NULL @@ -414,6 +416,7 @@ public statement_diagnostics table node NULL · public statement_diagnostics_requests table node NULL · public statement_execution_insights table node NULL · public statement_statistics table node NULL · +public table_metadata table node NULL · public table_statistics table node NULL · public task_payloads table node NULL · public tenant_id_seq sequence node NULL · @@ -561,7 +564,7 @@ query TTTI colnames SELECT start_key, end_key, replicas, lease_holder FROM [SHOW RANGES FROM CURRENT_CATALOG WITH DETAILS] ---- start_key end_key replicas lease_holder -/Table/66 /Max {1} 1 +/Table/67 /Max {1} 1 query TTTI colnames SELECT start_key, end_key, replicas, lease_holder FROM [SHOW RANGES FROM TABLE system.descriptor WITH DETAILS] diff --git a/pkg/sql/logictest/testdata/logic_test/show_tenant_fingerprints b/pkg/sql/logictest/testdata/logic_test/show_tenant_fingerprints index 13c9cfa02d5f..9a9fa7688f45 100644 --- a/pkg/sql/logictest/testdata/logic_test/show_tenant_fingerprints +++ b/pkg/sql/logictest/testdata/logic_test/show_tenant_fingerprints @@ -1,7 +1,7 @@ -# LogicTest: !3node-tenant-default-configs !fakedist !fakedist-vec-off !fakedist-disk !local-read-committed -# This is skipped under local-read-committed, since it uses +# LogicTest: !3node-tenant-default-configs !fakedist !fakedist-vec-off !fakedist-disk !weak-iso-level-configs +# This is skipped under weak-iso-level-configs, since it uses # cluster_logical_timestamp() heavily, which does not work under -# READ COMMITTED. +# READ COMMITTED or REPEATABLE READ. 
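# For illustration, the logictest directive semantics these files rely on,
# summarized from existing usage (no new directives):
#
#   skipif config <name>  -- skip the next statement/query under the named
#                            config(s)
#   onlyif config <name>  -- run the next statement/query only under the
#                            named config(s)
#
# weak-iso-level-configs appears to name the weak-isolation configs as a
# group (e.g. local-read-committed and local-repeatable-read).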
# Because tenant initialization includes data tied to particular # timestamps, we can't easily make an assertion about the "correct" diff --git a/pkg/sql/logictest/testdata/logic_test/split_at b/pkg/sql/logictest/testdata/logic_test/split_at index 5f0d3b68dccf..8d685e7032c4 100644 --- a/pkg/sql/logictest/testdata/logic_test/split_at +++ b/pkg/sql/logictest/testdata/logic_test/split_at @@ -5,12 +5,14 @@ statement ok CREATE TABLE t (a INT PRIMARY KEY) +skipif config local-mixed-24.1 +skipif config local-mixed-24.2 query TTTI colnames,rowsort SELECT start_key, end_key, replicas, lease_holder FROM [SHOW RANGES FROM TABLE t WITH DETAILS] ORDER BY 1 ---- start_key end_key replicas lease_holder - {1} 1 + {1} 1 query TTT colnames,nosort ALTER TABLE t SPLIT AT VALUES (1), (10) @@ -19,12 +21,14 @@ key pretty split_enforced_until [242 137 137] /1 2262-04-11 23:47:16.854776 +0000 +0000 [242 137 146] /10 2262-04-11 23:47:16.854776 +0000 +0000 +skipif config local-mixed-24.1 +skipif config local-mixed-24.2 query TTTI colnames,rowsort SELECT start_key, end_key, replicas, lease_holder FROM [SHOW RANGES FROM TABLE t WITH DETAILS] ORDER BY 1 ---- start_key end_key replicas lease_holder - …/1/1 {1} 1 + …/1/1 {1} 1 …/1/1 …/1/10 {1} 1 …/1/10 {1} 1 diff --git a/pkg/sql/logictest/testdata/logic_test/sqlliveness b/pkg/sql/logictest/testdata/logic_test/sqlliveness index 5c9b91237a69..fc24f8dc4ec6 100644 --- a/pkg/sql/logictest/testdata/logic_test/sqlliveness +++ b/pkg/sql/logictest/testdata/logic_test/sqlliveness @@ -31,5 +31,3 @@ SELECT count(*) > 0 FROM system.sqlliveness WHERE crdb_internal.sql_liveness_is_ true subtest end - - diff --git a/pkg/sql/logictest/testdata/logic_test/strict_ddl_atomicity b/pkg/sql/logictest/testdata/logic_test/strict_ddl_atomicity index a583ea08b84f..529ee5e66411 100644 --- a/pkg/sql/logictest/testdata/logic_test/strict_ddl_atomicity +++ b/pkg/sql/logictest/testdata/logic_test/strict_ddl_atomicity @@ -52,10 +52,10 @@ ALTER TABLE testing ADD CONSTRAINT "unique_values" UNIQUE(v) statement ok ROLLBACK -skipif config local-read-committed +skipif config weak-iso-level-configs statement error unimplemented: cannot run this DDL statement inside a multi-statement transaction as its atomicity cannot be guaranteed.*\n.*\n.*issue-v/42061 SELECT 1; ALTER TABLE testing ADD CONSTRAINT "unique_values" UNIQUE(v) -onlyif config local-read-committed +onlyif config weak-iso-level-configs statement error to use multi-statement transactions involving a schema change under weak isolation levels, enable the autocommit_before_ddl setting SELECT 1; ALTER TABLE testing ADD CONSTRAINT "unique_values" UNIQUE(v) diff --git a/pkg/sql/logictest/testdata/logic_test/system b/pkg/sql/logictest/testdata/logic_test/system index 139896b8faa7..6895266721cf 100644 --- a/pkg/sql/logictest/testdata/logic_test/system +++ b/pkg/sql/logictest/testdata/logic_test/system @@ -10,6 +10,7 @@ test root NULL NULL {} NULL # descriptor_id_sq, tenant, tenant_usage, and span_configurations. 
skipif config 3node-tenant-default-configs skipif config local-mixed-24.1 +skipif config local-mixed-24.2 query TTTTT SELECT schema_name, table_name, type, owner, locality FROM [SHOW TABLES FROM system] ORDER BY 2 ---- @@ -55,6 +56,7 @@ public statement_diagnostics table node NULL public statement_diagnostics_requests table node NULL public statement_execution_insights table node NULL public statement_statistics table node NULL +public table_metadata table node NULL public table_statistics table node NULL public task_payloads table node NULL public tenant_id_seq sequence node NULL @@ -116,6 +118,7 @@ public statement_diagnostics table node NULL public statement_diagnostics_requests table node NULL public statement_execution_insights table node NULL public statement_statistics table node NULL +public table_metadata table node NULL public table_statistics table node NULL public task_payloads table node NULL public tenant_id_seq sequence node NULL @@ -135,6 +138,8 @@ public zones table node NULL # The test expectations are different on tenants because of # descriptor_id_sq, tenant, tenant_usage, and span_configurations. skipif config 3node-tenant-default-configs +skipif config local-mixed-24.1 +skipif config local-mixed-24.2 query I rowsort SELECT id FROM system.descriptor ORDER BY 1 ---- @@ -195,6 +200,7 @@ SELECT id FROM system.descriptor ORDER BY 1 64 65 66 +67 100 101 102 @@ -263,6 +269,7 @@ SELECT id FROM system.descriptor ORDER BY 1 64 65 66 +67 100 101 102 @@ -399,6 +406,8 @@ system root CONNECT true # The test expectations are different on tenants because of # descriptor_id_sq, tenant, tenant_usage, and span_configurations. skipif config 3node-tenant-default-configs +skipif config local-mixed-24.1 +skipif config local-mixed-24.2 query TTTTTB rowsort SHOW GRANTS ON system.* ---- @@ -695,6 +704,14 @@ system public statement_execution_insights root SELECT true system public statement_execution_insights root UPDATE true system public statement_statistics admin SELECT true system public statement_statistics root SELECT true +system public table_metadata admin DELETE true +system public table_metadata admin INSERT true +system public table_metadata admin SELECT true +system public table_metadata admin UPDATE true +system public table_metadata root DELETE true +system public table_metadata root INSERT true +system public table_metadata root SELECT true +system public table_metadata root UPDATE true system public table_statistics admin DELETE true system public table_statistics admin INSERT true system public table_statistics admin SELECT true @@ -1081,6 +1098,14 @@ system public statement_execution_insights root SELECT true system public statement_execution_insights root UPDATE true system public statement_statistics admin SELECT true system public statement_statistics root SELECT true +system public table_metadata admin DELETE true +system public table_metadata admin INSERT true +system public table_metadata admin SELECT true +system public table_metadata admin UPDATE true +system public table_metadata root DELETE true +system public table_metadata root INSERT true +system public table_metadata root SELECT true +system public table_metadata root UPDATE true system public table_statistics admin DELETE true system public table_statistics admin INSERT true system public table_statistics admin SELECT true @@ -1299,6 +1324,7 @@ WHERE name NOT LIKE 'sql.defaults%' AND name NOT LIKE 'sql.distsql%' AND name NOT LIKE 'sql.testing%' AND name NOT LIKE 'sql.stats%' +AND name NOT LIKE 
'sql.txn.%_isolation.enabled' AND name != 'kv.range_merge.queue_enabled' ORDER BY name ---- @@ -1315,6 +1341,7 @@ WHERE name NOT LIKE 'sql.defaults%' AND name NOT LIKE 'sql.distsql%' AND name NOT LIKE 'sql.testing%' AND name NOT LIKE 'sql.stats%' +AND name NOT LIKE 'sql.txn.%_isolation.enabled' ORDER BY name ---- cluster.secret @@ -1335,6 +1362,7 @@ WHERE name NOT LIKE 'sql.defaults%' AND name NOT LIKE 'sql.distsql%' AND name NOT LIKE 'sql.testing%' AND name NOT LIKE 'sql.stats%' +AND name NOT LIKE 'sql.txn.%_isolation.enabled' AND name NOT IN ('version', 'cluster.secret', 'kv.range_merge.queue_enabled') ORDER BY name ---- @@ -1350,6 +1378,7 @@ WHERE name NOT LIKE 'sql.defaults%' AND name NOT LIKE 'sql.distsql%' AND name NOT LIKE 'sql.testing%' AND name NOT LIKE 'sql.stats%' +AND name NOT LIKE 'sql.txn.%_isolation.enabled' AND name NOT IN ('version', 'cluster.secret') ORDER BY name ---- diff --git a/pkg/sql/logictest/testdata/logic_test/system_namespace b/pkg/sql/logictest/testdata/logic_test/system_namespace index f063a86a68b8..33226a222fe9 100644 --- a/pkg/sql/logictest/testdata/logic_test/system_namespace +++ b/pkg/sql/logictest/testdata/logic_test/system_namespace @@ -1,21 +1,39 @@ skipif config 3node-tenant-default-configs +skipif config local-mixed-24.1 +skipif config local-mixed-24.2 query IITI rowsort SELECT * FROM system.namespace ---- -1 29 locations 21 -0 0 defaultdb 100 +1 29 ui 14 +1 29 users 4 +1 29 web_sessions 19 +1 29 zones 5 +100 0 public 101 +102 0 public 103 +104 0 public 105 +0 0 postgres 102 +0 0 system 1 +0 0 test 104 1 0 public 29 1 29 comments 24 1 29 database_role_settings 44 1 29 descriptor 3 +1 29 scheduled_jobs 37 +0 0 defaultdb 100 1 29 descriptor_id_seq 7 +1 29 settings 6 +1 29 span_configurations 47 1 29 eventlog 12 1 29 external_connections 53 1 29 job_info 54 1 29 jobs 15 1 29 join_tokens 41 1 29 lease 11 -100 0 public 101 +1 29 span_count 51 +1 29 span_stats_buckets 56 +1 29 span_stats_samples 57 +1 29 span_stats_tenant_boundaries 58 +1 29 locations 21 1 29 migrations 40 1 29 mvcc_statistics 64 1 29 namespace 30 @@ -23,49 +41,34 @@ SELECT * FROM system.namespace 1 29 protected_ts_meta 31 1 29 protected_ts_records 32 1 29 rangelog 13 +1 29 span_stats_unique_keys 55 +1 29 sql_instances 46 1 29 region_liveness 9 1 29 replication_constraint_stats 25 1 29 replication_critical_localities 26 1 29 replication_stats 27 -102 0 public 103 -104 0 public 105 -0 0 postgres 102 -0 0 system 1 -0 0 test 104 -1 29 statement_statistics 42 -1 29 table_statistics 20 -1 29 task_payloads 59 -1 29 tenant_id_seq 63 -1 29 tenant_settings 50 -1 29 tenant_tasks 60 -1 29 tenant_usage 45 -1 29 tenants 8 -1 29 transaction_activity 62 -1 29 transaction_execution_insights 65 -1 29 transaction_statistics 43 -1 29 ui 14 -1 29 users 4 -1 29 web_sessions 19 -1 29 zones 5 1 29 reports_meta 28 1 29 role_id_seq 48 1 29 role_members 23 1 29 role_options 33 -1 29 scheduled_jobs 37 -1 29 settings 6 -1 29 span_configurations 47 -1 29 span_count 51 -1 29 span_stats_buckets 56 -1 29 span_stats_samples 57 -1 29 span_stats_tenant_boundaries 58 -1 29 span_stats_unique_keys 55 -1 29 sql_instances 46 +1 29 transaction_activity 62 +1 29 transaction_execution_insights 65 +1 29 transaction_statistics 43 1 29 sqlliveness 39 1 29 statement_activity 61 1 29 statement_bundle_chunks 34 1 29 statement_diagnostics 36 1 29 statement_diagnostics_requests 35 1 29 statement_execution_insights 66 +1 29 statement_statistics 42 +1 29 table_metadata 67 +1 29 table_statistics 20 +1 29 task_payloads 59 +1 29 tenant_id_seq 63 
+1 29 tenant_settings 50 +1 29 tenant_tasks 60 +1 29 tenant_usage 45 +1 29 tenants 8 # When run with a tenant, system.namespace has an extra entry for # descriptor_id_seq and no entries for tenants, tenant_usage, and @@ -121,6 +124,7 @@ SELECT * FROM system.namespace 1 29 statement_diagnostics_requests 35 1 29 statement_execution_insights 66 1 29 statement_statistics 42 +1 29 table_metadata 67 1 29 table_statistics 20 1 29 task_payloads 59 1 29 tenant_id_seq 63 diff --git a/pkg/sql/logictest/testdata/logic_test/table b/pkg/sql/logictest/testdata/logic_test/table index 6719d4837aee..9c09661044ec 100644 --- a/pkg/sql/logictest/testdata/logic_test/table +++ b/pkg/sql/logictest/testdata/logic_test/table @@ -585,6 +585,7 @@ databases NULL default_privileges NULL feature_usage NULL forward_dependencies NULL +fully_qualified_names NULL gossip_alerts NULL gossip_liveness NULL gossip_network NULL diff --git a/pkg/sql/logictest/testdata/logic_test/temp_table b/pkg/sql/logictest/testdata/logic_test/temp_table index ccf4f2614209..863119b0ed6d 100644 --- a/pkg/sql/logictest/testdata/logic_test/temp_table +++ b/pkg/sql/logictest/testdata/logic_test/temp_table @@ -150,14 +150,14 @@ SELECT * from pg_temp.view_on_temp 2 # A "permanent" view on a temporary table gets upgraded to temporary. -# Read committed emits extra notices, so skip it. -skipif config local-read-committed +# Weak isolation levels emit extra notices, so skip them. +skipif config weak-iso-level-configs query T noticetrace CREATE VIEW upgrade_temp_view AS SELECT a FROM temp_table ---- NOTICE: view "upgrade_temp_view" will be a temporary view -onlyif config local-read-committed +onlyif config weak-iso-level-configs statement ok CREATE VIEW upgrade_temp_view AS SELECT a FROM temp_table diff --git a/pkg/sql/logictest/testdata/logic_test/tenant b/pkg/sql/logictest/testdata/logic_test/tenant index 6f00d23a1e7a..d60069293efd 100644 --- a/pkg/sql/logictest/testdata/logic_test/tenant +++ b/pkg/sql/logictest/testdata/logic_test/tenant @@ -378,137 +378,6 @@ DROP TENANT two statement ok DROP TENANT 'tenant-one' -subtest tenant_templates - -query T -SHOW CLUSTER SETTING sql.create_virtual_cluster.default_template ----- -· - -# Check we can't use the system tenant as template. -statement error using the system tenant as config template -CREATE TENANT othertenant LIKE system - -# Create some "interesting" tenant template. -statement ok -CREATE TENANT tmpl; - -let $tmplid -SELECT id FROM system.tenants WHERE name = 'tmpl' - -statement ok -ALTER TENANT tmpl GRANT CAPABILITY can_view_node_info; -- will be copied -ALTER TENANT tmpl SET CLUSTER SETTING trace.debug_http_endpoint.enabled = true; -- will be copied --- Simulate resource limits. Will be copied. --- Note: we cannot use the update_tenant_resource_limits() builtin --- directly here because it can only be used from a CCL binary. -INSERT INTO system.tenant_usage( - tenant_id, instance_id, next_instance_id, last_update, - ru_burst_limit, ru_refill_rate, ru_current, current_share_sum, total_consumption) -VALUES ($tmplid, 0, 0, now(), - 11, 22, 33, 44, ''::BYTES); - - -statement ok -ALTER TENANT tmpl START SERVICE SHARED; -- will not be copied. - -# Use it to create a new tenant. -statement ok -CREATE TENANT othertenant LIKE tmpl - -let $otherid -SELECT id FROM system.tenants WHERE name = 'othertenant' - -# Verify the service mode was not copied. 
-query BTTT -SELECT id = $otherid, name, data_state, service_mode FROM [SHOW TENANT othertenant] ----- -true othertenant ready none - -# Verify the new tenant has the same caps as the template -# (by showing there's no difference between the two) -query TT -SELECT capability_name, capability_value FROM [SHOW TENANT tmpl WITH CAPABILITIES] -EXCEPT SELECT capability_name, capability_value FROM [SHOW TENANT othertenant WITH CAPABILITIES]; ----- - -# Check that the setting overrides were copied. -query TTTT rowsort -SELECT variable, value, type, origin FROM [SHOW CLUSTER SETTINGS FOR TENANT othertenant] -WHERE origin != 'no-override' ----- -trace.debug_http_endpoint.enabled true b per-tenant-override - -# Check that the resource usage parameters were copied. -query IIRRRRI -SELECT instance_id, next_instance_id, - ru_burst_limit, ru_refill_rate, ru_current, - current_share_sum, length(total_consumption) -FROM system.tenant_usage WHERE tenant_id = $otherid ----- -0 0 11 22 33 0 0 - -# Clean up. -statement ok -DROP TENANT othertenant - -# Now set the default template and try again. -statement ok -SET CLUSTER SETTING sql.create_virtual_cluster.default_template = 'nonexistent'; - -statement error retrieving default tenant configuration template.*tenant "nonexistent" does not exist -CREATE TENANT othertenant - -statement ok -SET CLUSTER SETTING sql.create_virtual_cluster.default_template = 'tmpl'; - -# Create a new tenant - this should use the template implicitly now. -statement ok -CREATE TENANT othertenant - -let $otherid -SELECT id FROM system.tenants WHERE name = 'othertenant' - -# Verify the service mode was not copied. -query BTTT -SELECT id = $otherid, name, data_state, service_mode FROM [SHOW TENANT othertenant] ----- -true othertenant ready none - -query TT -SELECT capability_name, capability_value FROM [SHOW TENANT tmpl WITH CAPABILITIES] -EXCEPT SELECT capability_name, capability_value FROM [SHOW TENANT othertenant WITH CAPABILITIES]; ----- - -# Check the setting overrides were taken over. -query TTTT rowsort -SELECT variable, value, type, origin FROM [SHOW CLUSTER SETTINGS FOR TENANT othertenant] -WHERE origin != 'no-override' ----- -trace.debug_http_endpoint.enabled true b per-tenant-override - -# Check that the resource usage parameters were copied. -query IIRRRRI -SELECT instance_id, next_instance_id, - ru_burst_limit, ru_refill_rate, ru_current, - current_share_sum, length(total_consumption) -FROM system.tenant_usage WHERE tenant_id = $otherid ----- -0 0 11 22 33 0 0 - -# Clean up. 
-statement ok -DROP TENANT othertenant - -statement ok -ALTER TENANT tmpl STOP SERVICE - -statement ok -DROP TENANT tmpl - -statement ok -RESET CLUSTER SETTING sql.create_virtual_cluster.default_template - subtest regression_105115 statement ok diff --git a/pkg/sql/logictest/testdata/logic_test/tenant_builtins b/pkg/sql/logictest/testdata/logic_test/tenant_builtins index 5186146b2222..205c5a78a8a3 100644 --- a/pkg/sql/logictest/testdata/logic_test/tenant_builtins +++ b/pkg/sql/logictest/testdata/logic_test/tenant_builtins @@ -1,4 +1,4 @@ -# LogicTest: !3node-tenant-default-configs +# LogicTest: !enterprise-configs query IBIT colnames SELECT id, active, length(info), name FROM system.tenants ORDER BY id diff --git a/pkg/sql/logictest/testdata/logic_test/tenant_from_tenant_hint b/pkg/sql/logictest/testdata/logic_test/tenant_from_tenant_hint index cac96c07a0e4..d2cc9636ea99 100644 --- a/pkg/sql/logictest/testdata/logic_test/tenant_from_tenant_hint +++ b/pkg/sql/logictest/testdata/logic_test/tenant_from_tenant_hint @@ -22,4 +22,3 @@ SET CLUSTER SETTING kv.rangefeed.enabled = true statement ok SET CLUSTER SETTING server.rangelog.ttl = '300s' - diff --git a/pkg/sql/logictest/testdata/logic_test/timestamp b/pkg/sql/logictest/testdata/logic_test/timestamp index 47827efe8152..3d912a63e1cf 100644 --- a/pkg/sql/logictest/testdata/logic_test/timestamp +++ b/pkg/sql/logictest/testdata/logic_test/timestamp @@ -532,7 +532,7 @@ SELECT to_timestamp(1646906263.123456), to_timestamp(1646906263), to_timestamp(' query TT SELECT to_timestamp('infinity'), to_timestamp('-infinity') ---- -294276-12-31 23:59:59.999999 +0000 UTC -4713-11-24 00:00:00 +0000 UTC +infinity -infinity ## Test for to_timestamp with NULL query T @@ -783,3 +783,48 @@ SET timezone = '04:15'; SELECT to_char(now(), 'of') as of_t, to_char(now(), 'tzh:tzm') as "tzh:tzm"; ---- -04:15 -04:15 + +# Test for issue #41564, the behavior of infinity should be consistent with PostgreSQL. +subtest infinity_41564 + +query T +SELECT 'infinity':::TIMESTAMP +---- +infinity + +query T +SELECT '-infinity':::TIMESTAMP +---- +-infinity + +# infinity is greater than any timestamp, including '294276-12-31 23:59:59.999999' (MaxSupportedTime) +query B +SELECT 'infinity'::timestamp > '294276-12-31 23:59:59.999999'::timestamp; +---- +true + +# -infinity is less than any timestamp, including '4714-11-24 00:00:00+00 BC' (MinSupportedTime) +query B +SELECT '-infinity'::timestamp < '4714-11-24 00:00:00+00 BC'::timestamp; +---- +true + +# infinity and -infinity should be equal to themselves +query BB +SELECT 'infinity'::timestamp = 'infinity'::timestamp, '-infinity'::timestamp = '-infinity'::timestamp; +---- +true true + +# infinity add/subtract any interval results in itself +query TT +SELECT 'infinity'::timestamp + '1 second'::interval, 'infinity'::timestamp - '1 second'::interval; +---- +infinity infinity + +# -infinity add/subtract any interval results in itself +query TT +SELECT '-infinity'::timestamp + '1 second'::interval, '-infinity'::timestamp - '1 second'::interval; +---- +-infinity -infinity + +subtest end diff --git a/pkg/sql/logictest/testdata/logic_test/txn b/pkg/sql/logictest/testdata/logic_test/txn index 0bfd64dd09f6..bf824a674d8d 100644 --- a/pkg/sql/logictest/testdata/logic_test/txn +++ b/pkg/sql/logictest/testdata/logic_test/txn @@ -185,10 +185,10 @@ ROLLBACK TRANSACTION # Set isolation level without a transaction is an error. 
statement error pgcode 25P01 there is no transaction in progress -SET TRANSACTION ISOLATION LEVEL SNAPSHOT +SET TRANSACTION ISOLATION LEVEL REPEATABLE READ statement ok -BEGIN TRANSACTION ISOLATION LEVEL SNAPSHOT; COMMIT +BEGIN TRANSACTION ISOLATION LEVEL REPEATABLE READ; COMMIT onlyif config enterprise-configs query T noticetrace @@ -208,7 +208,7 @@ statement ok BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE; COMMIT statement ok -BEGIN TRANSACTION; SET TRANSACTION ISOLATION LEVEL SNAPSHOT; COMMIT +BEGIN TRANSACTION; SET TRANSACTION ISOLATION LEVEL REPEATABLE READ; COMMIT statement ok BEGIN TRANSACTION; SET TRANSACTION ISOLATION LEVEL SERIALIZABLE; COMMIT @@ -239,11 +239,11 @@ statement ok UPDATE kv SET v = 'b' WHERE k in ('a') onlyif config enterprise-configs -skipif config local-read-committed +skipif config weak-iso-level-configs statement error pgcode 25001 cannot change the isolation level of a running transaction SET TRANSACTION ISOLATION LEVEL READ COMMITTED -onlyif config local-read-committed +onlyif config weak-iso-level-configs statement error pgcode 25001 cannot change the isolation level of a running transaction SET TRANSACTION ISOLATION LEVEL SERIALIZABLE @@ -257,11 +257,11 @@ statement ok SELECT * FROM kv LIMIT 1 onlyif config enterprise-configs -skipif config local-read-committed +skipif config weak-iso-level-configs statement error pgcode 25001 cannot change the isolation level of a running transaction SET transaction_isolation = 'READ COMMITTED' -onlyif config local-read-committed +onlyif config weak-iso-level-configs statement error pgcode 25001 cannot change the isolation level of a running transaction SET transaction_isolation = 'SERIALIZABLE' @@ -271,7 +271,11 @@ ROLLBACK statement ok SET CLUSTER SETTING sql.txn.read_committed_isolation.enabled = false -# Transactions default to serializable when the read_committed cluster setting is off. +statement ok +SET CLUSTER SETTING sql.txn.repeatable_read_isolation.enabled = false + +# Transactions default to serializable when the read_committed and +# repeatable_read cluster settings are off. statement ok BEGIN TRANSACTION @@ -295,9 +299,9 @@ SHOW TRANSACTION ISOLATION LEVEL ---- serializable -# SNAPSHOT is now mapped to serializable +# REPEATABLE READ is now mapped to serializable statement ok -SET TRANSACTION ISOLATION LEVEL SNAPSHOT +SET TRANSACTION ISOLATION LEVEL REPEATABLE READ query T SHOW TRANSACTION ISOLATION LEVEL @@ -323,8 +327,9 @@ serializable statement ok COMMIT -# Since read_committed_isolation.enabled is false, setting isolation level -# to READ COMMITTED should map to SERIALIZABLE. +# Since read_committed_isolation.enabled and repeatable_read_isolation.enabled +# are both false, setting isolation level to READ COMMITTED should map to +# SERIALIZABLE. statement ok SET default_transaction_isolation = 'read committed' @@ -341,27 +346,59 @@ SHOW DEFAULT_TRANSACTION_ISOLATION ---- serializable -# SNAPSHOT can be used with a hidden cluster setting statement ok -SET CLUSTER SETTING sql.txn.snapshot_isolation.enabled = true +SET CLUSTER SETTING sql.txn.repeatable_read_isolation.enabled = true + +# Since read_committed_isolation.enabled is false but repeatable_read_isolation.enabled +# is true, setting isolation level to READ COMMITTED should map to REPEATABLE READ. 
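# For illustration, a hedged summary of the requested-to-effective isolation
# mapping exercised below (enterprise configs; without a valid license every
# level upgrades to SERIALIZABLE):
#
#   READ COMMITTED  -> REPEATABLE READ  repeatable_read on, read_committed off
#   SNAPSHOT        -> REPEATABLE READ  repeatable_read on
#   REPEATABLE READ -> SERIALIZABLE     repeatable_read off
#   READ COMMITTED  -> SERIALIZABLE     both settings off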
+statement ok +SET default_transaction_isolation = 'read committed' + +onlyif config enterprise-configs +query T +SHOW default_transaction_isolation +---- +repeatable read + +skipif config enterprise-configs +query T +SHOW default_transaction_isolation +---- +serializable +statement ok +SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL READ COMMITTED + +onlyif config enterprise-configs +query T +SHOW DEFAULT_TRANSACTION_ISOLATION +---- +repeatable read + +skipif config enterprise-configs +query T +SHOW DEFAULT_TRANSACTION_ISOLATION +---- +serializable + +# Since repeatable_read_isolation.enabled is true, REPEATABLE READ can be used. statement ok BEGIN statement ok -SET TRANSACTION ISOLATION LEVEL SNAPSHOT +SET TRANSACTION ISOLATION LEVEL REPEATABLE READ onlyif config enterprise-configs query T SHOW TRANSACTION ISOLATION LEVEL ---- -snapshot +repeatable read onlyif config enterprise-configs query T SHOW transaction_isolation ---- -snapshot +repeatable read skipif config enterprise-configs query T @@ -379,19 +416,19 @@ statement ok COMMIT # We can't set isolation level to an unsupported one. -statement error invalid value for parameter "transaction_isolation": "this is made up"\n.*Available values: serializable,snapshot +statement error invalid value for parameter "transaction_isolation": "this is made up"\n.*Available values: serializable,repeatable read SET transaction_isolation = 'this is made up' # We can explicitly start a transaction with isolation level specified. statement ok -BEGIN TRANSACTION ISOLATION LEVEL SNAPSHOT +BEGIN TRANSACTION ISOLATION LEVEL REPEATABLE READ onlyif config enterprise-configs query T SHOW TRANSACTION ISOLATION LEVEL ---- -snapshot +repeatable read skipif config enterprise-configs query T @@ -411,7 +448,7 @@ statement ok COMMIT statement ok -SET CLUSTER SETTING sql.txn.snapshot_isolation.enabled = false +SET CLUSTER SETTING sql.txn.repeatable_read_isolation.enabled = false statement ok SET CLUSTER SETTING sql.txn.read_committed_isolation.enabled = true @@ -709,15 +746,15 @@ normal # We can specify both isolation level and user priority. statement ok -BEGIN TRANSACTION ISOLATION LEVEL SNAPSHOT, PRIORITY LOW; COMMIT +BEGIN TRANSACTION ISOLATION LEVEL REPEATABLE READ, PRIORITY LOW; COMMIT statement ok -BEGIN TRANSACTION PRIORITY LOW, ISOLATION LEVEL SNAPSHOT; COMMIT +BEGIN TRANSACTION PRIORITY LOW, ISOLATION LEVEL REPEATABLE READ; COMMIT # We can explicitly start a transaction with specified isolation level and low user priority. 
statement ok -BEGIN TRANSACTION ISOLATION LEVEL SNAPSHOT, PRIORITY LOW +BEGIN TRANSACTION ISOLATION LEVEL REPEATABLE READ, PRIORITY LOW query T SHOW TRANSACTION ISOLATION LEVEL @@ -743,7 +780,7 @@ SHOW TRANSACTION PRIORITY high statement ok -SET TRANSACTION PRIORITY NORMAL, ISOLATION LEVEL SNAPSHOT +SET TRANSACTION PRIORITY NORMAL, ISOLATION LEVEL REPEATABLE READ query T SHOW TRANSACTION ISOLATION LEVEL @@ -759,16 +796,16 @@ statement ok COMMIT statement ok -SET CLUSTER SETTING sql.txn.snapshot_isolation.enabled = true +SET CLUSTER SETTING sql.txn.repeatable_read_isolation.enabled = true statement ok -BEGIN TRANSACTION ISOLATION LEVEL SNAPSHOT, PRIORITY LOW +BEGIN TRANSACTION ISOLATION LEVEL REPEATABLE READ, PRIORITY LOW onlyif config enterprise-configs query T SHOW TRANSACTION ISOLATION LEVEL ---- -snapshot +repeatable read query T SHOW TRANSACTION PRIORITY @@ -791,8 +828,8 @@ high statement ok COMMIT -# With the snapshot_isolation.enabled cluster setting set to true, -# REPEATABLE READ gets mapped to SNAPSHOT if there is a valid license. +# With the repeatable_read_isolation.enabled cluster setting set to true, +# REPEATABLE READ can be used if there is a valid license. onlyif config enterprise-configs query T noticetrace SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL REPEATABLE READ @@ -802,7 +839,7 @@ skipif config enterprise-configs query T noticetrace SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL REPEATABLE READ ---- -NOTICE: SNAPSHOT isolation level is not allowed without an enterprise license; upgrading to SERIALIZABLE +NOTICE: REPEATABLE READ isolation level is not allowed without an enterprise license; upgrading to SERIALIZABLE statement ok SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL REPEATABLE READ @@ -811,7 +848,7 @@ onlyif config enterprise-configs query T SHOW DEFAULT_TRANSACTION_ISOLATION ---- -snapshot +repeatable read skipif config enterprise-configs query T @@ -820,7 +857,7 @@ SHOW DEFAULT_TRANSACTION_ISOLATION serializable statement ok -SET CLUSTER SETTING sql.txn.snapshot_isolation.enabled = false +SET CLUSTER SETTING sql.txn.repeatable_read_isolation.enabled = false statement ok SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL READ UNCOMMITTED @@ -922,18 +959,18 @@ SHOW default_transaction_isolation serializable statement ok -SET CLUSTER SETTING sql.txn.snapshot_isolation.enabled = true +SET CLUSTER SETTING sql.txn.repeatable_read_isolation.enabled = true -# Since snapshot_isolation.enabled is true, setting isolation level to -# REPEATABLE READ should map to SNAPSHOT. +# Since repeatable_read_isolation.enabled is true, setting isolation level to +# SNAPSHOT should map to REPEATABLE READ. 
statement ok -SET default_transaction_isolation = 'repeatable read' +SET default_transaction_isolation = 'snapshot' onlyif config enterprise-configs query T SHOW default_transaction_isolation ---- -snapshot +repeatable read skipif config enterprise-configs query T @@ -942,13 +979,13 @@ SHOW default_transaction_isolation serializable statement ok -SET DEFAULT_TRANSACTION_ISOLATION TO 'SNAPSHOT' +SET DEFAULT_TRANSACTION_ISOLATION TO 'REPEATABLE READ' onlyif config enterprise-configs query T SHOW DEFAULT_TRANSACTION_ISOLATION ---- -snapshot +repeatable read skipif config enterprise-configs query T @@ -957,10 +994,10 @@ SHOW default_transaction_isolation serializable statement ok -SET CLUSTER SETTING sql.txn.snapshot_isolation.enabled = false +SET CLUSTER SETTING sql.txn.repeatable_read_isolation.enabled = false -# Since snapshot_isolation.enabled is false, setting isolation level to SNAPSHOT -should map to SERIALIZABLE. +# Since repeatable_read_isolation.enabled is false, setting isolation level to +# REPEATABLE READ should map to SERIALIZABLE. statement ok SET default_transaction_isolation = 'repeatable read' @@ -970,7 +1007,7 @@ SHOW default_transaction_isolation serializable statement ok -SET DEFAULT_TRANSACTION_ISOLATION TO 'SNAPSHOT' +SET DEFAULT_TRANSACTION_ISOLATION TO 'REPEATABLE READ' query T SHOW DEFAULT_TRANSACTION_ISOLATION @@ -1010,7 +1047,7 @@ statement ok BEGIN TRANSACTION statement ok -SET TRANSACTION ISOLATION LEVEL SNAPSHOT +SET TRANSACTION ISOLATION LEVEL REPEATABLE READ query T SHOW TRANSACTION ISOLATION LEVEL @@ -1174,7 +1211,7 @@ statement ok BEGIN TRANSACTION; SAVEPOINT cockroach_restart; SET TRANSACTION PRIORITY HIGH; - SET TRANSACTION ISOLATION LEVEL SNAPSHOT; + SET TRANSACTION ISOLATION LEVEL REPEATABLE READ; statement ok SELECT IF(nextval('s')<3, crdb_internal.force_retry('1h':::INTERVAL), 0) diff --git a/pkg/sql/logictest/testdata/logic_test/txn_retry b/pkg/sql/logictest/testdata/logic_test/txn_retry index 7d040c4a92fc..32ce25a563dc 100644 --- a/pkg/sql/logictest/testdata/logic_test/txn_retry +++ b/pkg/sql/logictest/testdata/logic_test/txn_retry @@ -25,8 +25,8 @@ user root # Run a cluster_logical_timestamp(), so that the transaction "observes its # commit timestamp" and so can't be refreshed, and then do an insert that will # cause the txn to be pushed. -# cluster_logical_timestamp() is not supported in local-read-committed. -skipif config local-read-committed +# cluster_logical_timestamp() is not supported in weak-iso-level-configs.
+skipif config weak-iso-level-configs statement ok SELECT cluster_logical_timestamp(); INSERT INTO test_retry VALUES (1); diff --git a/pkg/sql/logictest/testdata/logic_test/udf b/pkg/sql/logictest/testdata/logic_test/udf index d63fcc1ba04f..83d0350782e5 100644 --- a/pkg/sql/logictest/testdata/logic_test/udf +++ b/pkg/sql/logictest/testdata/logic_test/udf @@ -70,6 +70,7 @@ CREATE FUNCTION public.f_no_ref(a INT8) NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 1; $$ @@ -103,6 +104,7 @@ CREATE FUNCTION public.f(a public.notmyworkday) NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT a FROM test.public.t; SELECT b FROM test.public.t@t_idx_b; @@ -250,6 +252,7 @@ CREATE FUNCTION public.f_test_cor(a INT8, b INT8) LEAKPROOF STRICT LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 1; $$ @@ -268,6 +271,7 @@ CREATE FUNCTION public.f_test_cor(a INT8, b INT8) NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 2; $$ @@ -284,6 +288,7 @@ CREATE FUNCTION public.f_test_cor(a INT8, b INT8) LEAKPROOF STRICT LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 3; $$ @@ -670,6 +675,7 @@ CREATE FUNCTION public.single_quote(s STRING) NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT (e'\'' || s) || e'\''; $$ diff --git a/pkg/sql/logictest/testdata/logic_test/udf_options b/pkg/sql/logictest/testdata/logic_test/udf_options index 0b7bc01d1931..e62faaa1946f 100644 --- a/pkg/sql/logictest/testdata/logic_test/udf_options +++ b/pkg/sql/logictest/testdata/logic_test/udf_options @@ -36,6 +36,11 @@ CREATE FUNCTION f() RETURNS INT IMMUTABLE LANGUAGE SQL AS $$ SELECT 1 $$ AS $$ S statement error pgcode 42601 pq: LANGUAGE SQL: conflicting or redundant options CREATE FUNCTION f() RETURNS INT IMMUTABLE LANGUAGE SQL LANGUAGE SQL AS $$ SELECT 1 $$; +statement error pgcode 42601 pq: SECURITY DEFINER: conflicting or redundant options +CREATE FUNCTION f() RETURNS INT IMMUTABLE SECURITY INVOKER SECURITY DEFINER LANGUAGE SQL AS $$ SELECT 1 $$; + +statement error pgcode 42601 pq: SECURITY INVOKER: conflicting or redundant options +CREATE FUNCTION f() RETURNS INT IMMUTABLE SECURITY INVOKER EXTERNAL SECURITY INVOKER LANGUAGE SQL AS $$ SELECT 1 $$; subtest volatility @@ -67,6 +72,7 @@ CREATE FUNCTION public.get_l(i INT8) NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT v FROM test.public.kv WHERE k = i; $$ diff --git a/pkg/sql/logictest/testdata/logic_test/udf_params b/pkg/sql/logictest/testdata/logic_test/udf_params index 83dcd0fc6375..5f87a3e4d189 100644 --- a/pkg/sql/logictest/testdata/logic_test/udf_params +++ b/pkg/sql/logictest/testdata/logic_test/udf_params @@ -15,6 +15,9 @@ CREATE FUNCTION f(OUT param INT) RETURNS VOID AS $$ SELECT 1; $$ LANGUAGE SQL; statement error pgcode 42P13 pq: function result type must be int because of OUT parameters CREATE FUNCTION f(OUT param INT) RETURNS RECORD AS $$ SELECT 1; $$ LANGUAGE SQL; +statement error pgcode 42P13 SQL functions cannot have arguments of type VOID +CREATE FUNCTION f(param VOID) RETURNS UUID LANGUAGE SQL AS $$ SELECT NULL $$; + statement ok CREATE FUNCTION f(OUT param INT) RETURNS INT AS $$ SELECT 1; $$ LANGUAGE SQL; @@ -95,6 +98,7 @@ CREATE FUNCTION public.f_param_types(IN p1 INT8, INOUT p2 INT8, INOUT p3 INT8, O NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT p2, p3, p1; $$ @@ -116,6 +120,7 @@ CREATE FUNCTION public.f_param_types(OUT param INT8) NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE SQL + SECURITY INVOKER 
AS $$ SELECT 1; $$ @@ -245,6 +250,7 @@ CREATE FUNCTION public.f_out_int(OUT param_new INT8) NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 1; $$ @@ -295,6 +301,7 @@ CREATE FUNCTION public.f_int(INOUT param INT8) NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT param; $$ @@ -323,6 +330,7 @@ CREATE FUNCTION public.f_int(IN param INT8, OUT param_out INT8) NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT param; $$ @@ -345,6 +353,7 @@ CREATE FUNCTION public.f_int(OUT param_out INT8, IN param INT8) NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT param; $$ @@ -367,6 +376,7 @@ CREATE FUNCTION public.f_int(INOUT param INT8) NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT param; $$ @@ -406,6 +416,7 @@ CREATE FUNCTION public.f_3_in_2_out(IN param1 INT8, OUT param1 INT8, IN param2 I NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT (param1, param2 + param3); $$ @@ -432,6 +443,7 @@ CREATE FUNCTION public.f_3_in_2_out(INOUT param1 INT8, INOUT param2 INT8, IN par NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT (param1, param2 + param3); $$ @@ -462,6 +474,7 @@ CREATE FUNCTION public.f_default_names(OUT INT8, OUT param2 INT8, IN INT8, OUT I NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT (1:::INT8, 2:::INT8, 3:::INT8); $$ @@ -520,6 +533,7 @@ CREATE FUNCTION public.f_default_names(OUT INT8, OUT param2 INT8, IN INT8, OUT c NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT (1:::INT8, 2:::INT8, 3:::INT8); $$ @@ -537,6 +551,7 @@ CREATE FUNCTION public.f_default_names(OUT INT8, OUT param2 INT8, IN INT8, OUT I NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT (1:::INT8, 2:::INT8, 3:::INT8); $$ @@ -554,6 +569,7 @@ CREATE FUNCTION public.f_default_names(OUT INT8, OUT param2 INT8, IN in_param IN NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT (in_param, 2, 3); $$ diff --git a/pkg/sql/logictest/testdata/logic_test/udf_record b/pkg/sql/logictest/testdata/logic_test/udf_record index 4922e035cdcb..9fcdc6062db6 100644 --- a/pkg/sql/logictest/testdata/logic_test/udf_record +++ b/pkg/sql/logictest/testdata/logic_test/udf_record @@ -80,6 +80,7 @@ f_table CREATE FUNCTION public.f_table() NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT t.a, t.b FROM test.public.t ORDER BY a LIMIT 1; $$ diff --git a/pkg/sql/logictest/testdata/logic_test/udf_regressions b/pkg/sql/logictest/testdata/logic_test/udf_regressions index 522a18d299f7..1be0e04eb47e 100644 --- a/pkg/sql/logictest/testdata/logic_test/udf_regressions +++ b/pkg/sql/logictest/testdata/logic_test/udf_regressions @@ -518,6 +518,7 @@ CREATE FUNCTION public.func104242() NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 1 FROM test.public.tab104242 WHERE NULL IN (); $$ @@ -536,6 +537,7 @@ CREATE FUNCTION public.func104242_not_null() NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 1 FROM test.public.tab104242 WHERE 'foo':::public.typ104242 IN (); $$ diff --git a/pkg/sql/logictest/testdata/logic_test/udf_schema_change b/pkg/sql/logictest/testdata/logic_test/udf_schema_change index 57d8fcc1b246..93eea0087d8f 100644 --- a/pkg/sql/logictest/testdata/logic_test/udf_schema_change +++ b/pkg/sql/logictest/testdata/logic_test/udf_schema_change @@ -16,6 +16,7 
@@ CREATE FUNCTION public.f_test_alter_opt(INT8) NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 1; $$ @@ -38,6 +39,7 @@ CREATE FUNCTION public.f_test_alter_opt(INT8) LEAKPROOF STRICT LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 1; $$ @@ -67,6 +69,7 @@ CREATE FUNCTION public.f_test_alter_name(INT8) NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 1; $$ @@ -98,6 +101,7 @@ CREATE FUNCTION public.f_test_alter_name_new(INT8) NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 1; $$ @@ -117,6 +121,7 @@ CREATE FUNCTION public.f_test_alter_name_diff_in() NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 1; $$ @@ -126,6 +131,7 @@ CREATE FUNCTION public.f_test_alter_name_diff_in(INT8) NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 1; $$ @@ -241,6 +247,7 @@ CREATE FUNCTION public.f_test_sc() NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 1; $$ @@ -250,6 +257,7 @@ CREATE FUNCTION public.f_test_sc(INT8) NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 2; $$ @@ -291,6 +299,7 @@ CREATE FUNCTION public.f_test_sc() NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 1; $$ @@ -304,6 +313,7 @@ CREATE FUNCTION test_alter_sc.f_test_sc() NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 3; $$ @@ -313,6 +323,7 @@ CREATE FUNCTION test_alter_sc.f_test_sc(INT8) NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 2; $$ @@ -334,6 +345,7 @@ CREATE FUNCTION public.f_udt_rewrite() NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT 'Monday':::public.notmyworkday; $$ diff --git a/pkg/sql/logictest/testdata/logic_test/udf_star b/pkg/sql/logictest/testdata/logic_test/udf_star index 4a221891eb47..c175689640df 100644 --- a/pkg/sql/logictest/testdata/logic_test/udf_star +++ b/pkg/sql/logictest/testdata/logic_test/udf_star @@ -111,6 +111,7 @@ f_subquery CREATE FUNCTION public.f_subquery() NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT bar.a FROM (SELECT a FROM (SELECT t_onecol.a FROM test.public.t_onecol) AS foo) AS bar; $$ @@ -124,6 +125,7 @@ f_allcolsel_alias CREATE FUNCTION public.f_allcolsel_alias() NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT t1.a, t1.b FROM test.public.t_twocol AS t1, test.public.t_twocol AS t2 WHERE t1.a = t2.a; $$ diff --git a/pkg/sql/logictest/testdata/logic_test/udf_unsupported b/pkg/sql/logictest/testdata/logic_test/udf_unsupported index 91a22a20eca9..d6d52ee9c787 100644 --- a/pkg/sql/logictest/testdata/logic_test/udf_unsupported +++ b/pkg/sql/logictest/testdata/logic_test/udf_unsupported @@ -57,6 +57,7 @@ CREATE FUNCTION public.test_vf_f() NOT LEAKPROOF CALLED ON NULL INPUT LANGUAGE SQL + SECURITY INVOKER AS $$ SELECT lower('hello':::STRING); $$ diff --git a/pkg/sql/logictest/testdata/logic_test/unique b/pkg/sql/logictest/testdata/logic_test/unique index f4568cdb0ff6..612434f42690 100644 --- a/pkg/sql/logictest/testdata/logic_test/unique +++ b/pkg/sql/logictest/testdata/logic_test/unique @@ -1,6 +1,6 @@ -# LogicTest: !local-read-committed -# READ COMMITTED does not work with UNIQUE WITHOUT INDEX constraints. -# See https://github.com/cockroachdb/cockroach/issues/110873. +# LogicTest: !weak-iso-level-configs +# READ COMMITTED and REPEATABLE READ do not work with UNIQUE WITHOUT INDEX +# constraints. 
See https://github.com/cockroachdb/cockroach/issues/110873. statement ok SET experimental_enable_unique_without_index_constraints = true diff --git a/pkg/sql/logictest/testdata/logic_test/vectoross b/pkg/sql/logictest/testdata/logic_test/vectoross index 0d5979cbfb4a..c5415a016fc3 100644 --- a/pkg/sql/logictest/testdata/logic_test/vectoross +++ b/pkg/sql/logictest/testdata/logic_test/vectoross @@ -1,4 +1,4 @@ -# LogicTest: !local-mixed-24.1 !3node-tenant +# LogicTest: !local-mixed-24.1 !enterprise-configs statement error OSS binaries do not include enterprise features CREATE TABLE v (v vector) diff --git a/pkg/sql/logictest/testdata/logic_test/virtual_columns b/pkg/sql/logictest/testdata/logic_test/virtual_columns index 4f101caab207..a4c66f7a3dd1 100644 --- a/pkg/sql/logictest/testdata/logic_test/virtual_columns +++ b/pkg/sql/logictest/testdata/logic_test/virtual_columns @@ -706,27 +706,27 @@ CREATE TABLE uniq_no_index ( UNIQUE WITHOUT INDEX (v) ) -skipif config local-read-committed 110873 +skipif config weak-iso-level-configs 110873 statement ok INSERT INTO uniq_no_index VALUES (1, 10), (2, 20) -skipif config local-read-committed 110873 +skipif config weak-iso-level-configs 110873 statement error duplicate key value violates unique constraint INSERT INTO uniq_no_index VALUES (3, 8) -skipif config local-read-committed 110873 +skipif config weak-iso-level-configs 110873 statement error duplicate key value violates unique constraint UPDATE uniq_no_index SET b=b+11 WHERE a < 2 -skipif config local-read-committed 110873 +skipif config weak-iso-level-configs 110873 statement error duplicate key value violates unique constraint UPSERT INTO uniq_no_index VALUES (2, 30), (5, 6) -skipif config local-read-committed 110873 +skipif config weak-iso-level-configs 110873 statement ok INSERT INTO uniq_no_index VALUES (5, 6) ON CONFLICT (v) DO UPDATE SET b=15 -skipif config local-read-committed 110873 +skipif config weak-iso-level-configs 110873 query III colnames,rowsort SELECT * FROM uniq_no_index ---- @@ -743,23 +743,23 @@ CREATE TABLE uniq_no_index_multi ( UNIQUE WITHOUT INDEX (v, c) ) -skipif config local-read-committed 110873 +skipif config weak-iso-level-configs 110873 statement ok INSERT INTO uniq_no_index_multi VALUES (1, 1, 1), (2, 4, 2), (3, 3, 3) -skipif config local-read-committed 110873 +skipif config weak-iso-level-configs 110873 statement error duplicate key value violates unique constraint INSERT INTO uniq_no_index_multi VALUES (4, 2, 2) -skipif config local-read-committed 110873 +skipif config weak-iso-level-configs 110873 statement error duplicate key value violates unique constraint UPDATE uniq_no_index_multi SET c=2 WHERE a=3 -skipif config local-read-committed 110873 +skipif config weak-iso-level-configs 110873 statement ok UPSERT INTO uniq_no_index_multi VALUES (3, 3, 10) -skipif config local-read-committed 110873 +skipif config weak-iso-level-configs 110873 statement error duplicate key value violates unique constraint UPSERT INTO uniq_no_index_multi VALUES (3, 3, 2) diff --git a/pkg/sql/logictest/testdata/logic_test/virtual_table_privileges b/pkg/sql/logictest/testdata/logic_test/virtual_table_privileges index b8f37e1063c3..4306e1982405 100644 --- a/pkg/sql/logictest/testdata/logic_test/virtual_table_privileges +++ b/pkg/sql/logictest/testdata/logic_test/virtual_table_privileges @@ -84,14 +84,14 @@ user root statement ok CREATE DATABASE test2 -# Read committed emits extra notices, so skip it. 
-skipif config local-read-committed +# Weak isolation levels emit extra notices, so skip them. +skipif config weak-iso-level-configs query T noticetrace GRANT SELECT ON TABLE test2.information_schema.columns TO testuser ---- NOTICE: virtual table privileges are not database specific -onlyif config local-read-committed +onlyif config weak-iso-level-configs statement ok GRANT SELECT ON TABLE test2.information_schema.columns TO testuser diff --git a/pkg/sql/logictest/testdata/logic_test/zone_config_system_tenant b/pkg/sql/logictest/testdata/logic_test/zone_config_system_tenant index 87ef8a172e90..86fc54b9875a 100644 --- a/pkg/sql/logictest/testdata/logic_test/zone_config_system_tenant +++ b/pkg/sql/logictest/testdata/logic_test/zone_config_system_tenant @@ -27,6 +27,7 @@ statement ok CREATE TABLE a(id INT PRIMARY KEY) # Check that global_reads cannot be set without a CCL binary and enterprise license. +skipif config enterprise-configs statement error OSS binaries do not include enterprise features ALTER TABLE a CONFIGURE ZONE USING global_reads = true diff --git a/pkg/sql/logictest/tests/5node-disk/generated_test.go b/pkg/sql/logictest/tests/5node-disk/generated_test.go index d09180aa818b..4b2a4cd8b143 100644 --- a/pkg/sql/logictest/tests/5node-disk/generated_test.go +++ b/pkg/sql/logictest/tests/5node-disk/generated_test.go @@ -30,7 +30,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/randutil" ) -const configIdx = 8 +const configIdx = 9 var logicTestDir string diff --git a/pkg/sql/logictest/tests/5node/generated_test.go b/pkg/sql/logictest/tests/5node/generated_test.go index ee4a4650f212..d33b1041d8b3 100644 --- a/pkg/sql/logictest/tests/5node/generated_test.go +++ b/pkg/sql/logictest/tests/5node/generated_test.go @@ -30,7 +30,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/randutil" ) -const configIdx = 7 +const configIdx = 8 var logicTestDir string diff --git a/pkg/sql/logictest/tests/cockroach-go-testserver-24.1/generated_test.go b/pkg/sql/logictest/tests/cockroach-go-testserver-24.1/generated_test.go index aa1c13cb9a23..8f501d873cf3 100644 --- a/pkg/sql/logictest/tests/cockroach-go-testserver-24.1/generated_test.go +++ b/pkg/sql/logictest/tests/cockroach-go-testserver-24.1/generated_test.go @@ -30,7 +30,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/randutil" ) -const configIdx = 20 +const configIdx = 21 var logicTestDir string diff --git a/pkg/sql/logictest/tests/cockroach-go-testserver-24.2/generated_test.go b/pkg/sql/logictest/tests/cockroach-go-testserver-24.2/generated_test.go index 68604faa4f59..55aa6f1197c9 100644 --- a/pkg/sql/logictest/tests/cockroach-go-testserver-24.2/generated_test.go +++ b/pkg/sql/logictest/tests/cockroach-go-testserver-24.2/generated_test.go @@ -30,7 +30,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/randutil" ) -const configIdx = 21 +const configIdx = 22 var logicTestDir string diff --git a/pkg/sql/logictest/tests/fakedist-disk/generated_test.go b/pkg/sql/logictest/tests/fakedist-disk/generated_test.go index 5eb5ec1d089b..102c324a2f70 100644 --- a/pkg/sql/logictest/tests/fakedist-disk/generated_test.go +++ b/pkg/sql/logictest/tests/fakedist-disk/generated_test.go @@ -30,7 +30,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/randutil" ) -const configIdx = 6 +const configIdx = 7 var logicTestDir string @@ -596,6 +596,13 @@ func TestLogic_distinct_on( runLogicTest(t, "distinct_on") } +func TestLogic_distsql_automatic_partial_stats( + t *testing.T, +) { + defer 
leaktest.AfterTest(t)() + runLogicTest(t, "distsql_automatic_partial_stats") +} + func TestLogic_distsql_automatic_stats( t *testing.T, ) { diff --git a/pkg/sql/logictest/tests/fakedist-vec-off/generated_test.go b/pkg/sql/logictest/tests/fakedist-vec-off/generated_test.go index d98562cfd317..956c4091d289 100644 --- a/pkg/sql/logictest/tests/fakedist-vec-off/generated_test.go +++ b/pkg/sql/logictest/tests/fakedist-vec-off/generated_test.go @@ -30,7 +30,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/randutil" ) -const configIdx = 5 +const configIdx = 6 var logicTestDir string @@ -596,6 +596,13 @@ func TestLogic_distinct_on( runLogicTest(t, "distinct_on") } +func TestLogic_distsql_automatic_partial_stats( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "distsql_automatic_partial_stats") +} + func TestLogic_distsql_automatic_stats( t *testing.T, ) { diff --git a/pkg/sql/logictest/tests/fakedist/generated_test.go b/pkg/sql/logictest/tests/fakedist/generated_test.go index 43d466b572aa..40d87dd6a0a1 100644 --- a/pkg/sql/logictest/tests/fakedist/generated_test.go +++ b/pkg/sql/logictest/tests/fakedist/generated_test.go @@ -30,7 +30,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/randutil" ) -const configIdx = 4 +const configIdx = 5 var logicTestDir string @@ -596,6 +596,13 @@ func TestLogic_distinct_on( runLogicTest(t, "distinct_on") } +func TestLogic_distsql_automatic_partial_stats( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "distsql_automatic_partial_stats") +} + func TestLogic_distsql_automatic_stats( t *testing.T, ) { diff --git a/pkg/sql/logictest/tests/local-legacy-schema-changer/generated_test.go b/pkg/sql/logictest/tests/local-legacy-schema-changer/generated_test.go index 4b1902b143c5..1231a0a6cdee 100644 --- a/pkg/sql/logictest/tests/local-legacy-schema-changer/generated_test.go +++ b/pkg/sql/logictest/tests/local-legacy-schema-changer/generated_test.go @@ -589,6 +589,13 @@ func TestLogic_distinct_on( runLogicTest(t, "distinct_on") } +func TestLogic_distsql_automatic_partial_stats( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "distsql_automatic_partial_stats") +} + func TestLogic_distsql_automatic_stats( t *testing.T, ) { diff --git a/pkg/sql/logictest/tests/local-mixed-24.1/generated_test.go b/pkg/sql/logictest/tests/local-mixed-24.1/generated_test.go index 43a74a8c0e72..5d697dbc409e 100644 --- a/pkg/sql/logictest/tests/local-mixed-24.1/generated_test.go +++ b/pkg/sql/logictest/tests/local-mixed-24.1/generated_test.go @@ -30,7 +30,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/randutil" ) -const configIdx = 18 +const configIdx = 19 var logicTestDir string @@ -596,6 +596,13 @@ func TestLogic_distinct_on( runLogicTest(t, "distinct_on") } +func TestLogic_distsql_automatic_partial_stats( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "distsql_automatic_partial_stats") +} + func TestLogic_distsql_automatic_stats( t *testing.T, ) { diff --git a/pkg/sql/logictest/tests/local-mixed-24.2/generated_test.go b/pkg/sql/logictest/tests/local-mixed-24.2/generated_test.go index 234cc28020fb..6ef739501923 100644 --- a/pkg/sql/logictest/tests/local-mixed-24.2/generated_test.go +++ b/pkg/sql/logictest/tests/local-mixed-24.2/generated_test.go @@ -30,7 +30,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/randutil" ) -const configIdx = 19 +const configIdx = 20 var logicTestDir string @@ -596,6 +596,13 @@ func TestLogic_distinct_on( 
runLogicTest(t, "distinct_on") } +func TestLogic_distsql_automatic_partial_stats( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "distsql_automatic_partial_stats") +} + func TestLogic_distsql_automatic_stats( t *testing.T, ) { diff --git a/pkg/sql/logictest/tests/local-vec-off/generated_test.go b/pkg/sql/logictest/tests/local-vec-off/generated_test.go index 573c8aef5578..47c66a5daf0a 100644 --- a/pkg/sql/logictest/tests/local-vec-off/generated_test.go +++ b/pkg/sql/logictest/tests/local-vec-off/generated_test.go @@ -596,6 +596,13 @@ func TestLogic_distinct_on( runLogicTest(t, "distinct_on") } +func TestLogic_distsql_automatic_partial_stats( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "distsql_automatic_partial_stats") +} + func TestLogic_distsql_automatic_stats( t *testing.T, ) { diff --git a/pkg/sql/logictest/tests/local/generated_test.go b/pkg/sql/logictest/tests/local/generated_test.go index 8812cb46b7b3..eae6f2b132d3 100644 --- a/pkg/sql/logictest/tests/local/generated_test.go +++ b/pkg/sql/logictest/tests/local/generated_test.go @@ -617,6 +617,13 @@ func TestLogic_distinct_on( runLogicTest(t, "distinct_on") } +func TestLogic_distsql_automatic_partial_stats( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runLogicTest(t, "distsql_automatic_partial_stats") +} + func TestLogic_distsql_automatic_stats( t *testing.T, ) { diff --git a/pkg/sql/logictest/tests/multiregion-9node-3region-3azs/generated_test.go b/pkg/sql/logictest/tests/multiregion-9node-3region-3azs/generated_test.go index d0d1b0fd88ff..74cc2708d702 100644 --- a/pkg/sql/logictest/tests/multiregion-9node-3region-3azs/generated_test.go +++ b/pkg/sql/logictest/tests/multiregion-9node-3region-3azs/generated_test.go @@ -30,7 +30,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/randutil" ) -const configIdx = 13 +const configIdx = 14 var logicTestDir string diff --git a/pkg/sql/logictest/tests/multiregion-invalid-locality/generated_test.go b/pkg/sql/logictest/tests/multiregion-invalid-locality/generated_test.go index 597f9cf9e53e..5ca38e14c2f1 100644 --- a/pkg/sql/logictest/tests/multiregion-invalid-locality/generated_test.go +++ b/pkg/sql/logictest/tests/multiregion-invalid-locality/generated_test.go @@ -30,7 +30,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/randutil" ) -const configIdx = 11 +const configIdx = 12 var logicTestDir string diff --git a/pkg/sql/nodestatus_string.go b/pkg/sql/nodestatus_string.go index c952045d0cab..0ff97f9073bb 100644 --- a/pkg/sql/nodestatus_string.go +++ b/pkg/sql/nodestatus_string.go @@ -10,6 +10,7 @@ func _() { var x [1]struct{} _ = x[NodeOK-0] _ = x[NodeUnhealthy-1] + _ = x[NodeDraining-2] } func (i NodeStatus) String() string { @@ -18,6 +19,8 @@ func (i NodeStatus) String() string { return "NodeOK" case NodeUnhealthy: return "NodeUnhealthy" + case NodeDraining: + return "NodeDraining" default: return "NodeStatus(" + strconv.FormatInt(int64(i), 10) + ")" } diff --git a/pkg/sql/opaque.go b/pkg/sql/opaque.go index f7208cf48769..e5a1153cdac4 100644 --- a/pkg/sql/opaque.go +++ b/pkg/sql/opaque.go @@ -270,8 +270,6 @@ func planOpaque(ctx context.Context, p *planner, stmt tree.Statement) (planNode, return p.ShowExternalConnection(ctx, n) case *tree.ShowHistogram: return p.ShowHistogram(ctx, n) - case *tree.ShowLogicalReplicationJobs: - return p.ShowLogicalReplicationJobs(ctx, n) case *tree.ShowTableStats: return p.ShowTableStats(ctx, n) case *tree.ShowTenant: @@ -401,7 +399,6 @@ func init() { 
&tree.ShowCreateExternalConnections{}, &tree.ShowExternalConnections{}, &tree.ShowHistogram{}, - &tree.ShowLogicalReplicationJobs{}, &tree.ShowTableStats{}, &tree.ShowTenant{}, &tree.ShowTraceForSession{}, diff --git a/pkg/sql/opt/exec/execbuilder/testdata/autocommit b/pkg/sql/opt/exec/execbuilder/testdata/autocommit index dcbbe7bfbc39..04a6b6b484d7 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/autocommit +++ b/pkg/sql/opt/exec/execbuilder/testdata/autocommit @@ -44,7 +44,7 @@ WHERE message LIKE '%r$rangeid: sending batch%' AND message NOT LIKE '%QueryTxn%' AND message NOT LIKE '%ResolveIntent%' ---- -dist sender send r68: sending batch 1 CPut, 1 EndTxn to (n1,s1):1 +dist sender send r69: sending batch 1 CPut, 1 EndTxn to (n1,s1):1 # Multi-row insert should auto-commit. query B @@ -68,7 +68,7 @@ WHERE message LIKE '%r$rangeid: sending batch%' AND message NOT LIKE '%QueryTxn%' AND message NOT LIKE '%ResolveIntent%' ---- -dist sender send r68: sending batch 2 CPut, 1 EndTxn to (n1,s1):1 +dist sender send r69: sending batch 2 CPut, 1 EndTxn to (n1,s1):1 # No auto-commit inside a transaction. statement ok @@ -93,7 +93,7 @@ WHERE message LIKE '%r$rangeid: sending batch%' AND message NOT LIKE '%QueryTxn%' AND message NOT LIKE '%ResolveIntent%' ---- -dist sender send r68: sending batch 2 CPut to (n1,s1):1 +dist sender send r69: sending batch 2 CPut to (n1,s1):1 statement ok ROLLBACK @@ -120,7 +120,7 @@ WHERE message LIKE '%r$rangeid: sending batch%' AND message NOT LIKE '%QueryTxn%' AND message NOT LIKE '%ResolveIntent%' ---- -dist sender send r68: sending batch 2 CPut, 1 EndTxn to (n1,s1):1 +dist sender send r69: sending batch 2 CPut, 1 EndTxn to (n1,s1):1 query B SELECT count(*) > 0 FROM [ @@ -144,7 +144,7 @@ WHERE message LIKE '%r$rangeid: sending batch%' AND message NOT LIKE '%ResolveIntent%' AND operation NOT LIKE '%async%' ---- -dist sender send r68: sending batch 2 CPut, 1 EndTxn to (n1,s1):1 +dist sender send r69: sending batch 2 CPut, 1 EndTxn to (n1,s1):1 # Insert with RETURNING statement with side-effects should not auto-commit. # In this case division can (in principle) error out. @@ -170,8 +170,8 @@ WHERE message LIKE '%r$rangeid: sending batch%' AND message NOT LIKE '%ResolveIntent%' AND operation NOT LIKE '%async%' ---- -dist sender send r68: sending batch 2 CPut to (n1,s1):1 -dist sender send r68: sending batch 1 EndTxn to (n1,s1):1 +dist sender send r69: sending batch 2 CPut to (n1,s1):1 +dist sender send r69: sending batch 1 EndTxn to (n1,s1):1 # Another way to test the scenario above: generate an error and ensure that the # mutation was not committed. @@ -209,7 +209,7 @@ WHERE message LIKE '%r$rangeid: sending batch%' AND message NOT LIKE '%QueryTxn%' AND message NOT LIKE '%ResolveIntent%' ---- -dist sender send r68: sending batch 1 Put, 1 EndTxn to (n1,s1):1 +dist sender send r69: sending batch 1 Put, 1 EndTxn to (n1,s1):1 # Multi-row upsert should auto-commit. query B @@ -233,7 +233,7 @@ WHERE message LIKE '%r$rangeid: sending batch%' AND message NOT LIKE '%QueryTxn%' AND message NOT LIKE '%ResolveIntent%' ---- -dist sender send r68: sending batch 2 Put, 1 EndTxn to (n1,s1):1 +dist sender send r69: sending batch 2 Put, 1 EndTxn to (n1,s1):1 # No auto-commit inside a transaction. 
statement ok @@ -258,7 +258,7 @@ WHERE message LIKE '%r$rangeid: sending batch%' AND message NOT LIKE '%QueryTxn%' AND message NOT LIKE '%ResolveIntent%' ---- -dist sender send r68: sending batch 2 Put to (n1,s1):1 +dist sender send r69: sending batch 2 Put to (n1,s1):1 statement ok ROLLBACK @@ -285,7 +285,7 @@ WHERE message LIKE '%r$rangeid: sending batch%' AND message NOT LIKE '%QueryTxn%' AND message NOT LIKE '%ResolveIntent%' ---- -dist sender send r68: sending batch 2 Put, 1 EndTxn to (n1,s1):1 +dist sender send r69: sending batch 2 Put, 1 EndTxn to (n1,s1):1 # TODO(radu): allow non-side-effecting projections. query B @@ -310,8 +310,8 @@ WHERE message LIKE '%r$rangeid: sending batch%' AND message NOT LIKE '%ResolveIntent%' AND operation NOT LIKE '%async%' ---- -dist sender send r68: sending batch 2 Put to (n1,s1):1 -dist sender send r68: sending batch 1 EndTxn to (n1,s1):1 +dist sender send r69: sending batch 2 Put to (n1,s1):1 +dist sender send r69: sending batch 1 EndTxn to (n1,s1):1 # Upsert with RETURNING statement with side-effects should not auto-commit. # In this case division can (in principle) error out. @@ -337,8 +337,8 @@ WHERE message LIKE '%r$rangeid: sending batch%' AND message NOT LIKE '%ResolveIntent%' AND operation NOT LIKE '%async%' ---- -dist sender send r68: sending batch 2 Put to (n1,s1):1 -dist sender send r68: sending batch 1 EndTxn to (n1,s1):1 +dist sender send r69: sending batch 2 Put to (n1,s1):1 +dist sender send r69: sending batch 1 EndTxn to (n1,s1):1 # Another way to test the scenario above: generate an error and ensure that the # mutation was not committed. @@ -376,8 +376,8 @@ WHERE message LIKE '%r$rangeid: sending batch%' AND message NOT LIKE '%QueryTxn%' AND message NOT LIKE '%ResolveIntent%' ---- -dist sender send r68: sending batch 1 Scan to (n1,s1):1 -dist sender send r68: sending batch 2 Put, 1 EndTxn to (n1,s1):1 +dist sender send r69: sending batch 1 Scan to (n1,s1):1 +dist sender send r69: sending batch 2 Put, 1 EndTxn to (n1,s1):1 # No auto-commit inside a transaction. statement ok @@ -402,8 +402,8 @@ WHERE message LIKE '%r$rangeid: sending batch%' AND message NOT LIKE '%QueryTxn%' AND message NOT LIKE '%ResolveIntent%' ---- -dist sender send r68: sending batch 1 Scan to (n1,s1):1 -dist sender send r68: sending batch 2 Put to (n1,s1):1 +dist sender send r69: sending batch 1 Scan to (n1,s1):1 +dist sender send r69: sending batch 2 Put to (n1,s1):1 statement ok ROLLBACK @@ -430,8 +430,8 @@ WHERE message LIKE '%r$rangeid: sending batch%' AND message NOT LIKE '%QueryTxn%' AND message NOT LIKE '%ResolveIntent%' ---- -dist sender send r68: sending batch 1 Scan to (n1,s1):1 -dist sender send r68: sending batch 2 Put, 1 EndTxn to (n1,s1):1 +dist sender send r69: sending batch 1 Scan to (n1,s1):1 +dist sender send r69: sending batch 2 Put, 1 EndTxn to (n1,s1):1 # TODO(radu): allow non-side-effecting projections. query B @@ -456,9 +456,9 @@ WHERE message LIKE '%r$rangeid: sending batch%' AND message NOT LIKE '%ResolveIntent%' AND operation NOT LIKE '%async%' ---- -dist sender send r68: sending batch 1 Scan to (n1,s1):1 -dist sender send r68: sending batch 2 Put to (n1,s1):1 -dist sender send r68: sending batch 1 EndTxn to (n1,s1):1 +dist sender send r69: sending batch 1 Scan to (n1,s1):1 +dist sender send r69: sending batch 2 Put to (n1,s1):1 +dist sender send r69: sending batch 1 EndTxn to (n1,s1):1 # Update with RETURNING statement with side-effects should not auto-commit. # In this case division can (in principle) error out. 
@@ -484,9 +484,9 @@ WHERE message LIKE '%r$rangeid: sending batch%' AND message NOT LIKE '%ResolveIntent%' AND operation NOT LIKE '%async%' ---- -dist sender send r68: sending batch 1 Scan to (n1,s1):1 -dist sender send r68: sending batch 2 Put to (n1,s1):1 -dist sender send r68: sending batch 1 EndTxn to (n1,s1):1 +dist sender send r69: sending batch 1 Scan to (n1,s1):1 +dist sender send r69: sending batch 2 Put to (n1,s1):1 +dist sender send r69: sending batch 1 EndTxn to (n1,s1):1 # Another way to test the scenario above: generate an error and ensure that the # mutation was not committed. @@ -524,7 +524,7 @@ WHERE message LIKE '%r$rangeid: sending batch%' AND message NOT LIKE '%QueryTxn%' AND message NOT LIKE '%ResolveIntent%' ---- -dist sender send r68: sending batch 1 Del, 1 EndTxn to (n1,s1):1 +dist sender send r69: sending batch 1 Del, 1 EndTxn to (n1,s1):1 # Multi-row delete should auto-commit. query B @@ -548,7 +548,7 @@ WHERE message LIKE '%r$rangeid: sending batch%' AND message NOT LIKE '%QueryTxn%' AND message NOT LIKE '%ResolveIntent%' ---- -dist sender send r68: sending batch 1 DelRng, 1 EndTxn to (n1,s1):1 +dist sender send r69: sending batch 1 DelRng, 1 EndTxn to (n1,s1):1 # No auto-commit inside a transaction. statement ok @@ -573,7 +573,7 @@ WHERE message LIKE '%r$rangeid: sending batch%' AND message NOT LIKE '%QueryTxn%' AND message NOT LIKE '%ResolveIntent%' ---- -dist sender send r68: sending batch 1 DelRng to (n1,s1):1 +dist sender send r69: sending batch 1 DelRng to (n1,s1):1 statement ok ROLLBACK @@ -600,8 +600,8 @@ WHERE message LIKE '%r$rangeid: sending batch%' AND message NOT LIKE '%QueryTxn%' AND message NOT LIKE '%ResolveIntent%' ---- -dist sender send r68: sending batch 1 Scan to (n1,s1):1 -dist sender send r68: sending batch 2 Del, 1 EndTxn to (n1,s1):1 +dist sender send r69: sending batch 1 Scan to (n1,s1):1 +dist sender send r69: sending batch 2 Del, 1 EndTxn to (n1,s1):1 # TODO(radu): allow non-side-effecting projections. query B @@ -626,9 +626,9 @@ WHERE message LIKE '%r$rangeid: sending batch%' AND message NOT LIKE '%ResolveIntent%' AND operation NOT LIKE '%async%' ---- -dist sender send r68: sending batch 1 Scan to (n1,s1):1 -dist sender send r68: sending batch 2 Del to (n1,s1):1 -dist sender send r68: sending batch 1 EndTxn to (n1,s1):1 +dist sender send r69: sending batch 1 Scan to (n1,s1):1 +dist sender send r69: sending batch 2 Del to (n1,s1):1 +dist sender send r69: sending batch 1 EndTxn to (n1,s1):1 # Insert with RETURNING statement with side-effects should not auto-commit. # In this case division can (in principle) error out. 
@@ -654,9 +654,9 @@ WHERE message LIKE '%r$rangeid: sending batch%' AND message NOT LIKE '%ResolveIntent%' AND operation NOT LIKE '%async%' ---- -dist sender send r68: sending batch 1 Scan to (n1,s1):1 -dist sender send r68: sending batch 2 Del to (n1,s1):1 -dist sender send r68: sending batch 1 EndTxn to (n1,s1):1 +dist sender send r69: sending batch 1 Scan to (n1,s1):1 +dist sender send r69: sending batch 2 Del to (n1,s1):1 +dist sender send r69: sending batch 1 EndTxn to (n1,s1):1 statement ok INSERT INTO ab VALUES (12, 0); @@ -706,9 +706,9 @@ WHERE message LIKE '%r$rangeid: sending batch%' AND message NOT LIKE '%ResolveIntent%' AND operation NOT LIKE '%async%' ---- -dist sender send r68: sending batch 2 CPut to (n1,s1):1 -dist sender send r68: sending batch 2 Get to (n1,s1):1 -dist sender send r68: sending batch 1 EndTxn to (n1,s1):1 +dist sender send r69: sending batch 2 CPut to (n1,s1):1 +dist sender send r69: sending batch 2 Get to (n1,s1):1 +dist sender send r69: sending batch 1 EndTxn to (n1,s1):1 query B SELECT count(*) > 0 FROM [ @@ -732,10 +732,10 @@ WHERE message LIKE '%r$rangeid: sending batch%' AND message NOT LIKE '%ResolveIntent%' AND operation NOT LIKE '%async%' ---- -dist sender send r68: sending batch 1 Scan to (n1,s1):1 -dist sender send r68: sending batch 1 Put to (n1,s1):1 -dist sender send r68: sending batch 1 Scan to (n1,s1):1 -dist sender send r68: sending batch 1 EndTxn to (n1,s1):1 +dist sender send r69: sending batch 1 Scan to (n1,s1):1 +dist sender send r69: sending batch 1 Put to (n1,s1):1 +dist sender send r69: sending batch 1 Scan to (n1,s1):1 +dist sender send r69: sending batch 1 EndTxn to (n1,s1):1 query B SELECT count(*) > 0 FROM [ @@ -760,10 +760,10 @@ WHERE message LIKE '%r$rangeid: sending batch%' AND message NOT LIKE '%ResolveIntent%' AND operation NOT LIKE '%async%' ---- -dist sender send r68: sending batch 1 Get to (n1,s1):1 -dist sender send r68: sending batch 1 Del to (n1,s1):1 -dist sender send r68: sending batch 1 Scan to (n1,s1):1 -dist sender send r68: sending batch 1 EndTxn to (n1,s1):1 +dist sender send r69: sending batch 1 Get to (n1,s1):1 +dist sender send r69: sending batch 1 Del to (n1,s1):1 +dist sender send r69: sending batch 1 Scan to (n1,s1):1 +dist sender send r69: sending batch 1 EndTxn to (n1,s1):1 # Test with a single cascade, which should use autocommit. 
statement ok @@ -794,9 +794,9 @@ WHERE message LIKE '%r$rangeid: sending batch%' AND message NOT LIKE '%ResolveIntent%' AND operation NOT LIKE '%async%' ---- -dist sender send r68: sending batch 1 Del to (n1,s1):1 -dist sender send r68: sending batch 1 Scan to (n1,s1):1 -dist sender send r68: sending batch 1 Del, 1 EndTxn to (n1,s1):1 +dist sender send r69: sending batch 1 Del to (n1,s1):1 +dist sender send r69: sending batch 1 Scan to (n1,s1):1 +dist sender send r69: sending batch 1 Del, 1 EndTxn to (n1,s1):1 # ----------------------- # Multiple mutation tests @@ -827,9 +827,9 @@ WHERE message LIKE '%r$rangeid: sending batch%' AND message NOT LIKE '%ResolveIntent%' AND operation NOT LIKE '%async%' ---- -dist sender send r68: sending batch 2 CPut to (n1,s1):1 -dist sender send r68: sending batch 2 CPut to (n1,s1):1 -dist sender send r68: sending batch 1 EndTxn to (n1,s1):1 +dist sender send r69: sending batch 2 CPut to (n1,s1):1 +dist sender send r69: sending batch 2 CPut to (n1,s1):1 +dist sender send r69: sending batch 1 EndTxn to (n1,s1):1 query B SELECT count(*) > 0 FROM [ @@ -855,9 +855,9 @@ WHERE message LIKE '%r$rangeid: sending batch%' AND message NOT LIKE '%ResolveIntent%' AND operation NOT LIKE '%async%' ---- -dist sender send r68: sending batch 2 CPut to (n1,s1):1 -dist sender send r68: sending batch 2 CPut to (n1,s1):1 -dist sender send r68: sending batch 1 EndTxn to (n1,s1):1 +dist sender send r69: sending batch 2 CPut to (n1,s1):1 +dist sender send r69: sending batch 2 CPut to (n1,s1):1 +dist sender send r69: sending batch 1 EndTxn to (n1,s1):1 # Check that the statement can still be auto-committed when the txn rows written # erring guardrail is enabled. @@ -891,7 +891,7 @@ WHERE message LIKE '%r$rangeid: sending batch%' AND message NOT LIKE '%QueryTxn%' AND message NOT LIKE '%ResolveIntent%' ---- -dist sender send r68: sending batch 1 CPut, 1 EndTxn to (n1,s1):1 +dist sender send r69: sending batch 1 CPut, 1 EndTxn to (n1,s1):1 query error pq: txn has written 2 rows, which is above the limit INSERT INTO guardrails VALUES (2), (3) diff --git a/pkg/sql/opt/exec/execbuilder/testdata/delete b/pkg/sql/opt/exec/execbuilder/testdata/delete index 2d3fba5bbf9e..25bbe515d101 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/delete +++ b/pkg/sql/opt/exec/execbuilder/testdata/delete @@ -231,9 +231,9 @@ SELECT operation, message FROM [SHOW KV TRACE FOR SESSION] WHERE message LIKE '%DelRange%' OR message LIKE '%DelRng%' ---- delete range DelRange /Table/110/1 - /Table/110/2 -dist sender send r68: sending batch 1 DelRng to (n1,s1):1 +dist sender send r69: sending batch 1 DelRng to (n1,s1):1 delete range DelRange /Table/110/1/601/0 - /Table/110/2 -dist sender send r68: sending batch 1 DelRng to (n1,s1):1 +dist sender send r69: sending batch 1 DelRng to (n1,s1):1 # Ensure that DelRange requests are autocommitted when DELETE FROM happens on a # chunk of fewer than 600 keys. @@ -252,7 +252,7 @@ SELECT operation, message FROM [SHOW KV TRACE FOR SESSION] WHERE message LIKE '%Del%' OR message LIKE '%sending batch%' ---- delete range Del /Table/110/1/5/0 -dist sender send r68: sending batch 1 Del, 1 EndTxn to (n1,s1):1 +dist sender send r69: sending batch 1 Del, 1 EndTxn to (n1,s1):1 # Ensure that we send DelRanges when doing a point delete operation on a table # that has multiple column families. 
@@ -271,7 +271,7 @@ SELECT operation, message FROM [SHOW KV TRACE FOR SESSION] WHERE message LIKE '%Del%' OR message LIKE '%sending batch%' ---- delete range DelRange /Table/111/1/5 - /Table/111/1/6 -dist sender send r68: sending batch 1 DelRng to (n1,s1):1 +dist sender send r69: sending batch 1 DelRng to (n1,s1):1 statement ok CREATE TABLE xyz ( diff --git a/pkg/sql/opt/exec/execbuilder/testdata/dist_vectorize b/pkg/sql/opt/exec/execbuilder/testdata/dist_vectorize index d3893cc3effd..fb8732f83d19 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/dist_vectorize +++ b/pkg/sql/opt/exec/execbuilder/testdata/dist_vectorize @@ -30,7 +30,7 @@ query TTTI rowsort SELECT start_key, end_key, replicas, lease_holder from [SHOW RANGES FROM TABLE kv WITH DETAILS] ORDER BY 1 ---- - …/1/1 {1} 1 + …/1/1 {1} 1 …/1/1 …/1/2 {1} 1 …/1/2 …/1/3 {2} 2 …/1/3 …/1/4 {3} 3 diff --git a/pkg/sql/opt/exec/execbuilder/testdata/distsql_agg b/pkg/sql/opt/exec/execbuilder/testdata/distsql_agg index 23f7d3d4a246..65ba84037fbe 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/distsql_agg +++ b/pkg/sql/opt/exec/execbuilder/testdata/distsql_agg @@ -18,7 +18,7 @@ SELECT start_key, end_key, replicas, lease_holder FROM [SHOW RANGES FROM TABLE d ORDER BY 1 ---- start_key end_key replicas lease_holder - …/1/1 {1} 1 + …/1/1 {1} 1 …/1/1 …/1/2 {2} 2 …/1/2 …/1/3 {3} 3 …/1/3 …/1/4 {4} 4 diff --git a/pkg/sql/opt/exec/execbuilder/testdata/distsql_distinct_on b/pkg/sql/opt/exec/execbuilder/testdata/distsql_distinct_on index fac4dc66d4f2..761ee012c315 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/distsql_distinct_on +++ b/pkg/sql/opt/exec/execbuilder/testdata/distsql_distinct_on @@ -48,7 +48,7 @@ SELECT start_key, end_key, replicas, lease_holder from [SHOW RANGES FROM TABLE x ORDER BY 1 ---- start_key end_key replicas lease_holder - …/1/2 {1} 1 + …/1/2 {1} 1 …/1/2 …/1/4 {2} 2 …/1/4 …/1/6 {3} 3 …/1/6 …/1/7 {4} 4 diff --git a/pkg/sql/opt/exec/execbuilder/testdata/distsql_group_join b/pkg/sql/opt/exec/execbuilder/testdata/distsql_group_join index 6c81c3171cfb..664df4846d33 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/distsql_group_join +++ b/pkg/sql/opt/exec/execbuilder/testdata/distsql_group_join @@ -18,7 +18,7 @@ SELECT start_key, end_key, replicas, lease_holder from [SHOW RANGES FROM TABLE d ORDER BY 1 ---- start_key end_key replicas lease_holder - …/1/1 {1} 1 + …/1/1 {1} 1 …/1/1 …/1/2 {2} 2 …/1/2 …/1/3 {3} 3 …/1/3 …/1/4 {4} 4 diff --git a/pkg/sql/opt/exec/execbuilder/testdata/distsql_indexjoin b/pkg/sql/opt/exec/execbuilder/testdata/distsql_indexjoin index 913340cb4955..cbcd591e2ece 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/distsql_indexjoin +++ b/pkg/sql/opt/exec/execbuilder/testdata/distsql_indexjoin @@ -17,7 +17,7 @@ SELECT start_key, end_key, replicas, lease_holder from [SHOW RANGES FROM INDEX t ORDER BY 1 ---- start_key end_key replicas lease_holder - …/10 {1} 1 + …/10 {1} 1 …/10 …/20 {2} 2 …/20 …/30 {3} 3 …/30 …/40 {4} 4 diff --git a/pkg/sql/opt/exec/execbuilder/testdata/distsql_inverted_index b/pkg/sql/opt/exec/execbuilder/testdata/distsql_inverted_index index a64bb040dd89..0daf18b12726 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/distsql_inverted_index +++ b/pkg/sql/opt/exec/execbuilder/testdata/distsql_inverted_index @@ -65,7 +65,7 @@ SELECT start_key, end_key, replicas, lease_holder FROM [SHOW RANGES FROM INDEX json_tab@primary WITH DETAILS] ORDER BY lease_holder, start_key ---- start_key end_key replicas lease_holder - …/10 {1} 1 + …/10 {1} 1 …/10 …/20 {2} 2 …/20 …/ {3} 3 @@ -74,7 +74,7 @@ SELECT 
start_key, end_key, replicas, lease_holder FROM [SHOW RANGES FROM TABLE json_tab WITH DETAILS] ORDER BY lease_holder, start_key ---- start_key end_key replicas lease_holder - …/1/10 {1} 1 + …/1/10 {1} 1 …/2 …/3 {1} 1 …/3 {1} 1 …/1/10 …/1/20 {2} 2 diff --git a/pkg/sql/opt/exec/execbuilder/testdata/distsql_join b/pkg/sql/opt/exec/execbuilder/testdata/distsql_join index c09721f23175..7c02ea0f8cfa 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/distsql_join +++ b/pkg/sql/opt/exec/execbuilder/testdata/distsql_join @@ -18,7 +18,7 @@ SELECT start_key, end_key, replicas, lease_holder from [SHOW RANGES FROM TABLE d ORDER BY 1 ---- start_key end_key replicas lease_holder - …/1/1 {1} 1 + …/1/1 {1} 1 …/1/1 …/1/2 {2} 2 …/1/2 …/1/3 {3} 3 …/1/3 …/1/4 {4} 4 diff --git a/pkg/sql/opt/exec/execbuilder/testdata/distsql_merge_join b/pkg/sql/opt/exec/execbuilder/testdata/distsql_merge_join index fb2ebeeb6e6e..ada08f81e134 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/distsql_merge_join +++ b/pkg/sql/opt/exec/execbuilder/testdata/distsql_merge_join @@ -107,7 +107,7 @@ SELECT start_key, end_key, replicas, lease_holder from [SHOW RANGES FROM TABLE p ORDER BY 1 ---- start_key end_key replicas lease_holder - …/1/8 {1} 1 + …/1/8 {1} 1 …/1/16 …/1/24 {1} 1 …/1/24 …/1/32 {1} 1 …/1/32 {1} 1 diff --git a/pkg/sql/opt/exec/execbuilder/testdata/distsql_misc b/pkg/sql/opt/exec/execbuilder/testdata/distsql_misc index 52f20dd7ef63..085d15c7ff0b 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/distsql_misc +++ b/pkg/sql/opt/exec/execbuilder/testdata/distsql_misc @@ -73,7 +73,7 @@ SELECT start_key, end_key, replicas, lease_holder FROM [SHOW RANGES FROM TABLE d ORDER BY 1 ---- start_key end_key replicas lease_holder - …/1/1 {1} 1 + …/1/1 {1} 1 …/1/1 …/1/2 {2} 2 …/1/2 …/1/3 {3} 3 …/1/3 …/1/4 {4} 4 diff --git a/pkg/sql/opt/exec/execbuilder/testdata/distsql_numtables b/pkg/sql/opt/exec/execbuilder/testdata/distsql_numtables index 83bbfef52433..9f09703fbda1 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/distsql_numtables +++ b/pkg/sql/opt/exec/execbuilder/testdata/distsql_numtables @@ -28,14 +28,14 @@ SELECT start_key, end_key, replicas, lease_holder FROM [SHOW RANGES FROM TABLE N ORDER BY 1 ---- start_key end_key replicas lease_holder - {1} 1 + {1} 1 query TTTI colnames,rowsort SELECT start_key, end_key, replicas, lease_holder FROM [SHOW RANGES FROM TABLE NumToStr WITH DETAILS] ORDER BY 1 ---- start_key end_key replicas lease_holder - …/1/2000 {1} 1 + …/1/2000 {1} 1 …/1/2000 …/1/4000 {2} 2 …/1/4000 …/1/6000 {3} 3 …/1/6000 …/1/8000 {4} 4 diff --git a/pkg/sql/opt/exec/execbuilder/testdata/distsql_ordinality b/pkg/sql/opt/exec/execbuilder/testdata/distsql_ordinality index 2762ec0ad4bd..5e5ed55e79e4 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/distsql_ordinality +++ b/pkg/sql/opt/exec/execbuilder/testdata/distsql_ordinality @@ -25,7 +25,7 @@ SELECT start_key, end_key, replicas, lease_holder from [SHOW RANGES FROM TABLE x ORDER BY 1 ---- start_key end_key replicas lease_holder - …/1/2 {1} 1 + …/1/2 {1} 1 …/1/2 …/1/4 {2} 2 …/1/4 …/1/6 {3} 3 …/1/6 …/1/7 {4} 4 diff --git a/pkg/sql/opt/exec/execbuilder/testdata/distsql_tighten_spans b/pkg/sql/opt/exec/execbuilder/testdata/distsql_tighten_spans index a03ec927e5da..afcaffb185a0 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/distsql_tighten_spans +++ b/pkg/sql/opt/exec/execbuilder/testdata/distsql_tighten_spans @@ -97,7 +97,7 @@ SELECT start_key, end_key, replicas, lease_holder from [SHOW RANGES FROM TABLE p ORDER BY lease_holder, start_key ---- start_key end_key replicas 
lease_holder - …/1/2 {1} 1 + …/1/2 {1} 1 …/1/2 …/2/0 {3} 3 …/2/0 …/2/2 {4} 4 …/2/2 {5} 5 diff --git a/pkg/sql/opt/exec/execbuilder/testdata/distsql_union b/pkg/sql/opt/exec/execbuilder/testdata/distsql_union index acbd1589fafa..80fed52ee199 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/distsql_union +++ b/pkg/sql/opt/exec/execbuilder/testdata/distsql_union @@ -23,7 +23,7 @@ SELECT start_key, end_key, replicas, lease_holder from [SHOW RANGES FROM TABLE x ORDER BY 1 ---- start_key end_key replicas lease_holder - …/1/2 {1} 1 + …/1/2 {1} 1 …/1/2 …/1/3 {2} 2 …/1/3 …/1/4 {3} 3 …/1/4 …/1/5 {4} 4 diff --git a/pkg/sql/opt/exec/execbuilder/testdata/distsql_window b/pkg/sql/opt/exec/execbuilder/testdata/distsql_window index a991727d6574..7e65bce17b60 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/distsql_window +++ b/pkg/sql/opt/exec/execbuilder/testdata/distsql_window @@ -18,7 +18,7 @@ SELECT start_key, end_key, replicas, lease_holder FROM [SHOW RANGES FROM TABLE d ORDER BY 1 ---- start_key end_key replicas lease_holder - …/1/1 {1} 1 + …/1/1 {1} 1 …/1/1 …/1/2 {2} 2 …/1/2 …/1/3 {3} 3 …/1/3 …/1/4 {4} 4 diff --git a/pkg/sql/opt/exec/execbuilder/testdata/execute_internally_builtin b/pkg/sql/opt/exec/execbuilder/testdata/execute_internally_builtin index 4f121551bd6b..3c9d9d6a1315 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/execute_internally_builtin +++ b/pkg/sql/opt/exec/execbuilder/testdata/execute_internally_builtin @@ -215,3 +215,37 @@ root statement ok SELECT crdb_internal.execute_internally('EXPLAIN ANALYZE SELECT 1;'); + +# Ensure that StmtTimeout for a session-independent IE cannot be overridden. +subtest stmt_timeout + +statement ok +SET CLUSTER SETTING sql.defaults.statement_timeout = '36000000ms'; + +statement ok +SET statement_timeout = '39600000ms'; + +query T +SELECT crdb_internal.execute_internally('SHOW statement_timeout;'); +---- +0 + +# Ensure that a session-bound IE still inherits from session vars, if available; +# otherwise, it inherits from the cluster setting.
+query T +SELECT crdb_internal.execute_internally('SHOW statement_timeout;', true); +---- +39600000 + +statement ok +RESET statement_timeout; + +query T +SELECT crdb_internal.execute_internally('SHOW statement_timeout;', true); +---- +36000000 + +statement ok +RESET CLUSTER SETTING sql.defaults.statement_timeout; + +subtest end diff --git a/pkg/sql/opt/exec/execbuilder/testdata/experimental_distsql_planning_5node b/pkg/sql/opt/exec/execbuilder/testdata/experimental_distsql_planning_5node index 3c52d14312dc..e5ae6e2f2637 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/experimental_distsql_planning_5node +++ b/pkg/sql/opt/exec/execbuilder/testdata/experimental_distsql_planning_5node @@ -23,7 +23,7 @@ SELECT start_key, end_key, replicas, lease_holder from [SHOW RANGES FROM TABLE k ORDER BY 1 ---- start_key end_key replicas lease_holder - …/1/1 {1} 1 + …/1/1 {1} 1 …/1/1 …/1/2 {1} 1 …/1/2 …/1/3 {2} 2 …/1/3 …/1/4 {3} 3 diff --git a/pkg/sql/opt/exec/execbuilder/testdata/explain b/pkg/sql/opt/exec/execbuilder/testdata/explain index 99ff9356554a..705563997235 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/explain +++ b/pkg/sql/opt/exec/execbuilder/testdata/explain @@ -447,7 +447,7 @@ vectorized: true │ │ └── • render │ │ │ │ │ └── • filter - │ │ │ filter: classoid = 4294967091 + │ │ │ filter: classoid = 4294967090 │ │ │ │ │ └── • virtual table │ │ table: kv_catalog_comments@primary diff --git a/pkg/sql/opt/exec/execbuilder/testdata/explain_analyze_plans b/pkg/sql/opt/exec/execbuilder/testdata/explain_analyze_plans index 650f1238eb12..2f886bf7cf8f 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/explain_analyze_plans +++ b/pkg/sql/opt/exec/execbuilder/testdata/explain_analyze_plans @@ -37,7 +37,7 @@ SELECT start_key, end_key, replicas, lease_holder from [SHOW RANGES FROM TABLE k ORDER BY 1 ---- start_key end_key replicas lease_holder - …/1/1 {1} 1 + …/1/1 {1} 1 …/1/1 …/1/2 {1} 1 …/1/2 …/1/3 {2} 2 …/1/3 …/1/4 {3} 3 diff --git a/pkg/sql/opt/exec/execbuilder/testdata/inverted_filter_geospatial_dist b/pkg/sql/opt/exec/execbuilder/testdata/inverted_filter_geospatial_dist index dded0be54424..97f78435c04b 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/inverted_filter_geospatial_dist +++ b/pkg/sql/opt/exec/execbuilder/testdata/inverted_filter_geospatial_dist @@ -104,7 +104,7 @@ SELECT start_key, end_key, replicas, lease_holder FROM [SHOW RANGES FROM INDEX g ORDER BY 1 ---- start_key end_key replicas lease_holder - …/1152921574000000000 {1} 1 + …/1152921574000000000 {1} 1 …/1152921574000000000 {2} 2 # Distributed. TODO(treilly): This claims to be distributed, but it isn't. What gives? @@ -174,7 +174,7 @@ SELECT start_key, end_key, replicas, lease_holder FROM [SHOW RANGES FROM INDEX g ORDER BY 1 ---- start_key end_key replicas lease_holder - …/1152921574000000000 {2} 2 + …/1152921574000000000 {2} 2 …/1152921574000000000 {2} 2 # Filtering is placed at node 2. We need a retry here to account for possibly @@ -246,7 +246,7 @@ SELECT start_key, end_key, replicas, lease_holder FROM [SHOW RANGES FROM INDEX g ORDER BY 1 ---- start_key end_key replicas lease_holder - …/1152921574000000000 {2} 2 + …/1152921574000000000 {2} 2 …/1152921574000000000 {2} 2 query ITTTI colnames,rowsort @@ -254,7 +254,7 @@ SELECT range_id, start_key, end_key, replicas, lease_holder FROM [SHOW RANGES FR ORDER BY lease_holder, start_key ---- range_id start_key end_key replicas lease_holder -68 {2} 2 +69 {2} 2 # We should see a distributed execution (though need to retry to purge possibly # stale dist sender caches). 
diff --git a/pkg/sql/opt/exec/execbuilder/testdata/inverted_join_geospatial_dist b/pkg/sql/opt/exec/execbuilder/testdata/inverted_join_geospatial_dist index 92381ed15ec0..79545b3e6e63 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/inverted_join_geospatial_dist +++ b/pkg/sql/opt/exec/execbuilder/testdata/inverted_join_geospatial_dist @@ -39,7 +39,7 @@ query TTTI colnames SELECT start_key, end_key, replicas, lease_holder from [SHOW EXPERIMENTAL_RANGES FROM TABLE ltable WITH DETAILS] ORDER BY lease_holder ---- start_key end_key replicas lease_holder - …/1/2 {1} 1 + …/1/2 {1} 1 …/1/2 …/1/3 {2} 2 …/1/3 {3} 3 diff --git a/pkg/sql/opt/exec/execbuilder/testdata/inverted_join_geospatial_dist_vec b/pkg/sql/opt/exec/execbuilder/testdata/inverted_join_geospatial_dist_vec index 64bcc7d74168..7fef637b3906 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/inverted_join_geospatial_dist_vec +++ b/pkg/sql/opt/exec/execbuilder/testdata/inverted_join_geospatial_dist_vec @@ -39,7 +39,7 @@ query TTTI colnames SELECT start_key, end_key, replicas, lease_holder from [SHOW EXPERIMENTAL_RANGES FROM TABLE ltable WITH DETAILS] ORDER BY lease_holder ---- start_key end_key replicas lease_holder - …/1/2 {1} 1 + …/1/2 {1} 1 …/1/2 …/1/3 {2} 2 …/1/3 {3} 3 diff --git a/pkg/sql/opt/exec/execbuilder/testdata/inverted_join_json_array_dist b/pkg/sql/opt/exec/execbuilder/testdata/inverted_join_json_array_dist index 0de731061b84..b96ccaf85e98 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/inverted_join_json_array_dist +++ b/pkg/sql/opt/exec/execbuilder/testdata/inverted_join_json_array_dist @@ -65,7 +65,7 @@ SELECT start_key, end_key, replicas, lease_holder FROM [SHOW RANGES FROM INDEX json_tab@primary WITH DETAILS] ORDER BY lease_holder, start_key ---- start_key end_key replicas lease_holder - …/10 {1} 1 + …/10 {1} 1 …/10 …/20 {2} 2 …/20 …/ {3} 3 diff --git a/pkg/sql/opt/exec/execbuilder/testdata/inverted_join_multi_column_dist b/pkg/sql/opt/exec/execbuilder/testdata/inverted_join_multi_column_dist index 98edac0ec898..c6369a5521fc 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/inverted_join_multi_column_dist +++ b/pkg/sql/opt/exec/execbuilder/testdata/inverted_join_multi_column_dist @@ -81,7 +81,7 @@ SELECT start_key, end_key, replicas, lease_holder FROM [SHOW EXPERIMENTAL_RANGES FROM TABLE j2 WITH DETAILS] ORDER BY lease_holder ---- start_key end_key replicas lease_holder - …/1/44 {1} 1 + …/1/44 {1} 1 …/1/44 …/1/88 {2} 2 …/1/88 {3} 3 diff --git a/pkg/sql/opt/exec/execbuilder/testdata/lookup_join b/pkg/sql/opt/exec/execbuilder/testdata/lookup_join index d92223694d45..cfff5af29020 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/lookup_join +++ b/pkg/sql/opt/exec/execbuilder/testdata/lookup_join @@ -429,7 +429,7 @@ SELECT start_key, end_key, replicas, lease_holder from [SHOW RANGES FROM TABLE d ORDER BY 1 ---- start_key end_key replicas lease_holder - …/1/1 {1} 1 + …/1/1 {1} 1 …/1/1 …/1/2 {2} 2 …/1/2 …/1/3 {3} 3 …/1/3 …/1/4 {4} 4 diff --git a/pkg/sql/opt/exec/execbuilder/testdata/merge_join_dist_vec b/pkg/sql/opt/exec/execbuilder/testdata/merge_join_dist_vec index 1d94728225cd..75d7b5389f93 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/merge_join_dist_vec +++ b/pkg/sql/opt/exec/execbuilder/testdata/merge_join_dist_vec @@ -30,7 +30,7 @@ query TTTI colnames SELECT start_key, end_key, replicas, lease_holder from [SHOW EXPERIMENTAL_RANGES FROM TABLE l WITH DETAILS] ORDER BY lease_holder ---- start_key end_key replicas lease_holder - …/1/2 {1} 1 + …/1/2 {1} 1 …/1/2 …/1/3 {2} 2 …/1/3 {3} 3 diff --git 
a/pkg/sql/opt/exec/execbuilder/testdata/scan_parallel b/pkg/sql/opt/exec/execbuilder/testdata/scan_parallel index 5ee36104e3ae..839fe25573ff 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/scan_parallel +++ b/pkg/sql/opt/exec/execbuilder/testdata/scan_parallel @@ -18,7 +18,7 @@ SELECT start_key, end_key, replicas, lease_holder FROM [SHOW RANGES FROM TABLE d ORDER BY 1 ---- start_key end_key replicas lease_holder - …/1/1 {1} 1 + …/1/1 {1} 1 …/1/1 …/1/2 {2} 2 …/1/2 …/1/3 {3} 3 …/1/3 …/1/4 {4} 4 diff --git a/pkg/sql/opt/exec/execbuilder/testdata/show_trace b/pkg/sql/opt/exec/execbuilder/testdata/show_trace index b02249dc98e7..ac82f35a42ab 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/show_trace +++ b/pkg/sql/opt/exec/execbuilder/testdata/show_trace @@ -228,7 +228,7 @@ SET tracing = off query T SELECT message FROM [SHOW TRACE FOR SESSION] WHERE message LIKE e'%1 CPut, 1 EndTxn%' AND message NOT LIKE e'%proposing command%' ---- -r70: sending batch 1 CPut, 1 EndTxn to (n1,s1):1 +r71: sending batch 1 CPut, 1 EndTxn to (n1,s1):1 node received request: 1 CPut, 1 EndTxn # Check that we can run set tracing regardless of the current tracing state. @@ -312,9 +312,9 @@ WHERE message LIKE '%r$rangeid: sending batch%' AND message NOT LIKE '%PushTxn%' AND message NOT LIKE '%QueryTxn%' ---- -dist sender send r70: sending batch 1 CPut to (n1,s1):1 -dist sender send r70: sending batch 1 EndTxn to (n1,s1):1 -dist sender send r70: sending batch 2 CPut, 1 EndTxn to (n1,s1):1 +dist sender send r71: sending batch 1 CPut to (n1,s1):1 +dist sender send r71: sending batch 1 EndTxn to (n1,s1):1 +dist sender send r71: sending batch 2 CPut, 1 EndTxn to (n1,s1):1 # Make another session trace. statement ok @@ -343,9 +343,9 @@ WHERE message LIKE '%r$rangeid: sending batch%' AND message NOT LIKE '%PushTxn%' AND message NOT LIKE '%QueryTxn%' ---- -dist sender send r70: sending batch 4 CPut, 1 EndTxn to (n1,s1):1 -dist sender send r70: sending batch 5 CPut to (n1,s1):1 -dist sender send r70: sending batch 1 EndTxn to (n1,s1):1 +dist sender send r71: sending batch 4 CPut, 1 EndTxn to (n1,s1):1 +dist sender send r71: sending batch 5 CPut to (n1,s1):1 +dist sender send r71: sending batch 1 EndTxn to (n1,s1):1 # make a table with some big strings in it. 
statement ok @@ -366,11 +366,11 @@ WHERE message LIKE '%r$rangeid: sending batch%' AND message NOT LIKE '%PushTxn%' AND message NOT LIKE '%QueryTxn%' ---- -dist sender send r70: sending batch 6 CPut to (n1,s1):1 -dist sender send r70: sending batch 6 CPut to (n1,s1):1 -dist sender send r70: sending batch 6 CPut to (n1,s1):1 -dist sender send r70: sending batch 6 CPut to (n1,s1):1 -dist sender send r70: sending batch 1 EndTxn to (n1,s1):1 +dist sender send r71: sending batch 6 CPut to (n1,s1):1 +dist sender send r71: sending batch 6 CPut to (n1,s1):1 +dist sender send r71: sending batch 6 CPut to (n1,s1):1 +dist sender send r71: sending batch 6 CPut to (n1,s1):1 +dist sender send r71: sending batch 1 EndTxn to (n1,s1):1 statement ok CREATE TABLE streamer (pk INT PRIMARY KEY, attribute INT, blob TEXT, INDEX(attribute), FAMILY (pk, attribute, blob)); @@ -399,4 +399,4 @@ WHERE message LIKE '%r$rangeid: sending batch 42 Get%' AND message NOT LIKE '%PushTxn%' AND message NOT LIKE '%QueryTxn%' ---- -dist sender send r70: sending batch 42 Get to (n1,s1):1 +dist sender send r71: sending batch 42 Get to (n1,s1):1 diff --git a/pkg/sql/opt/exec/execbuilder/tests/5node/generated_test.go b/pkg/sql/opt/exec/execbuilder/tests/5node/generated_test.go index cd335156a793..0389ab9bee43 100644 --- a/pkg/sql/opt/exec/execbuilder/tests/5node/generated_test.go +++ b/pkg/sql/opt/exec/execbuilder/tests/5node/generated_test.go @@ -31,7 +31,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/randutil" ) -const configIdx = 7 +const configIdx = 8 var execBuildLogicTestDir string diff --git a/pkg/sql/opt/exec/execbuilder/tests/fakedist-disk/generated_test.go b/pkg/sql/opt/exec/execbuilder/tests/fakedist-disk/generated_test.go index 5c14d9138439..8b3a2304ae00 100644 --- a/pkg/sql/opt/exec/execbuilder/tests/fakedist-disk/generated_test.go +++ b/pkg/sql/opt/exec/execbuilder/tests/fakedist-disk/generated_test.go @@ -31,7 +31,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/randutil" ) -const configIdx = 6 +const configIdx = 7 var execBuildLogicTestDir string diff --git a/pkg/sql/opt/exec/execbuilder/tests/fakedist-vec-off/generated_test.go b/pkg/sql/opt/exec/execbuilder/tests/fakedist-vec-off/generated_test.go index eece7bb6f635..346ce282ab09 100644 --- a/pkg/sql/opt/exec/execbuilder/tests/fakedist-vec-off/generated_test.go +++ b/pkg/sql/opt/exec/execbuilder/tests/fakedist-vec-off/generated_test.go @@ -31,7 +31,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/randutil" ) -const configIdx = 5 +const configIdx = 6 var execBuildLogicTestDir string diff --git a/pkg/sql/opt/exec/execbuilder/tests/fakedist/generated_test.go b/pkg/sql/opt/exec/execbuilder/tests/fakedist/generated_test.go index d7be269f41cb..3ef753a44219 100644 --- a/pkg/sql/opt/exec/execbuilder/tests/fakedist/generated_test.go +++ b/pkg/sql/opt/exec/execbuilder/tests/fakedist/generated_test.go @@ -31,7 +31,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/randutil" ) -const configIdx = 4 +const configIdx = 5 var execBuildLogicTestDir string diff --git a/pkg/sql/opt/exec/execbuilder/tests/local-mixed-24.1/generated_test.go b/pkg/sql/opt/exec/execbuilder/tests/local-mixed-24.1/generated_test.go index 103143a08f7b..85f9f6ac25a4 100644 --- a/pkg/sql/opt/exec/execbuilder/tests/local-mixed-24.1/generated_test.go +++ b/pkg/sql/opt/exec/execbuilder/tests/local-mixed-24.1/generated_test.go @@ -31,7 +31,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/randutil" ) -const configIdx = 18 +const 
configIdx = 19 var execBuildLogicTestDir string diff --git a/pkg/sql/opt/exec/execbuilder/tests/local-mixed-24.2/generated_test.go b/pkg/sql/opt/exec/execbuilder/tests/local-mixed-24.2/generated_test.go index b6977d03d71b..939c42627440 100644 --- a/pkg/sql/opt/exec/execbuilder/tests/local-mixed-24.2/generated_test.go +++ b/pkg/sql/opt/exec/execbuilder/tests/local-mixed-24.2/generated_test.go @@ -31,7 +31,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/randutil" ) -const configIdx = 19 +const configIdx = 20 var execBuildLogicTestDir string diff --git a/pkg/sql/opt/indexrec/testdata/geospatial b/pkg/sql/opt/indexrec/testdata/geospatial index 755cd1e6d148..d2750810ad9e 100644 --- a/pkg/sql/opt/indexrec/testdata/geospatial +++ b/pkg/sql/opt/indexrec/testdata/geospatial @@ -109,9 +109,7 @@ select │ │ ├── ["B\xfd\x10@\x00\x00\x00\x00\x00\x00", "B\xfd\x10@\x00\x00\x00\x00\x00\x00"] │ │ ├── ["B\xfd\x11\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x11\x00\x00\x00\x00\x00\x00\x00"] │ │ └── ["B\xfd\x14\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x14\x00\x00\x00\x00\x00\x00\x00"] - │ ├── cost: 253.575556 - │ ├── key: (11) - │ └── fd: (11)-->(14) + │ └── cost: 253.575556 └── filters └── st_covers(geom1:4, '01010000C00000000000000000000000000000000000000000000000000000000000000000') [outer=(4), immutable, constraints=(/4: (/NULL - ])] @@ -209,9 +207,7 @@ select │ │ ├── ["B\xfd\x10@\x00\x00\x00\x00\x00\x00", "B\xfd\x10@\x00\x00\x00\x00\x00\x00"] │ │ ├── ["B\xfd\x11\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x11\x00\x00\x00\x00\x00\x00\x00"] │ │ └── ["B\xfd\x14\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x14\x00\x00\x00\x00\x00\x00\x00"] - │ ├── cost: 253.575556 - │ ├── key: (11) - │ └── fd: (11)-->(14) + │ └── cost: 253.575556 └── filters └── st_covers('0102000000020000000000000000000000000000000000000000000000000000000000000000000040', geom2:8) [outer=(8), immutable, constraints=(/8: (/NULL - ])] @@ -353,9 +349,7 @@ select │ │ ├── ["B\xfd\x10@\x00\x00\x00\x00\x00\x00", "B\xfd\x10@\x00\x00\x00\x00\x00\x00"] │ │ ├── ["B\xfd\x11\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x11\x00\x00\x00\x00\x00\x00\x00"] │ │ └── ["B\xfd\x14\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x14\x00\x00\x00\x00\x00\x00\x00"] - │ ├── cost: 253.575556 - │ ├── key: (11) - │ └── fd: (11)-->(14) + │ └── cost: 253.575556 └── filters └── st_contains(geom2:8, '0102000000020000000000000000000000000000000000000000000000000000000000000000000040') [outer=(8), immutable, constraints=(/8: (/NULL - ])] @@ -513,9 +507,7 @@ select │ │ ├── ["B\xfd\x10@\x00\x00\x00\x00\x00\x00", "B\xfd\x10@\x00\x00\x00\x00\x00\x00"] │ │ ├── ["B\xfd\x11\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x11\x00\x00\x00\x00\x00\x00\x00"] │ │ └── ["B\xfd\x14\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x14\x00\x00\x00\x00\x00\x00\x00"] - │ ├── cost: 253.575556 - │ ├── key: (11) - │ └── fd: (11)-->(14) + │ └── cost: 253.575556 └── filters └── st_equals(geom1:4, '0102000000020000000000000000000000000000000000000000000000000000000000000000000040') [outer=(4), immutable, constraints=(/4: (/NULL - ])] @@ -615,9 +607,7 @@ select │ │ ├── ["B\xfd\x10@\x00\x00\x00\x00\x00\x00", "B\xfd\x10@\x00\x00\x00\x00\x00\x00"] │ │ ├── ["B\xfd\x11\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x11\x00\x00\x00\x00\x00\x00\x00"] │ │ └── ["B\xfd\x14\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x14\x00\x00\x00\x00\x00\x00\x00"] - │ ├── cost: 257.575556 - │ ├── key: (11) - │ └── fd: (11)-->(14) + │ └── cost: 257.575556 └── filters └── st_intersects(geom1:4, 
'01030000000100000005000000000000000000F03F0000000000000040000000000000F03F00000000000010400000000000000840000000000000104000000000000008400000000000000040000000000000F03F0000000000000040') [outer=(4), immutable, constraints=(/4: (/NULL - ])] @@ -750,9 +740,7 @@ select │ │ ├── ["B\xfdO\xb0\x00\x00\x00\x00\x00\x00", "B\xfdO\xb0\x00\x00\x00\x00\x00\x00"] │ │ ├── ["B\xfdO\xc0\x00\x00\x00\x00\x00\x00", "B\xfdO\xc0\x00\x00\x00\x00\x00\x00"] │ │ └── ["B\xfdP\x00\x00\x00\x00\x00\x00\x00", "B\xfdP\x00\x00\x00\x00\x00\x00\x00"] - │ ├── cost: 237.575556 - │ ├── key: (11) - │ └── fd: (11)-->(14) + │ └── cost: 237.575556 └── filters └── st_dwithinexclusive('0101000020E6100000A6272CF1807245C0A6B73F170DFC5240', geog2:9, 10.0, true) [outer=(9), immutable, constraints=(/9: (/NULL - ])] @@ -918,9 +906,7 @@ select │ │ ├── ["B\xfd\x10@\x00\x00\x00\x00\x00\x00", "B\xfd\x10@\x00\x00\x00\x00\x00\x00"] │ │ ├── ["B\xfd\x11\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x11\x00\x00\x00\x00\x00\x00\x00"] │ │ └── ["B\xfd\x14\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x14\x00\x00\x00\x00\x00\x00\x00"] - │ ├── cost: 257.575556 - │ ├── key: (11) - │ └── fd: (11)-->(14) + │ └── cost: 257.575556 └── filters ├── bbox1:6 && bbox2:7 [outer=(6,7), immutable, constraints=(/6: (/NULL - ]; /7: (/NULL - ])] └── 'BOX(1 2,3 4)' ~ geom1:4 [outer=(4), immutable] @@ -1114,9 +1100,7 @@ select │ │ ├── ["B\xfd\x10@\x00\x00\x00\x00\x00\x00", "B\xfd\x10@\x00\x00\x00\x00\x00\x00"] │ │ ├── ["B\xfd\x11\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x11\x00\x00\x00\x00\x00\x00\x00"] │ │ └── ["B\xfd\x14\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x14\x00\x00\x00\x00\x00\x00\x00"] - │ ├── cost: 253.575556 - │ ├── key: (11) - │ └── fd: (11)-->(14) + │ └── cost: 253.575556 └── filters ├── st_overlaps(geom1:4, '01010000C00000000000000000000000000000000000000000000000000000000000000000') [outer=(4), immutable, constraints=(/4: (/NULL - ])] └── st_overlaps('01010000C00000000000000000000000000000000000000000000000000000000000000000', geom2:8) [outer=(8), immutable, constraints=(/8: (/NULL - ])] @@ -1220,9 +1204,7 @@ select │ │ ├── ["B\xfd\x10@\x00\x00\x00\x00\x00\x00", "B\xfd\x10@\x00\x00\x00\x00\x00\x00"] │ │ ├── ["B\xfd\x11\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x11\x00\x00\x00\x00\x00\x00\x00"] │ │ └── ["B\xfd\x14\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x14\x00\x00\x00\x00\x00\x00\x00"] - │ ├── cost: 19.1866667 - │ ├── key: (11) - │ └── fd: (11)-->(15) + │ └── cost: 19.1866667 └── filters └── st_overlaps(geom1:4, '01010000C00000000000000000000000000000000000000000000000000000000000000000') [outer=(4), immutable, constraints=(/4: (/NULL - ])] @@ -1322,9 +1304,7 @@ select │ │ ├── ["B\xfd\x10@\x00\x00\x00\x00\x00\x00", "B\xfd\x10@\x00\x00\x00\x00\x00\x00"] │ │ ├── ["B\xfd\x11\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x11\x00\x00\x00\x00\x00\x00\x00"] │ │ └── ["B\xfd\x14\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x14\x00\x00\x00\x00\x00\x00\x00"] - │ ├── cost: 253.575556 - │ ├── key: (11) - │ └── fd: (11)-->(14) + │ └── cost: 253.575556 └── filters ├── st_overlaps(geom1:4, '01010000C00000000000000000000000000000000000000000000000000000000000000000') [outer=(4), immutable, constraints=(/4: (/NULL - ])] └── k:1 > 2 [outer=(1), constraints=(/1: [/3 - ]; tight)] @@ -1461,9 +1441,7 @@ select │ │ ├── ["B\xfd\x10@\x00\x00\x00\x00\x00\x00", "B\xfd\x10@\x00\x00\x00\x00\x00\x00"] │ │ ├── ["B\xfd\x11\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x11\x00\x00\x00\x00\x00\x00\x00"] │ │ └── ["B\xfd\x14\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x14\x00\x00\x00\x00\x00\x00\x00"] - │ ├── cost: 253.575556 - │ ├── key: (11) - │ └── 
fd: (11)-->(14) + │ └── cost: 253.575556 └── filters ├── st_overlaps(geom1:4, '01010000C00000000000000000000000000000000000000000000000000000000000000000') [outer=(4), immutable, constraints=(/4: (/NULL - ])] ├── k:1 > 2 [outer=(1), constraints=(/1: [/3 - ]; tight)] @@ -1668,9 +1646,7 @@ select │ │ ├── ["B\xfd\x10@\x00\x00\x00\x00\x00\x00", "B\xfd\x10@\x00\x00\x00\x00\x00\x00"] │ │ ├── ["B\xfd\x11\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x11\x00\x00\x00\x00\x00\x00\x00"] │ │ └── ["B\xfd\x14\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x14\x00\x00\x00\x00\x00\x00\x00"] - │ ├── cost: 19.1866667 - │ ├── key: (11) - │ └── fd: (11)-->(15) + │ └── cost: 19.1866667 └── filters └── geom1:4 && 'BOX(1 2,3 4)' [outer=(4), immutable, constraints=(/4: (/NULL - ])] @@ -1822,9 +1798,7 @@ select │ │ ├── ["B\xfd\x10\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x10\x00\x00\x00\x00\x00\x00\x00"] │ │ ├── ["B\xfd\x11\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x11\x00\x00\x00\x00\x00\x00\x00"] │ │ └── ["B\xfd\x14\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x14\x00\x00\x00\x00\x00\x00\x00"] - │ ├── cost: 141.575556 - │ ├── key: (3) - │ └── fd: (3)-->(6) + │ └── cost: 141.575556 └── filters └── st_covers(geom1:2, '01010000C00000000000000000000000000000000000000000000000000000000000000000') [outer=(2), immutable, constraints=(/2: (/NULL - ])] @@ -1926,9 +1900,7 @@ select │ │ ├── ["B\xfd\x10@\x00\x00\x00\x00\x00\x00", "B\xfd\x10@\x00\x00\x00\x00\x00\x00"] │ │ ├── ["B\xfd\x11\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x11\x00\x00\x00\x00\x00\x00\x00"] │ │ └── ["B\xfd\x14\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x14\x00\x00\x00\x00\x00\x00\x00"] - │ ├── cost: 253.575556 - │ ├── key: (3) - │ └── fd: (3)-->(8) + │ └── cost: 253.575556 └── filters └── st_covers(geom1:2, '01010000C00000000000000000000000000000000000000000000000000000000000000000') [outer=(2), immutable, constraints=(/2: (/NULL - ])] @@ -1969,8 +1941,6 @@ select │ │ ├── ["B\xfd\x10\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x10\x00\x00\x00\x00\x00\x00\x00"] │ │ ├── ["B\xfd\x11\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x11\x00\x00\x00\x00\x00\x00\x00"] │ │ └── ["B\xfd\x14\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x14\x00\x00\x00\x00\x00\x00\x00"] - │ ├── cost: 141.575556 - │ ├── key: (3) - │ └── fd: (3)-->(8) + │ └── cost: 141.575556 └── filters └── st_covers(geom1:2, '01010000C00000000000000000000000000000000000000000000000000000000000000000') [outer=(2), immutable, constraints=(/2: (/NULL - ])] diff --git a/pkg/sql/opt/indexrec/testdata/index b/pkg/sql/opt/indexrec/testdata/index index d3c63a07ca55..867a49923cc0 100644 --- a/pkg/sql/opt/indexrec/testdata/index +++ b/pkg/sql/opt/indexrec/testdata/index @@ -1559,9 +1559,7 @@ project │ │ └── spans │ │ ├── ["7\x00\x019", "7\x00\x019"] │ │ └── ["7foo\x00\x01\x121\x00\x01", "7foo\x00\x01\x121\x00\x01"] - │ ├── cost: 137.575556 - │ ├── key: (6) - │ └── fd: (6)-->(9) + │ └── cost: 137.575556 └── filters └── j:4 <@ '{"foo": "1"}' [outer=(4), immutable] @@ -1635,9 +1633,7 @@ project │ │ └── spans │ │ ├── ["7\x00\x019", "7\x00\x019"] │ │ └── ["7foo\x00\x01\x121\x00\x01", "7foo\x00\x01\x121\x00\x01"] - │ ├── cost: 19.1866667 - │ ├── key: (6) - │ └── fd: (6)-->(10) + │ └── cost: 19.1866667 └── filters └── j:4 <@ '{"foo": "1"}' [outer=(4), immutable] @@ -1688,9 +1684,7 @@ project │ │ └── spans │ │ ├── ["7\x00\x019", "7\x00\x019"] │ │ └── ["7foo\x00\x01\x121\x00\x01", "7foo\x00\x01\x121\x00\x01"] - │ ├── cost: 18.1272733 - │ ├── key: (6) - │ └── fd: (6)-->(12) + │ └── cost: 18.1272733 └── filters └── j:4 <@ '{"foo": "1"}' [outer=(4), immutable] diff --git 
a/pkg/sql/opt/memo/logical_props_builder.go b/pkg/sql/opt/memo/logical_props_builder.go index 137001d6243e..4064af32d37a 100644 --- a/pkg/sql/opt/memo/logical_props_builder.go +++ b/pkg/sql/opt/memo/logical_props_builder.go @@ -118,23 +118,24 @@ func (b *logicalPropsBuilder) buildScanProps(scan *ScanExpr, rel *props.Relation } else { // Initialize key FD's from the table schema, including constant columns // from the constraint, minus any columns that are not projected by the - // Scan operator. If the scan is an inverted index scan, then the table - // FDs should not be used because each logical row can correspond to - // multiple tuples in the inverted index. + // Scan operator. // - // TODO(#122225): Note that we currently copy table FDs for inverted - // scans with inverted constraints, i.e., all inverted scans that are - // not generated by GenerateTrigramSimilarityInvertedIndexScans. We - // should not be doing this because these FDs are not correct because - // scans over inverted indexes can produce multiple tuples for each - // logical row in the table. We maintain this behavior for now for - // historical reasons - it's been "broken" since inverted scans were - // introduced. It has potential to cause incorrect results, though there - // are no known correctness bugs, likely because there are no optimizer - // rules that operate on inverted filter expressions. We should fix this - // to prevent potential bugs in the future by not copying the table FDs - // here and adding primary keys to the FDs of inverted filters. - if !scan.IsInvertedScan(md) || scan.InvertedConstraint != nil { + // If the scan is an inverted index scan, then the table FDs should only + // be copied if a single key is scanned. A scan over multiple inverted + // keys could produce multiple tuples for a single logical row in the + // table, so the table's FD keys are not upheld during the scan. An + // inverted filter planned above the inverted index scan will + // deduplicate these tuples. Note that we could include the table FDs + // with the keys omitted, but for simplicity we omit the FDs entirely. + // + // TODO(mgartner): For multi-key inverted index scans we could add a key + // that includes the inverted column and the PK columns (similar to how + // partial index keys are added below). This is only necessary if there + // is an optimization that requires knowledge of this key. + singleKeyInvertedScan := scan.InvertedConstraint != nil && + scan.InvertedConstraint.Len() == 1 && + scan.InvertedConstraint[0].IsSingleVal() + if !scan.IsInvertedScan(md) || singleKeyInvertedScan { rel.FuncDeps.CopyFrom(MakeTableFuncDep(md, scan.Table)) } if scan.Constraint != nil { @@ -146,9 +147,9 @@ func (b *logicalPropsBuilder) buildScanProps(scan *ScanExpr, rel *props.Relation if pred != nil { b.addFiltersToFuncDep(pred, &rel.FuncDeps) - // Partial index keys are not added to the functional dependencies in - // MakeTableFuncDep, because they do not apply to the entire table. They are - // added here if the scan uses a partial index. + // Partial index keys are not added to the functional dependencies + // in MakeTableFuncDep, because they do not apply to the entire + // table. They are added here if the scan uses a partial index. 
index := md.Table(scan.Table).Index(scan.Index) var keyCols opt.ColSet for col := 0; col < index.LaxKeyColumnCount(); col++ { @@ -378,10 +379,17 @@ func (b *logicalPropsBuilder) buildInvertedFilterProps( // Functional Dependencies // ----------------------- - // Start with copy of FuncDepSet from input, add FDs from the outer columns, - // modify with any additional not-null columns, then possibly simplify by - // calling ProjectCols. - rel.FuncDeps.CopyFrom(&inputProps.FuncDeps) + // The inverted scan beneath the inverted filter may produce multiple tuples + // for a single logical row, so the table's FD keys are not upheld during + // the inverted scan. The inverted filter deduplicates these tuples, making + // the table's FD keys valid. We start by adding the PK columns of the + // underlying table of the inverted scan. We could add all the FDs of the + // underlying table, but there is no need to, because inverted filters always + // produce just the PK columns. + // + // Then we add FDs from the outer columns, modify with any additional + // not-null columns, and possibly simplify by calling ProjectCols. + rel.FuncDeps.AddStrictKey(invFilter.PKCols, rel.OutputCols) addOuterColsToFuncDep(rel.OuterCols, &rel.FuncDeps) rel.FuncDeps.MakeNotNull(rel.NotNullCols) rel.FuncDeps.ProjectCols(rel.OutputCols) diff --git a/pkg/sql/opt/memo/memo.go b/pkg/sql/opt/memo/memo.go index 590a7723687f..dcc01510283a 100644 --- a/pkg/sql/opt/memo/memo.go +++ b/pkg/sql/opt/memo/memo.go @@ -201,6 +201,7 @@ type Memo struct { pushOffsetIntoIndexJoin bool usePolymorphicParameterFix bool useConditionalHoistFix bool + pushLimitIntoProjectFilteredScan bool // txnIsoLevel is the isolation level under which the plan was created. This // affects the planning of some locking operations, so it must be included in @@ -288,6 +289,7 @@ func (m *Memo) Init(ctx context.Context, evalCtx *eval.Context) { pushOffsetIntoIndexJoin: evalCtx.SessionData().OptimizerPushOffsetIntoIndexJoin, usePolymorphicParameterFix: evalCtx.SessionData().OptimizerUsePolymorphicParameterFix, useConditionalHoistFix: evalCtx.SessionData().OptimizerUseConditionalHoistFix, + pushLimitIntoProjectFilteredScan: evalCtx.SessionData().OptimizerPushLimitIntoProjectFilteredScan, txnIsoLevel: evalCtx.TxnIsoLevel, } m.metadata.Init() @@ -453,6 +455,7 @@ func (m *Memo) IsStale( m.pushOffsetIntoIndexJoin != evalCtx.SessionData().OptimizerPushOffsetIntoIndexJoin || m.usePolymorphicParameterFix != evalCtx.SessionData().OptimizerUsePolymorphicParameterFix || m.useConditionalHoistFix != evalCtx.SessionData().OptimizerUseConditionalHoistFix || + m.pushLimitIntoProjectFilteredScan != evalCtx.SessionData().OptimizerPushLimitIntoProjectFilteredScan || m.txnIsoLevel != evalCtx.TxnIsoLevel { return true, nil } diff --git a/pkg/sql/opt/memo/memo_test.go b/pkg/sql/opt/memo/memo_test.go index 17ce296907ab..de22d58c34f8 100644 --- a/pkg/sql/opt/memo/memo_test.go +++ b/pkg/sql/opt/memo/memo_test.go @@ -507,6 +507,12 @@ func TestMemoIsStale(t *testing.T) { evalCtx.SessionData().OptimizerUsePolymorphicParameterFix = false notStale() + // Stale optimizer_push_limit_into_project_filtered_scan. + evalCtx.SessionData().OptimizerPushLimitIntoProjectFilteredScan = true + stale() + evalCtx.SessionData().OptimizerPushLimitIntoProjectFilteredScan = false + notStale() + // User no longer has access to view.
catalog.View(tree.NewTableNameWithSchema("t", catconstants.PublicSchemaName, "abcview")).Revoked = true _, err = o.Memo().IsStale(ctx, &evalCtx, catalog) diff --git a/pkg/sql/opt/memo/testdata/logprops/scan b/pkg/sql/opt/memo/testdata/logprops/scan index 75a81f6b40f5..d6397de11771 100644 --- a/pkg/sql/opt/memo/testdata/logprops/scan +++ b/pkg/sql/opt/memo/testdata/logprops/scan @@ -456,3 +456,302 @@ project │ └── interesting orderings: (+4,+1) (+7,+(3|6),+4,+1) └── projections └── variable: s:2 [as=c_s:5, type=string, outer=(2)] + +# Test FDs for inverted index scans. +exec-ddl +CREATE TABLE inv ( + k INT PRIMARY KEY, + a INT[], + b BOOL, + INVERTED INDEX a_idx (a), + INVERTED INDEX a_b_idx (a) WHERE b +) +---- + +exec-ddl +CREATE TABLE inv_multi_pks ( + k1 INT, + k2 INT, + a INT[], + b BOOL, + PRIMARY KEY (k1, k2), + INVERTED INDEX a_idx (a), + INVERTED INDEX a_b_idx (a) WHERE b +) +---- + +# If the inverted scan scans a single key, then it upholds the FD keys of the +# underlying table. +opt +SELECT * FROM inv WHERE a @> ARRAY[1] +---- +index-join inv + ├── columns: k:1(int!null) a:2(int[]!null) b:3(bool) + ├── immutable + ├── key: (1) + ├── fd: (1)-->(2,3) + ├── prune: (1,3) + ├── interesting orderings: (+1) + └── scan inv@a_idx,inverted + ├── columns: k:1(int!null) + ├── inverted constraint: /6/1 + │ └── spans: ["\x89", "\x89"] + ├── key: (1) + └── prune: (1) + +# Same as above, but with multiple PK columns. +opt +SELECT * FROM inv_multi_pks WHERE a @> ARRAY[1] +---- +index-join inv_multi_pks + ├── columns: k1:1(int!null) k2:2(int!null) a:3(int[]!null) b:4(bool) + ├── immutable + ├── key: (1,2) + ├── fd: (1,2)-->(3,4) + ├── prune: (1,2,4) + ├── interesting orderings: (+1,+2) + └── scan inv_multi_pks@a_idx,inverted + ├── columns: k1:1(int!null) k2:2(int!null) + ├── inverted constraint: /7/1/2 + │ └── spans: ["\x89", "\x89"] + ├── key: (1,2) + └── prune: (1,2) + +# If the inverted scan scans multiple keys, then it does NOT uphold the FD keys +# of the underlying table. The table FD keys are only valid after the inverted +# filter expression. +opt +SELECT * FROM inv@{NO_ZIGZAG_JOIN} WHERE a @> ARRAY[1, 2] +---- +index-join inv + ├── columns: k:1(int!null) a:2(int[]!null) b:3(bool) + ├── immutable + ├── key: (1) + ├── fd: (1)-->(2,3) + ├── prune: (1,3) + ├── interesting orderings: (+1) + └── inverted-filter + ├── columns: k:1(int!null) + ├── inverted expression: /6 + │ ├── tight: true, unique: true + │ ├── union spans: empty + │ └── INTERSECTION + │ ├── span expression + │ │ ├── tight: true, unique: true + │ │ └── union spans: ["\x89", "\x89"] + │ └── span expression + │ ├── tight: true, unique: true + │ └── union spans: ["\x8a", "\x8a"] + ├── key: (1) + └── scan inv@a_idx,inverted + ├── columns: k:1(int!null) a_inverted_key:6(encodedkey!null) + ├── inverted constraint: /6/1 + │ └── spans: ["\x89", "\x8b") + ├── flags: no-zigzag-join + └── prune: (1,6) + +# Same as above, but with multiple inverted spans instead of a single, multi-key +# span. 
+opt +SELECT * FROM inv@{NO_ZIGZAG_JOIN} WHERE a @> ARRAY[1, 3] +---- +index-join inv + ├── columns: k:1(int!null) a:2(int[]!null) b:3(bool) + ├── immutable + ├── key: (1) + ├── fd: (1)-->(2,3) + ├── prune: (1,3) + ├── interesting orderings: (+1) + └── inverted-filter + ├── columns: k:1(int!null) + ├── inverted expression: /6 + │ ├── tight: true, unique: true + │ ├── union spans: empty + │ └── INTERSECTION + │ ├── span expression + │ │ ├── tight: true, unique: true + │ │ └── union spans: ["\x89", "\x89"] + │ └── span expression + │ ├── tight: true, unique: true + │ └── union spans: ["\x8b", "\x8b"] + ├── key: (1) + └── scan inv@a_idx,inverted + ├── columns: k:1(int!null) a_inverted_key:6(encodedkey!null) + ├── inverted constraint: /6/1 + │ └── spans + │ ├── ["\x89", "\x89"] + │ └── ["\x8b", "\x8b"] + ├── flags: no-zigzag-join + └── prune: (1,6) + +# Same as above, but with multiple PK columns. +opt +SELECT * FROM inv_multi_pks@{NO_ZIGZAG_JOIN} WHERE a @> ARRAY[1, 3] +---- +index-join inv_multi_pks + ├── columns: k1:1(int!null) k2:2(int!null) a:3(int[]!null) b:4(bool) + ├── immutable + ├── key: (1,2) + ├── fd: (1,2)-->(3,4) + ├── prune: (1,2,4) + ├── interesting orderings: (+1,+2) + └── inverted-filter + ├── columns: k1:1(int!null) k2:2(int!null) + ├── inverted expression: /7 + │ ├── tight: true, unique: true + │ ├── union spans: empty + │ └── INTERSECTION + │ ├── span expression + │ │ ├── tight: true, unique: true + │ │ └── union spans: ["\x89", "\x89"] + │ └── span expression + │ ├── tight: true, unique: true + │ └── union spans: ["\x8b", "\x8b"] + ├── key: (1,2) + └── scan inv_multi_pks@a_idx,inverted + ├── columns: k1:1(int!null) k2:2(int!null) a_inverted_key:7(encodedkey!null) + ├── inverted constraint: /7/1/2 + │ └── spans + │ ├── ["\x89", "\x89"] + │ └── ["\x8b", "\x8b"] + ├── flags: no-zigzag-join + └── prune: (1,2,7) + +# A single key scan over a partial inverted index upholds the FD keys of the +# underlying table. +opt +SELECT * FROM inv WHERE a @> ARRAY[1] AND b +---- +index-join inv + ├── columns: k:1(int!null) a:2(int[]!null) b:3(bool!null) + ├── immutable + ├── key: (1) + ├── fd: ()-->(3), (1)-->(2) + ├── prune: (1) + ├── interesting orderings: (+1 opt(3)) + └── scan inv@a_b_idx,inverted,partial + ├── columns: k:1(int!null) + ├── inverted constraint: /7/1 + │ └── spans: ["\x89", "\x89"] + ├── key: (1) + └── prune: (1) + +# Same as above, but with multiple PK columns. +opt +SELECT * FROM inv_multi_pks WHERE a @> ARRAY[1] AND b +---- +index-join inv_multi_pks + ├── columns: k1:1(int!null) k2:2(int!null) a:3(int[]!null) b:4(bool!null) + ├── immutable + ├── key: (1,2) + ├── fd: ()-->(4), (1,2)-->(3) + ├── prune: (1,2) + ├── interesting orderings: (+1,+2 opt(4)) + └── scan inv_multi_pks@a_b_idx,inverted,partial + ├── columns: k1:1(int!null) k2:2(int!null) + ├── inverted constraint: /8/1/2 + │ └── spans: ["\x89", "\x89"] + ├── key: (1,2) + └── prune: (1,2) + +# A multi-key scan over a partial inverted index does not uphold the FD keys of +# the underlying table. 
+opt +SELECT * FROM inv@{NO_ZIGZAG_JOIN} WHERE a @> ARRAY[1, 2] AND b +---- +index-join inv + ├── columns: k:1(int!null) a:2(int[]!null) b:3(bool!null) + ├── immutable + ├── key: (1) + ├── fd: ()-->(3), (1)-->(2) + ├── prune: (1) + ├── interesting orderings: (+1 opt(3)) + └── inverted-filter + ├── columns: k:1(int!null) + ├── inverted expression: /7 + │ ├── tight: true, unique: true + │ ├── union spans: empty + │ └── INTERSECTION + │ ├── span expression + │ │ ├── tight: true, unique: true + │ │ └── union spans: ["\x89", "\x89"] + │ └── span expression + │ ├── tight: true, unique: true + │ └── union spans: ["\x8a", "\x8a"] + ├── key: (1) + └── scan inv@a_b_idx,inverted,partial + ├── columns: k:1(int!null) a_inverted_key:7(encodedkey!null) + ├── inverted constraint: /7/1 + │ └── spans: ["\x89", "\x8b") + ├── flags: no-zigzag-join + ├── key: (1,7) + └── prune: (1,7) + +# Same as above, but with multiple inverted spans instead of a single, multi-key +# span. +opt +SELECT * FROM inv@{NO_ZIGZAG_JOIN} WHERE a @> ARRAY[1, 3] AND b +---- +index-join inv + ├── columns: k:1(int!null) a:2(int[]!null) b:3(bool!null) + ├── immutable + ├── key: (1) + ├── fd: ()-->(3), (1)-->(2) + ├── prune: (1) + ├── interesting orderings: (+1 opt(3)) + └── inverted-filter + ├── columns: k:1(int!null) + ├── inverted expression: /7 + │ ├── tight: true, unique: true + │ ├── union spans: empty + │ └── INTERSECTION + │ ├── span expression + │ │ ├── tight: true, unique: true + │ │ └── union spans: ["\x89", "\x89"] + │ └── span expression + │ ├── tight: true, unique: true + │ └── union spans: ["\x8b", "\x8b"] + ├── key: (1) + └── scan inv@a_b_idx,inverted,partial + ├── columns: k:1(int!null) a_inverted_key:7(encodedkey!null) + ├── inverted constraint: /7/1 + │ └── spans + │ ├── ["\x89", "\x89"] + │ └── ["\x8b", "\x8b"] + ├── flags: no-zigzag-join + ├── key: (1,7) + └── prune: (1,7) + +# Same as above, but with multiple PK columns. 
+opt +SELECT * FROM inv_multi_pks@{NO_ZIGZAG_JOIN} WHERE a @> ARRAY[1, 3] AND b +---- +index-join inv_multi_pks + ├── columns: k1:1(int!null) k2:2(int!null) a:3(int[]!null) b:4(bool!null) + ├── immutable + ├── key: (1,2) + ├── fd: ()-->(4), (1,2)-->(3) + ├── prune: (1,2) + ├── interesting orderings: (+1,+2 opt(4)) + └── inverted-filter + ├── columns: k1:1(int!null) k2:2(int!null) + ├── inverted expression: /8 + │ ├── tight: true, unique: true + │ ├── union spans: empty + │ └── INTERSECTION + │ ├── span expression + │ │ ├── tight: true, unique: true + │ │ └── union spans: ["\x89", "\x89"] + │ └── span expression + │ ├── tight: true, unique: true + │ └── union spans: ["\x8b", "\x8b"] + ├── key: (1,2) + └── scan inv_multi_pks@a_b_idx,inverted,partial + ├── columns: k1:1(int!null) k2:2(int!null) a_inverted_key:8(encodedkey!null) + ├── inverted constraint: /8/1/2 + │ └── spans + │ ├── ["\x89", "\x89"] + │ └── ["\x8b", "\x8b"] + ├── flags: no-zigzag-join + ├── key: (1,2,8) + └── prune: (1,2,8) diff --git a/pkg/sql/opt/memo/testdata/stats/inverted-array b/pkg/sql/opt/memo/testdata/stats/inverted-array index 12df04af152a..328dddbe8f88 100644 --- a/pkg/sql/opt/memo/testdata/stats/inverted-array +++ b/pkg/sql/opt/memo/testdata/stats/inverted-array @@ -123,11 +123,9 @@ index-join t ├── columns: k:1(int!null) a_inverted_key:5(encodedkey!null) ├── inverted constraint: /5/1 │ └── spans: ["\x8a", "\x8c") - ├── stats: [rows=20, distinct(1)=19.6078, null(1)=0, distinct(5)=2, null(5)=0] - │ histogram(5)= 0 10 0 10 - │ <--- '\x8a' --- '\x8b' - ├── key: (1) - └── fd: (1)-->(5) + └── stats: [rows=20, distinct(1)=19.6078, null(1)=0, distinct(5)=2, null(5)=0] + histogram(5)= 0 10 0 10 + <--- '\x8a' --- '\x8b' # The inverted index is used when checking if an array column is contained by # an empty array. An additional filter is required. 
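The logprops testdata above exercises the new buildScanProps condition from earlier in this diff: table FDs are copied for an inverted scan only when the inverted constraint pins down exactly one inverted key. A minimal Go sketch of that predicate, with invSpan as a hypothetical stand-in for the real inverted-constraint span type (the actual check uses InvertedConstraint.Len() and IsSingleVal()):

package main

import "fmt"

// invSpan is a hypothetical stand-in for one span of an inverted constraint,
// with inclusive start/end keys.
type invSpan struct{ start, end string }

// isSingleVal reports whether the span covers exactly one inverted key,
// standing in for the IsSingleVal check referenced in buildScanProps.
func (s invSpan) isSingleVal() bool { return s.start == s.end }

// singleKeyInvertedScan mirrors the new condition in buildScanProps: the
// table's FD keys can be copied only when the scan reads exactly one
// inverted key, since a multi-key scan may emit several tuples per
// logical row.
func singleKeyInvertedScan(constraint []invSpan) bool {
    return len(constraint) == 1 && constraint[0].isSingleVal()
}

func main() {
    // a @> ARRAY[1]: one span, one key -- the table's FD keys hold.
    fmt.Println(singleKeyInvertedScan([]invSpan{{"\x89", "\x89"}}))
    // a @> ARRAY[1, 2]: one span covering multiple keys -- keys do not hold.
    fmt.Println(singleKeyInvertedScan([]invSpan{{"\x89", "\x8b"}}))
    // a @> ARRAY[1, 3]: two single-key spans -- keys do not hold.
    fmt.Println(singleKeyInvertedScan([]invSpan{{"\x89", "\x89"}, {"\x8b", "\x8b"}}))
}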
@@ -188,11 +186,9 @@ select │ │ └── spans │ │ ├── ["C", "C"] │ │ └── ["\x8a", "\x8a"] - │ ├── stats: [rows=20, distinct(1)=19.6078, null(1)=0, distinct(5)=2, null(5)=0] - │ │ histogram(5)= 0 10 0 10 0 0 - │ │ <--- '\x43' --- '\x8a' --- '\x8b' - │ ├── key: (1) - │ └── fd: (1)-->(5) + │ └── stats: [rows=20, distinct(1)=19.6078, null(1)=0, distinct(5)=2, null(5)=0] + │ histogram(5)= 0 10 0 10 0 0 + │ <--- '\x43' --- '\x8a' --- '\x8b' └── filters └── a:2 <@ ARRAY[2] [type=bool, outer=(2), immutable] @@ -227,11 +223,9 @@ select │ │ └── spans │ │ ├── ["C", "C"] │ │ └── ["\x8a", "\x8c") - │ ├── stats: [rows=30, distinct(1)=29.4118, null(1)=0, distinct(5)=3, null(5)=0] - │ │ histogram(5)= 0 10 0 10 0 10 - │ │ <--- '\x43' --- '\x8a' --- '\x8b' - │ ├── key: (1) - │ └── fd: (1)-->(5) + │ └── stats: [rows=30, distinct(1)=29.4118, null(1)=0, distinct(5)=3, null(5)=0] + │ histogram(5)= 0 10 0 10 0 10 + │ <--- '\x43' --- '\x8a' --- '\x8b' └── filters └── (a:2 <@ ARRAY[2]) OR (a:2 <@ ARRAY[3]) [type=bool, outer=(2), immutable] @@ -272,11 +266,9 @@ select │ │ └── spans │ │ ├── ["C", "C"] │ │ └── ["\x8a", "\x8c") - │ ├── stats: [rows=30, distinct(1)=29.4118, null(1)=0, distinct(5)=3, null(5)=0] - │ │ histogram(5)= 0 10 0 10 0 10 - │ │ <--- '\x43' --- '\x8a' --- '\x8b' - │ ├── key: (1) - │ └── fd: (1)-->(5) + │ └── stats: [rows=30, distinct(1)=29.4118, null(1)=0, distinct(5)=3, null(5)=0] + │ histogram(5)= 0 10 0 10 0 10 + │ <--- '\x43' --- '\x8a' --- '\x8b' └── filters ├── a:2 <@ ARRAY[2] [type=bool, outer=(2), immutable] └── a:2 <@ ARRAY[3] [type=bool, outer=(2), immutable] diff --git a/pkg/sql/opt/memo/testdata/stats/inverted-geo b/pkg/sql/opt/memo/testdata/stats/inverted-geo index e0fbef8c6eef..c4672d663c3a 100644 --- a/pkg/sql/opt/memo/testdata/stats/inverted-geo +++ b/pkg/sql/opt/memo/testdata/stats/inverted-geo @@ -486,11 +486,9 @@ select │ │ ├── ["B\xfd\x10\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x10\x00\x00\x00\x00\x00\x00\x00"] │ │ ├── ["B\xfd\x10\x00\x00\x00\x00\x00\x00\x01", "B\xfd\x12\x00\x00\x00\x00\x00\x00\x00") │ │ └── ["B\xfd\x14\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x14\x00\x00\x00\x00\x00\x00\x00"] - │ ├── stats: [rows=100, distinct(3)=76.9231, null(3)=0, distinct(6)=1, null(6)=0] - │ │ histogram(6)= 0 100 0 0 - │ │ <--- '\x42fd1000000100000000' --- '\x42fd1400000000000001' - │ ├── key: (3) - │ └── fd: (3)-->(6) + │ └── stats: [rows=100, distinct(3)=76.9231, null(3)=0, distinct(6)=1, null(6)=0] + │ histogram(6)= 0 100 0 0 + │ <--- '\x42fd1000000100000000' --- '\x42fd1400000000000001' └── filters └── st_intersects('010200000002000000000000000000E03F000000000000E03F666666666666E63F666666666666E63F', g:2) [type=bool, outer=(2), immutable, constraints=(/2: (/NULL - ])] @@ -529,11 +527,9 @@ project │ │ ├── ["B\xfd\x10\x00\x00\x00\x00\x00\x00\x01", "B\xfd\x12\x00\x00\x00\x00\x00\x00\x00") │ │ └── ["B\xfd\x14\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x14\x00\x00\x00\x00\x00\x00\x00"] │ ├── flags: force-index=t_g_idx - │ ├── stats: [rows=100, distinct(3)=76.9231, null(3)=0, distinct(6)=1, null(6)=0] - │ │ histogram(6)= 0 100 0 0 - │ │ <--- '\x42fd1000000100000000' --- '\x42fd1400000000000001' - │ ├── key: (3) - │ └── fd: (3)-->(6) + │ └── stats: [rows=100, distinct(3)=76.9231, null(3)=0, distinct(6)=1, null(6)=0] + │ histogram(6)= 0 100 0 0 + │ <--- '\x42fd1000000100000000' --- '\x42fd1400000000000001' └── filters └── st_intersects('010200000002000000000000000000E03F000000000000E03F666666666666E63F666666666666E63F', g:2) [type=bool, outer=(2), immutable, constraints=(/2: (/NULL - ])] diff --git 
a/pkg/sql/opt/memo/testdata/stats/inverted-geo-multi-column b/pkg/sql/opt/memo/testdata/stats/inverted-geo-multi-column index 16ad704e204f..126c8c23b02b 100644 --- a/pkg/sql/opt/memo/testdata/stats/inverted-geo-multi-column +++ b/pkg/sql/opt/memo/testdata/stats/inverted-geo-multi-column @@ -107,13 +107,11 @@ project │ │ ├── ["B\xfd\x10\x00\x00\x00\x00\x00\x00\x01", "B\xfd\x12\x00\x00\x00\x00\x00\x00\x00") │ │ └── ["B\xfd\x14\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x14\x00\x00\x00\x00\x00\x00\x00"] │ ├── flags: force-index=m - │ ├── stats: [rows=59.37842, distinct(1)=16.9653, null(1)=0, distinct(3)=1, null(3)=0, distinct(7)=1.18757, null(7)=0, distinct(3,7)=1.18757, null(3,7)=0] - │ │ histogram(3)= 0 59.378 - │ │ <--- 'banana' - │ │ histogram(7)= 0 0 9.1125e-11 50 9.3784 0 0 0 - │ │ <--- '\x42fd1000000000000001' ------------ '\x42fd1000000100000000' -------- '\x42fd1200000000000000' --- '\x42fd1400000000000001' - │ ├── key: (1) - │ └── fd: (1)-->(7) + │ └── stats: [rows=59.37842, distinct(1)=16.9653, null(1)=0, distinct(3)=1, null(3)=0, distinct(7)=1.18757, null(7)=0, distinct(3,7)=1.18757, null(3,7)=0] + │ histogram(3)= 0 59.378 + │ <--- 'banana' + │ histogram(7)= 0 0 9.1125e-11 50 9.3784 0 0 0 + │ <--- '\x42fd1000000000000001' ------------ '\x42fd1000000100000000' -------- '\x42fd1200000000000000' --- '\x42fd1400000000000001' └── filters └── st_intersects('010200000002000000000000000000E03F000000000000E03F666666666666E63F666666666666E63F', g:2) [type=bool, outer=(2), immutable, constraints=(/2: (/NULL - ])] @@ -163,8 +161,7 @@ project │ │ <--- 'banana' │ │ histogram(8)= 0 0 9.1125e-11 50 9.3784 0 0 0 │ │ <--- '\x42fd1000000000000001' ------------ '\x42fd1000000100000000' -------- '\x42fd1200000000000000' --- '\x42fd1400000000000001' - │ ├── key: (1) - │ └── fd: (1)-->(8) + │ └── key: (1,8) └── filters └── st_intersects('010200000002000000000000000000E03F000000000000E03F666666666666E63F666666666666E63F', g:2) [type=bool, outer=(2), immutable, constraints=(/2: (/NULL - ])] @@ -225,13 +222,11 @@ project │ │ ├── ["B\xfd\x10\x00\x00\x00\x00\x00\x00\x01", "B\xfd\x12\x00\x00\x00\x00\x00\x00\x00") │ │ └── ["B\xfd\x14\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x14\x00\x00\x00\x00\x00\x00\x00"] │ ├── flags: force-index=m - │ ├── stats: [rows=118.7568, distinct(1)=33.9305, null(1)=0, distinct(3)=2, null(3)=0, distinct(7)=1.18757, null(7)=0, distinct(3,7)=2.37514, null(3,7)=0] - │ │ histogram(3)= 0 59.378 0 59.378 - │ │ <--- 'banana' --- 'cherry' - │ │ histogram(7)= 0 0 1.8225e-10 100 18.757 0 0 0 - │ │ <--- '\x42fd1000000000000001' ------------ '\x42fd1000000100000000' -------- '\x42fd1200000000000000' --- '\x42fd1400000000000001' - │ ├── key: (1) - │ └── fd: (1)-->(7) + │ └── stats: [rows=118.7568, distinct(1)=33.9305, null(1)=0, distinct(3)=2, null(3)=0, distinct(7)=1.18757, null(7)=0, distinct(3,7)=2.37514, null(3,7)=0] + │ histogram(3)= 0 59.378 0 59.378 + │ <--- 'banana' --- 'cherry' + │ histogram(7)= 0 0 1.8225e-10 100 18.757 0 0 0 + │ <--- '\x42fd1000000000000001' ------------ '\x42fd1000000100000000' -------- '\x42fd1200000000000000' --- '\x42fd1400000000000001' └── filters └── st_intersects('010200000002000000000000000000E03F000000000000E03F666666666666E63F666666666666E63F', g:2) [type=bool, outer=(2), immutable, constraints=(/2: (/NULL - ])] @@ -281,8 +276,7 @@ project │ │ <--- 'banana' --- 'cherry' │ │ histogram(9)= 0 0 1.8657e-10 102.37 19.201 0 0 0 │ │ <--- '\x42fd1000000000000001' ------------ '\x42fd1000000100000000' -------- '\x42fd1200000000000000' --- '\x42fd1400000000000001' - │ ├── key: 
(1) - │ └── fd: (1)-->(9) + │ └── key: (1,9) └── filters └── st_intersects('010200000002000000000000000000E03F000000000000E03F666666666666E63F666666666666E63F', g:2) [type=bool, outer=(2), immutable, constraints=(/2: (/NULL - ])] @@ -347,8 +341,7 @@ project │ │ <--- 400 - │ │ histogram(10)= 0 0 3.8427e-12 2.1085 0.39548 0 0 0 │ │ <--- '\x42fd1000000000000001' ------------ '\x42fd1000000100000000' --------- '\x42fd1200000000000000' --- '\x42fd1400000000000001' - │ ├── key: (1) - │ └── fd: (1)-->(10) + │ └── key: (1,10) └── filters └── st_intersects('010200000002000000000000000000E03F000000000000E03F666666666666E63F666666666666E63F', g:2) [type=bool, outer=(2), immutable, constraints=(/2: (/NULL - ])] @@ -399,14 +392,12 @@ project │ │ ├── ["B\xfd\x10\x00\x00\x00\x00\x00\x00\x01", "B\xfd\x12\x00\x00\x00\x00\x00\x00\x00") │ │ └── ["B\xfd\x14\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x14\x00\x00\x00\x00\x00\x00\x00"] │ ├── flags: force-index=mp - │ ├── stats: [rows=10.11106, distinct(1)=2.88887, null(1)=0, distinct(3)=2, null(3)=0, distinct(4)=3, null(4)=0, distinct(10)=1.18757, null(10)=0, distinct(3,4,10)=7.05532, null(3,4,10)=0] - │ │ histogram(3)= 0 5.0555 0 5.0555 - │ │ <--- 'banana' --- 'cherry' - │ │ histogram(4)= 0 1.6852 0 3.3704 0 5.0555 - │ │ <--- 200 ---- 300 ---- 400 - - │ │ histogram(10)= 0 0 1.5517e-11 8.5141 1.597 0 0 0 - │ │ <--- '\x42fd1000000000000001' ------------ '\x42fd1000000100000000' ------- '\x42fd1200000000000000' --- '\x42fd1400000000000001' - │ ├── key: (1) - │ └── fd: (1)-->(10) + │ └── stats: [rows=10.11106, distinct(1)=2.88887, null(1)=0, distinct(3)=2, null(3)=0, distinct(4)=3, null(4)=0, distinct(10)=1.18757, null(10)=0, distinct(3,4,10)=7.05532, null(3,4,10)=0] + │ histogram(3)= 0 5.0555 0 5.0555 + │ <--- 'banana' --- 'cherry' + │ histogram(4)= 0 1.6852 0 3.3704 0 5.0555 + │ <--- 200 ---- 300 ---- 400 - + │ histogram(10)= 0 0 1.5517e-11 8.5141 1.597 0 0 0 + │ <--- '\x42fd1000000000000001' ------------ '\x42fd1000000100000000' ------- '\x42fd1200000000000000' --- '\x42fd1400000000000001' └── filters └── st_intersects('010200000002000000000000000000E03F000000000000E03F666666666666E63F666666666666E63F', g:2) [type=bool, outer=(2), immutable, constraints=(/2: (/NULL - ])] diff --git a/pkg/sql/opt/memo/testdata/stats/inverted-json b/pkg/sql/opt/memo/testdata/stats/inverted-json index b328de5242b9..def230982ac0 100644 --- a/pkg/sql/opt/memo/testdata/stats/inverted-json +++ b/pkg/sql/opt/memo/testdata/stats/inverted-json @@ -106,11 +106,9 @@ index-join t │ ├── ["7\x00\x019", "7\x00\x019"] │ └── ["7\x00\xff", "8") ├── flags: force-index=j_idx - ├── stats: [rows=1110, distinct(1)=1000, null(1)=0, distinct(5)=4, null(5)=0] - │ histogram(5)= 0 10 0 990 0 100 0 10 - │ <--- '\x37000139' --- '\x3761000112620001' --- '\x3763000112640001' --- '\x3765000112660001' - ├── key: (1) - └── fd: (1)-->(5) + └── stats: [rows=1110, distinct(1)=1000, null(1)=0, distinct(5)=4, null(5)=0] + histogram(5)= 0 10 0 990 0 100 0 10 + <--- '\x37000139' --- '\x3761000112620001' --- '\x3763000112640001' --- '\x3765000112660001' # An inverted index scan is preferred for a more selective filter. 
opt @@ -177,11 +175,9 @@ index-join t │ └── spans │ ├── ["7c\x00\x01\x12d\x00\x01", "7c\x00\x01\x12d\x00\x01"] │ └── ["7e\x00\x01\x12f\x00\x01", "7e\x00\x01\x12f\x00\x01"] - ├── stats: [rows=110, distinct(1)=99.0991, null(1)=0, distinct(5)=2, null(5)=0] - │ histogram(5)= 0 100 0 10 - │ <--- '\x3763000112640001' --- '\x3765000112660001' - ├── key: (1) - └── fd: (1)-->(5) + └── stats: [rows=110, distinct(1)=99.0991, null(1)=0, distinct(5)=2, null(5)=0] + histogram(5)= 0 100 0 10 + <--- '\x3763000112640001' --- '\x3765000112660001' # Containment of an empty array requires a scan over all array entries. opt @@ -209,11 +205,9 @@ index-join t │ ├── ["7\x00\x018", "7\x00\x018"] │ └── ["7\x00\x03", "7\x00\x03"] ├── flags: force-index=j_idx - ├── stats: [rows=1110, distinct(1)=1000, null(1)=0, distinct(5)=4, null(5)=0] - │ histogram(5)= 0 10 0 990 0 100 0 10 0 0 - │ <--- '\x37000138' --- '\x37000300012a0200' --- '\x37000300012a0400' --- '\x37000300012a0600' --- '\x370004' - ├── key: (1) - └── fd: (1)-->(5) + └── stats: [rows=1110, distinct(1)=1000, null(1)=0, distinct(5)=4, null(5)=0] + histogram(5)= 0 10 0 990 0 100 0 10 0 0 + <--- '\x37000138' --- '\x37000300012a0200' --- '\x37000300012a0400' --- '\x37000300012a0600' --- '\x370004' # An inverted index scan is preferred for a more selective filter. opt @@ -260,11 +254,9 @@ index-join t │ └── spans │ ├── ["7\x00\x03\x00\x01*\x04\x00", "7\x00\x03\x00\x01*\x04\x00"] │ └── ["7\x00\x03\x00\x01*\x06\x00", "7\x00\x03\x00\x01*\x06\x00"] - ├── stats: [rows=110, distinct(1)=99.0991, null(1)=0, distinct(5)=2, null(5)=0] - │ histogram(5)= 0 100 0 10 0 0 - │ <--- '\x37000300012a0400' --- '\x37000300012a0600' --- '\x37000300012a0601' - ├── key: (1) - └── fd: (1)-->(5) + └── stats: [rows=110, distinct(1)=99.0991, null(1)=0, distinct(5)=2, null(5)=0] + histogram(5)= 0 100 0 10 0 0 + <--- '\x37000300012a0400' --- '\x37000300012a0600' --- '\x37000300012a0601' # An inverted index scan is preferred for the containment of an indexed column # by an empty array. An additional filter is required. 
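Stepping back to the memo.go and memo_test.go hunks earlier in this diff: any session setting that can change the optimizer's output must be snapshotted at Memo.Init and re-checked in Memo.IsStale, otherwise a cached plan built under the old setting could be reused incorrectly. A toy Go sketch of that pattern, with sessionData as a hypothetical stand-in for the real session settings struct:

package main

import "fmt"

// sessionData is a hypothetical stand-in for the planner-relevant session
// settings that the memo snapshots.
type sessionData struct {
    pushLimitIntoProjectFilteredScan bool
}

// memo caches an optimized plan along with the settings it was built under.
type memo struct{ captured sessionData }

func initMemo(sd sessionData) *memo { return &memo{captured: sd} }

// isStale mirrors the Memo.IsStale pattern: the cached plan is discarded
// whenever any captured setting differs from the current session.
func (m *memo) isStale(sd sessionData) bool { return m.captured != sd }

func main() {
    m := initMemo(sessionData{pushLimitIntoProjectFilteredScan: false})
    fmt.Println(m.isStale(sessionData{pushLimitIntoProjectFilteredScan: false})) // false: settings match
    fmt.Println(m.isStale(sessionData{pushLimitIntoProjectFilteredScan: true}))  // true: flipped setting invalidates
}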
@@ -351,11 +343,9 @@ select │ │ └── spans │ │ ├── ["7\x00\x019", "7\x00\x019"] │ │ └── ["7c\x00\x01\x12d\x00\x01", "7c\x00\x01\x12d\x00\x01"] - │ ├── stats: [rows=110, distinct(1)=99.0991, null(1)=0, distinct(5)=2, null(5)=0] - │ │ histogram(5)= 0 10 0 100 0 0 - │ │ <--- '\x37000139' --- '\x3763000112640001' --- '\x3763000112640002' - │ ├── key: (1) - │ └── fd: (1)-->(5) + │ └── stats: [rows=110, distinct(1)=99.0991, null(1)=0, distinct(5)=2, null(5)=0] + │ histogram(5)= 0 10 0 100 0 0 + │ <--- '\x37000139' --- '\x3763000112640001' --- '\x3763000112640002' └── filters └── j:2 <@ '{"c": "d"}' [type=bool, outer=(2), immutable] @@ -392,11 +382,9 @@ select │ │ ├── ["7\x00\x019", "7\x00\x019"] │ │ ├── ["7c\x00\x01\x12d\x00\x01", "7c\x00\x01\x12d\x00\x01"] │ │ └── ["7e\x00\x01\x12f\x00\x01", "7e\x00\x01\x12f\x00\x01"] - │ ├── stats: [rows=120, distinct(1)=108.108, null(1)=0, distinct(5)=3, null(5)=0] - │ │ histogram(5)= 0 10 0 100 0 10 - │ │ <--- '\x37000139' --- '\x3763000112640001' --- '\x3765000112660001' - │ ├── key: (1) - │ └── fd: (1)-->(5) + │ └── stats: [rows=120, distinct(1)=108.108, null(1)=0, distinct(5)=3, null(5)=0] + │ histogram(5)= 0 10 0 100 0 10 + │ <--- '\x37000139' --- '\x3763000112640001' --- '\x3765000112660001' └── filters └── (j:2 <@ '{"c": "d"}') OR (j:2 <@ '{"e": "f"}') [type=bool, outer=(2), immutable] @@ -437,11 +425,9 @@ select │ │ ├── ["7\x00\x019", "7\x00\x019"] │ │ ├── ["7c\x00\x01\x12d\x00\x01", "7c\x00\x01\x12d\x00\x01"] │ │ └── ["7e\x00\x01\x12f\x00\x01", "7e\x00\x01\x12f\x00\x01"] - │ ├── stats: [rows=120, distinct(1)=108.108, null(1)=0, distinct(5)=3, null(5)=0] - │ │ histogram(5)= 0 10 0 100 0 10 - │ │ <--- '\x37000139' --- '\x3763000112640001' --- '\x3765000112660001' - │ ├── key: (1) - │ └── fd: (1)-->(5) + │ └── stats: [rows=120, distinct(1)=108.108, null(1)=0, distinct(5)=3, null(5)=0] + │ histogram(5)= 0 10 0 100 0 10 + │ <--- '\x37000139' --- '\x3763000112640001' --- '\x3765000112660001' └── filters ├── j:2 <@ '{"c": "d"}' [type=bool, outer=(2), immutable] └── j:2 <@ '{"e": "f"}' [type=bool, outer=(2), immutable] @@ -479,11 +465,9 @@ select │ │ ├── ["7\x00\x01*\x04\x00", "7\x00\x01*\x04\x00"] │ │ ├── ["7\x00\x018", "7\x00\x018"] │ │ └── ["7\x00\x03\x00\x01*\x04\x00", "7\x00\x03\x00\x01*\x04\x00"] - │ ├── stats: [rows=110, distinct(1)=99.0991, null(1)=0, distinct(5)=2, null(5)=0] - │ │ histogram(5)= 0 10 0 100 0 0 - │ │ <--- '\x37000138' --- '\x37000300012a0400' --- '\x37000300012a0401' - │ ├── key: (1) - │ └── fd: (1)-->(5) + │ └── stats: [rows=110, distinct(1)=99.0991, null(1)=0, distinct(5)=2, null(5)=0] + │ histogram(5)= 0 10 0 100 0 0 + │ <--- '\x37000138' --- '\x37000300012a0400' --- '\x37000300012a0401' └── filters └── j:2 <@ '[2]' [type=bool, outer=(2), immutable] @@ -524,11 +508,9 @@ select │ │ ├── ["7\x00\x018", "7\x00\x018"] │ │ ├── ["7\x00\x03\x00\x01*\x04\x00", "7\x00\x03\x00\x01*\x04\x00"] │ │ └── ["7\x00\x03\x00\x01*\x06\x00", "7\x00\x03\x00\x01*\x06\x00"] - │ ├── stats: [rows=120, distinct(1)=108.108, null(1)=0, distinct(5)=3, null(5)=0] - │ │ histogram(5)= 0 10 0 100 0 10 0 0 - │ │ <--- '\x37000138' --- '\x37000300012a0400' --- '\x37000300012a0600' --- '\x37000300012a0601' - │ ├── key: (1) - │ └── fd: (1)-->(5) + │ └── stats: [rows=120, distinct(1)=108.108, null(1)=0, distinct(5)=3, null(5)=0] + │ histogram(5)= 0 10 0 100 0 10 0 0 + │ <--- '\x37000138' --- '\x37000300012a0400' --- '\x37000300012a0600' --- '\x37000300012a0601' └── filters └── (j:2 <@ '[2]') OR (j:2 <@ '[3]') [type=bool, outer=(2), immutable] @@ -575,11 +557,9 @@ select │ 
│ ├── ["7\x00\x018", "7\x00\x018"] │ │ ├── ["7\x00\x03\x00\x01*\x04\x00", "7\x00\x03\x00\x01*\x04\x00"] │ │ └── ["7\x00\x03\x00\x01*\x06\x00", "7\x00\x03\x00\x01*\x06\x00"] - │ ├── stats: [rows=120, distinct(1)=108.108, null(1)=0, distinct(5)=3, null(5)=0] - │ │ histogram(5)= 0 10 0 100 0 10 0 0 - │ │ <--- '\x37000138' --- '\x37000300012a0400' --- '\x37000300012a0600' --- '\x37000300012a0601' - │ ├── key: (1) - │ └── fd: (1)-->(5) + │ └── stats: [rows=120, distinct(1)=108.108, null(1)=0, distinct(5)=3, null(5)=0] + │ histogram(5)= 0 10 0 100 0 10 0 0 + │ <--- '\x37000138' --- '\x37000300012a0400' --- '\x37000300012a0600' --- '\x37000300012a0601' └── filters ├── j:2 <@ '[2]' [type=bool, outer=(2), immutable] └── j:2 <@ '[3]' [type=bool, outer=(2), immutable] @@ -664,11 +644,9 @@ index-join t │ └── spans │ ├── ["7\x00\x019", "7\x00\x019"] │ └── ["7\x00\xff", "8") - ├── stats: [rows=100, distinct(1)=100, null(1)=0, distinct(5)=1, null(5)=0] - │ histogram(5)= 0 100 - │ <--- '\x37000139' - ├── key: (1) - └── fd: (1)-->(5) + └── stats: [rows=100, distinct(1)=100, null(1)=0, distinct(5)=1, null(5)=0] + histogram(5)= 0 100 + <--- '\x37000139' # An inverted index scan is preferred for containment of an empty array when # most inverted index entries are non-arrays. @@ -696,11 +674,9 @@ index-join t │ └── spans │ ├── ["7\x00\x018", "7\x00\x018"] │ └── ["7\x00\x03", "7\x00\x03"] - ├── stats: [rows=100, distinct(1)=100, null(1)=0, distinct(5)=1, null(5)=0] - │ histogram(5)= 0 100 0 0 - │ <--- '\x37000138' --- '\x37000139' - ├── key: (1) - └── fd: (1)-->(5) + └── stats: [rows=100, distinct(1)=100, null(1)=0, distinct(5)=1, null(5)=0] + histogram(5)= 0 100 0 0 + <--- '\x37000138' --- '\x37000139' # A query with the fetch val operator with a single key/val pair object on the # right side uses the inverted index, and the inverted expression is not tight. 
@@ -791,10 +767,8 @@ select │ │ └── spans │ │ ├── ["7a\x00\x02b\x00\x01\x12c\x00\x01", "7a\x00\x02b\x00\x01\x12c\x00\x01"] │ │ └── ["7a\x00\x02d\x00\x01\x12e\x00\x01", "7a\x00\x02d\x00\x01\x12e\x00\x01"] - │ ├── stats: [rows=4e-07, distinct(1)=4e-07, null(1)=0, distinct(5)=4e-07, null(5)=0] - │ │ histogram(5)= - │ ├── key: (1) - │ └── fd: (1)-->(5) + │ └── stats: [rows=4e-07, distinct(1)=4e-07, null(1)=0, distinct(5)=4e-07, null(5)=0] + │ histogram(5)= └── filters └── (j:2->'a') = '{"b": "c", "d": "e"}' [type=bool, outer=(2), immutable] @@ -850,10 +824,8 @@ select │ │ ├── ["7a\x00\x02\x00\x03\x00\x01\x12c\x00\x01", "7a\x00\x02\x00\x03\x00\x01\x12c\x00\x01"] │ │ ├── ["7a\x00\x02\x00\x03\x00\x01\x12d\x00\x01", "7a\x00\x02\x00\x03\x00\x01\x12d\x00\x01"] │ │ └── ["7a\x00\x02\x00\x03\x00\x01\x12e\x00\x01", "7a\x00\x02\x00\x03\x00\x01\x12e\x00\x01"] - │ ├── stats: [rows=4e-07, distinct(1)=4e-07, null(1)=0, distinct(5)=4e-07, null(5)=0] - │ │ histogram(5)= - │ ├── key: (1) - │ └── fd: (1)-->(5) + │ └── stats: [rows=4e-07, distinct(1)=4e-07, null(1)=0, distinct(5)=4e-07, null(5)=0] + │ histogram(5)= └── filters └── (j:2->'a') = '["b", "c", "d", "e"]' [type=bool, outer=(2), immutable] @@ -902,10 +874,8 @@ select │ │ ├── ["7a\x00\x02b\x00\x02\x00\x03\x00\x01\x12c\x00\x01", "7a\x00\x02b\x00\x02\x00\x03\x00\x01\x12c\x00\x01"] │ │ ├── ["7a\x00\x02b\x00\x02\x00\x03\x00\x01\x12d\x00\x01", "7a\x00\x02b\x00\x02\x00\x03\x00\x01\x12d\x00\x01"] │ │ └── ["7a\x00\x02b\x00\x02\x00\x03\x00\x01\x12e\x00\x01", "7a\x00\x02b\x00\x02\x00\x03\x00\x01\x12e\x00\x01"] - │ ├── stats: [rows=4e-07, distinct(1)=4e-07, null(1)=0, distinct(5)=4e-07, null(5)=0] - │ │ histogram(5)= - │ ├── key: (1) - │ └── fd: (1)-->(5) + │ └── stats: [rows=4e-07, distinct(1)=4e-07, null(1)=0, distinct(5)=4e-07, null(5)=0] + │ histogram(5)= └── filters └── (j:2->'a') = '{"b": ["c", "d", "e"]}' [type=bool, outer=(2), immutable] @@ -940,10 +910,8 @@ select │ │ └── spans │ │ ├── ["7a\x00\x018", "7a\x00\x018"] │ │ └── ["7a\x00\x02\x00\x03", "7a\x00\x02\x00\x03"] - │ ├── stats: [rows=4e-07, distinct(1)=4e-07, null(1)=0, distinct(5)=4e-07, null(5)=0] - │ │ histogram(5)= - │ ├── key: (1) - │ └── fd: (1)-->(5) + │ └── stats: [rows=4e-07, distinct(1)=4e-07, null(1)=0, distinct(5)=4e-07, null(5)=0] + │ histogram(5)= └── filters └── (j:2->'a') = '[]' [type=bool, outer=(2), immutable] @@ -978,10 +946,8 @@ select │ │ └── spans │ │ ├── ["7a\x00\x019", "7a\x00\x019"] │ │ └── ["7a\x00\x02\x00\xff", "7a\x00\x03") - │ ├── stats: [rows=4e-07, distinct(1)=4e-07, null(1)=0, distinct(5)=4e-07, null(5)=0] - │ │ histogram(5)= - │ ├── key: (1) - │ └── fd: (1)-->(5) + │ └── stats: [rows=4e-07, distinct(1)=4e-07, null(1)=0, distinct(5)=4e-07, null(5)=0] + │ histogram(5)= └── filters └── (j:2->'a') = '{}' [type=bool, outer=(2), immutable] @@ -1010,10 +976,8 @@ index-join t │ └── spans │ ├── ["7a\x00\x01*\x02\x00", "7a\x00\x01*\x02\x00"] │ └── ["7a\x00\x02\x00\x03\x00\x01*\x02\x00", "7a\x00\x02\x00\x03\x00\x01*\x02\x00"] - ├── stats: [rows=4e-07, distinct(1)=4e-07, null(1)=0, distinct(5)=4e-07, null(5)=0] - │ histogram(5)= - ├── key: (1) - └── fd: (1)-->(5) + └── stats: [rows=4e-07, distinct(1)=4e-07, null(1)=0, distinct(5)=4e-07, null(5)=0] + histogram(5)= # A query with fetch val and contained by operators uses the inverted index, # and the expression is not tight. 
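The inverted-json stats hunks here drop key and fd from the scan properties for the same reason as the logprops changes: a row is indexed once per inverted key, so a multi-key scan can emit the same primary key more than once. A small Go sketch of the deduplication step in the inverted filter that restores the primary key as a strict key; tuple is a hypothetical simplification, and span-expression evaluation is deliberately omitted:

package main

import "fmt"

// tuple is a hypothetical (pk, invertedKey) pair emitted by an inverted scan.
type tuple struct {
    pk     int
    invKey string
}

func main() {
    // Row pk=1 with a = ARRAY[1, 2] is indexed under two inverted keys, so a
    // scan over both keys returns pk=1 twice: pk alone is not a key of the
    // scan's output, which is why key/fd were removed from its properties.
    scanned := []tuple{{1, "\x89"}, {1, "\x8a"}, {2, "\x89"}}

    // The inverted filter also evaluates the span expression; shown here is
    // only the deduplication on pk that makes pk a strict key for everything
    // planned above the filter.
    seen := make(map[int]bool)
    var dedup []int
    for _, t := range scanned {
        if !seen[t.pk] {
            seen[t.pk] = true
            dedup = append(dedup, t.pk)
        }
    }
    fmt.Println(dedup) // [1 2]
}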
@@ -1046,11 +1010,9 @@ select │ │ └── spans │ │ ├── ["7\x00\x019", "7\x00\x019"] │ │ └── ["7a\x00\x01*\x02\x00", "7a\x00\x01*\x02\x00"] - │ ├── stats: [rows=100, distinct(1)=100, null(1)=0, distinct(5)=1, null(5)=0] - │ │ histogram(5)= 0 100 - │ │ <--- '\x37000139' - │ ├── key: (1) - │ └── fd: (1)-->(5) + │ └── stats: [rows=100, distinct(1)=100, null(1)=0, distinct(5)=1, null(5)=0] + │ histogram(5)= 0 100 + │ <--- '\x37000139' └── filters └── (j:2->'a') <@ '1' [type=bool, outer=(2), immutable] @@ -1079,10 +1041,8 @@ index-join t │ └── spans │ ├── ["7a\x00\x02b\x00\x01\x12c\x00\x01", "7a\x00\x02b\x00\x01\x12c\x00\x01"] │ └── ["7a\x00\x02b\x00\x02\x00\x03\x00\x01\x12c\x00\x01", "7a\x00\x02b\x00\x02\x00\x03\x00\x01\x12c\x00\x01"] - ├── stats: [rows=4e-07, distinct(1)=4e-07, null(1)=0, distinct(5)=4e-07, null(5)=0] - │ histogram(5)= - ├── key: (1) - └── fd: (1)-->(5) + └── stats: [rows=4e-07, distinct(1)=4e-07, null(1)=0, distinct(5)=4e-07, null(5)=0] + histogram(5)= # A query with chained fetch val and contained by operators uses the inverted # index, and the expression is not tight. @@ -1117,11 +1077,9 @@ select │ │ ├── ["7\x00\x019", "7\x00\x019"] │ │ ├── ["7a\x00\x019", "7a\x00\x019"] │ │ └── ["7a\x00\x02b\x00\x01\x12c\x00\x01", "7a\x00\x02b\x00\x01\x12c\x00\x01"] - │ ├── stats: [rows=100, distinct(1)=100, null(1)=0, distinct(5)=1, null(5)=0] - │ │ histogram(5)= 0 100 - │ │ <--- '\x37000139' - │ ├── key: (1) - │ └── fd: (1)-->(5) + │ └── stats: [rows=100, distinct(1)=100, null(1)=0, distinct(5)=1, null(5)=0] + │ histogram(5)= 0 100 + │ <--- '\x37000139' └── filters └── ((j:2->'a')->'b') <@ '"c"' [type=bool, outer=(2), immutable] @@ -1177,11 +1135,9 @@ select │ │ ├── ["7\x00\x019", "7\x00\x019"] │ │ ├── ["7a\x00\x019", "7a\x00\x019"] │ │ └── ["7a\x00\x02b\x00\x01\x12c\x00\x01", "7a\x00\x02b\x00\x01\x12c\x00\x01"] - │ ├── stats: [rows=100, distinct(1)=100, null(1)=0, distinct(5)=1, null(5)=0] - │ │ histogram(5)= 0 100 - │ │ <--- '\x37000139' - │ ├── key: (1) - │ └── fd: (1)-->(5) + │ └── stats: [rows=100, distinct(1)=100, null(1)=0, distinct(5)=1, null(5)=0] + │ histogram(5)= 0 100 + │ <--- '\x37000139' └── filters └── (j:2->'a') <@ '{"b": "c"}' [type=bool, outer=(2), immutable] @@ -1221,10 +1177,8 @@ select │ │ └── spans │ │ ├── ["7a\x00\x02\x00\x03\x00\x01*\x02\x00", "7a\x00\x02\x00\x03\x00\x01*\x02\x00"] │ │ └── ["7a\x00\x02\x00\x03\x00\x01*\x04\x00", "7a\x00\x02\x00\x03\x00\x01*\x04\x00"] - │ ├── stats: [rows=4e-07, distinct(1)=4e-07, null(1)=0, distinct(5)=4e-07, null(5)=0] - │ │ histogram(5)= - │ ├── key: (1) - │ └── fd: (1)-->(5) + │ └── stats: [rows=4e-07, distinct(1)=4e-07, null(1)=0, distinct(5)=4e-07, null(5)=0] + │ histogram(5)= └── filters └── (j:2->'a') @> '[1, 2]' [type=bool, outer=(2), immutable] @@ -1267,11 +1221,9 @@ select │ │ ├── ["7a\x00\x018", "7a\x00\x018"] │ │ ├── ["7a\x00\x02\x00\x03\x00\x01*\x02\x00", "7a\x00\x02\x00\x03\x00\x01*\x02\x00"] │ │ └── ["7a\x00\x02\x00\x03\x00\x01*\x04\x00", "7a\x00\x02\x00\x03\x00\x01*\x04\x00"] - │ ├── stats: [rows=100, distinct(1)=100, null(1)=0, distinct(5)=1, null(5)=0] - │ │ histogram(5)= 0 100 - │ │ <--- '\x37000139' - │ ├── key: (1) - │ └── fd: (1)-->(5) + │ └── stats: [rows=100, distinct(1)=100, null(1)=0, distinct(5)=1, null(5)=0] + │ histogram(5)= 0 100 + │ <--- '\x37000139' └── filters └── (j:2->'a') <@ '[1, 2]' [type=bool, outer=(2), immutable] @@ -1301,10 +1253,8 @@ index-join t │ └── spans │ ├── ["7a\x00\x02b\x00\x01\x12c\x00\x01", "7a\x00\x02b\x00\x01\x12c\x00\x01"] │ └── 
["7a\x00\x02b\x00\x02\x00\x03\x00\x01\x12c\x00\x01", "7a\x00\x02b\x00\x02\x00\x03\x00\x01\x12c\x00\x01"] - ├── stats: [rows=4e-07, distinct(1)=4e-07, null(1)=0, distinct(5)=4e-07, null(5)=0] - │ histogram(5)= - ├── key: (1) - └── fd: (1)-->(5) + └── stats: [rows=4e-07, distinct(1)=4e-07, null(1)=0, distinct(5)=4e-07, null(5)=0] + histogram(5)= # A query with fetch val and contains operators uses the inverted index when # the fetch val is on the right side. @@ -1347,10 +1297,8 @@ select │ │ ├── ["7a\x00\x02b\x00\x018", "7a\x00\x02b\x00\x018"] │ │ ├── ["7a\x00\x02b\x00\x02\x00\x03\x00\x01*\x02\x00", "7a\x00\x02b\x00\x02\x00\x03\x00\x01*\x02\x00"] │ │ └── ["7a\x00\x02b\x00\x02\x00\x03\x00\x01*\x04\x00", "7a\x00\x02b\x00\x02\x00\x03\x00\x01*\x04\x00"] - │ ├── stats: [rows=100, distinct(1)=100, null(1)=0, distinct(5)=1, null(5)=0] - │ │ histogram(5)= 0 100 - │ │ <--- '\x37000139' - │ ├── key: (1) - │ └── fd: (1)-->(5) + │ └── stats: [rows=100, distinct(1)=100, null(1)=0, distinct(5)=1, null(5)=0] + │ histogram(5)= 0 100 + │ <--- '\x37000139' └── filters └── '[1, 2]' @> ((j:2->'a')->'b') [type=bool, outer=(2), immutable] diff --git a/pkg/sql/opt/memo/testdata/stats/inverted-trigram b/pkg/sql/opt/memo/testdata/stats/inverted-trigram index 1989b3c145f5..73c56f052138 100644 --- a/pkg/sql/opt/memo/testdata/stats/inverted-trigram +++ b/pkg/sql/opt/memo/testdata/stats/inverted-trigram @@ -114,9 +114,7 @@ select │ │ └── spans │ │ ├── ["\x12bla\x00\x01", "\x12bla\x00\x01"] │ │ └── ["\x12lah\x00\x01", "\x12lah\x00\x01"] - │ ├── stats: [rows=111.1111, distinct(2)=111.111, null(2)=0, distinct(5)=100, null(5)=0] - │ ├── key: (2) - │ └── fd: (2)-->(5) + │ └── stats: [rows=111.1111, distinct(2)=111.111, null(2)=0, distinct(5)=100, null(5)=0] └── filters └── a:1 LIKE '%blah%' [type=bool, outer=(1), constraints=(/1: (/NULL - ])] @@ -248,11 +246,9 @@ select │ │ └── spans │ │ ├── ["\x12bla\x00\x01", "\x12bla\x00\x01"] │ │ └── ["\x12lah\x00\x01", "\x12lah\x00\x01"] - │ ├── stats: [rows=20, distinct(2)=4, null(2)=0, distinct(5)=2, null(5)=0] - │ │ histogram(5)= 0 10 0 10 0 0 - │ │ <--- '\x12626c610001' --- '\x126c61680001' --- '\x126c61680002' - │ ├── key: (2) - │ └── fd: (2)-->(5) + │ └── stats: [rows=20, distinct(2)=4, null(2)=0, distinct(5)=2, null(5)=0] + │ histogram(5)= 0 10 0 10 0 0 + │ <--- '\x12626c610001' --- '\x126c61680001' --- '\x126c61680002' └── filters └── a:1 LIKE '%blah%' [type=bool, outer=(1), constraints=(/1: (/NULL - ])] @@ -287,11 +283,9 @@ select │ │ ├── ["\x12ooo\x00\x01", "\x12ooo\x00\x01"] │ │ └── ["\x12zoo\x00\x01", "\x12zoo\x00\x01"] │ ├── flags: force-index=inv - │ ├── stats: [rows=1980, distinct(2)=396, null(2)=0, distinct(5)=2, null(5)=0] - │ │ histogram(5)= 0 990 0 990 - │ │ <--- '\x126f6f6f0001' --- '\x127a6f6f0001' - │ ├── key: (2) - │ └── fd: (2)-->(5) + │ └── stats: [rows=1980, distinct(2)=396, null(2)=0, distinct(5)=2, null(5)=0] + │ histogram(5)= 0 990 0 990 + │ <--- '\x126f6f6f0001' --- '\x127a6f6f0001' └── filters └── a:1 LIKE '%zooo%' [type=bool, outer=(1), constraints=(/1: (/NULL - ])] @@ -494,11 +488,9 @@ select │ │ └── spans │ │ ├── ["\x12bla\x00\x01", "\x12bla\x00\x01"] │ │ └── ["\x12lah\x00\x01", "\x12lah\x00\x01"] - │ ├── stats: [rows=20, distinct(2)=4, null(2)=0, distinct(5)=2, null(5)=0] - │ │ histogram(5)= 0 10 0 10 0 0 - │ │ <--- '\x12626c610001' --- '\x126c61680001' --- '\x126c61680002' - │ ├── key: (2) - │ └── fd: (2)-->(5) + │ └── stats: [rows=20, distinct(2)=4, null(2)=0, distinct(5)=2, null(5)=0] + │ histogram(5)= 0 10 0 10 0 0 + │ <--- '\x12626c610001' --- 
'\x126c61680001' --- '\x126c61680002' └── filters └── a:1 LIKE '%blah%' [type=bool, outer=(1), constraints=(/1: (/NULL - ])] @@ -533,11 +525,9 @@ select │ │ ├── ["\x12ooo\x00\x01", "\x12ooo\x00\x01"] │ │ └── ["\x12zoo\x00\x01", "\x12zoo\x00\x01"] │ ├── flags: force-index=inv - │ ├── stats: [rows=1980, distinct(2)=396, null(2)=0, distinct(5)=2, null(5)=0] - │ │ histogram(5)= 0 990 0 990 - │ │ <--- '\x126f6f6f0001' --- '\x127a6f6f0001' - │ ├── key: (2) - │ └── fd: (2)-->(5) + │ └── stats: [rows=1980, distinct(2)=396, null(2)=0, distinct(5)=2, null(5)=0] + │ histogram(5)= 0 990 0 990 + │ <--- '\x126f6f6f0001' --- '\x127a6f6f0001' └── filters └── a:1 LIKE '%zooo%' [type=bool, outer=(1), constraints=(/1: (/NULL - ])] diff --git a/pkg/sql/opt/memo/testdata/stats/partial-index-scan b/pkg/sql/opt/memo/testdata/stats/partial-index-scan index 54732016d1a1..790d8fd95522 100644 --- a/pkg/sql/opt/memo/testdata/stats/partial-index-scan +++ b/pkg/sql/opt/memo/testdata/stats/partial-index-scan @@ -1252,8 +1252,7 @@ project │ │ ├── ["B\xfd\x10\x00\x00\x00\x00\x00\x00\x01", "B\xfd\x12\x00\x00\x00\x00\x00\x00\x00") │ │ └── ["B\xfd\x14\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x14\x00\x00\x00\x00\x00\x00\x00"] │ ├── stats: [rows=16.66667, distinct(1)=16.6667, null(1)=0, distinct(3)=3, null(3)=0, distinct(7)=16.6667, null(7)=0] - │ ├── key: (1) - │ └── fd: (1)-->(7) + │ └── key: (1,7) └── filters └── st_intersects('010200000002000000000000000000E03F000000000000E03F666666666666E63F666666666666E63F', g:2) [type=bool, outer=(2), immutable, constraints=(/2: (/NULL - ])] @@ -1298,8 +1297,7 @@ project │ │ ├── ["B\xfd\x10\x00\x00\x00\x00\x00\x00\x01", "B\xfd\x12\x00\x00\x00\x00\x00\x00\x00") │ │ └── ["B\xfd\x14\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x14\x00\x00\x00\x00\x00\x00\x00"] │ ├── stats: [rows=16.66667, distinct(1)=16.6667, null(1)=0, distinct(3)=3, null(3)=0, distinct(7)=16.6667, null(7)=0] - │ ├── key: (1) - │ └── fd: (1)-->(7) + │ └── key: (1,7) └── filters ├── st_intersects('010200000002000000000000000000E03F000000000000E03F666666666666E63F666666666666E63F', g:2) [type=bool, outer=(2), immutable, constraints=(/2: (/NULL - ])] └── s:3 = 'banana' [type=bool, outer=(3), constraints=(/3: [/'banana' - /'banana']; tight), fd=()-->(3)] @@ -1365,8 +1363,7 @@ project │ │ ├── ["B\xfd\x10\x00\x00\x00\x00\x00\x00\x01", "B\xfd\x12\x00\x00\x00\x00\x00\x00\x00") │ │ └── ["B\xfd\x14\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x14\x00\x00\x00\x00\x00\x00\x00"] │ ├── stats: [rows=8.547009, distinct(1)=8.54701, null(1)=0, distinct(3)=3, null(3)=0, distinct(7)=8.54701, null(7)=0] - │ ├── key: (1) - │ └── fd: (1)-->(7) + │ └── key: (1,7) └── filters └── st_intersects('010200000002000000000000000000E03F000000000000E03F666666666666E63F666666666666E63F', g:2) [type=bool, outer=(2), immutable, constraints=(/2: (/NULL - ])] @@ -1409,8 +1406,7 @@ project │ │ ├── ["B\xfd\x10\x00\x00\x00\x00\x00\x00\x01", "B\xfd\x12\x00\x00\x00\x00\x00\x00\x00") │ │ └── ["B\xfd\x14\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x14\x00\x00\x00\x00\x00\x00\x00"] │ ├── stats: [rows=8.547009, distinct(1)=8.54701, null(1)=0, distinct(3)=3, null(3)=0, distinct(7)=8.54701, null(7)=0] - │ ├── key: (1) - │ └── fd: (1)-->(7) + │ └── key: (1,7) └── filters ├── st_intersects('010200000002000000000000000000E03F000000000000E03F666666666666E63F666666666666E63F', g:2) [type=bool, outer=(2), immutable, constraints=(/2: (/NULL - ])] └── s:3 = 'banana' [type=bool, outer=(3), constraints=(/3: [/'banana' - /'banana']; tight), fd=()-->(3)] @@ -1503,8 +1499,7 @@ project │ │ <--- 'banana' --- 
'cherry' │ │ histogram(7)= 0 0 1.8225e-10 100 18.757 0 0 0 │ │ <--- '\x42fd1000000000000001' ------------ '\x42fd1000000100000000' -------- '\x42fd1200000000000000' --- '\x42fd1400000000000001' - │ ├── key: (1) - │ └── fd: (1)-->(7) + │ └── key: (1,7) └── filters └── st_intersects('010200000002000000000000000000E03F000000000000E03F666666666666E63F666666666666E63F', g:2) [type=bool, outer=(2), immutable, constraints=(/2: (/NULL - ])] @@ -1555,8 +1550,7 @@ project │ │ <--- 'banana' --- 'cherry' │ │ histogram(7)= 0 0 1.8225e-10 100 18.757 0 0 0 │ │ <--- '\x42fd1000000000000001' ------------ '\x42fd1000000100000000' -------- '\x42fd1200000000000000' --- '\x42fd1400000000000001' - │ ├── key: (1) - │ └── fd: (1)-->(7) + │ └── key: (1,7) └── filters ├── st_intersects('010200000002000000000000000000E03F000000000000E03F666666666666E63F666666666666E63F', g:2) [type=bool, outer=(2), immutable, constraints=(/2: (/NULL - ])] └── s:3 = 'banana' [type=bool, outer=(3), constraints=(/3: [/'banana' - /'banana']; tight), fd=()-->(3)] @@ -1643,8 +1637,7 @@ project │ │ <--- 'banana' --- 'cherry' │ │ histogram(7)= 0 0 1.8657e-10 102.37 19.201 0 0 0 │ │ <--- '\x42fd1000000000000001' ------------ '\x42fd1000000100000000' -------- '\x42fd1200000000000000' --- '\x42fd1400000000000001' - │ ├── key: (1) - │ └── fd: (1)-->(7) + │ └── key: (1,7) └── filters └── st_intersects('010200000002000000000000000000E03F000000000000E03F666666666666E63F666666666666E63F', g:2) [type=bool, outer=(2), immutable, constraints=(/2: (/NULL - ])] @@ -1693,8 +1686,7 @@ project │ │ <--- 'banana' --- 'cherry' │ │ histogram(7)= 0 0 1.8657e-10 102.37 19.201 0 0 0 │ │ <--- '\x42fd1000000000000001' ------------ '\x42fd1000000100000000' -------- '\x42fd1200000000000000' --- '\x42fd1400000000000001' - │ ├── key: (1) - │ └── fd: (1)-->(7) + │ └── key: (1,7) └── filters ├── st_intersects('010200000002000000000000000000E03F000000000000E03F666666666666E63F666666666666E63F', g:2) [type=bool, outer=(2), immutable, constraints=(/2: (/NULL - ])] └── s:3 = 'banana' [type=bool, outer=(3), constraints=(/3: [/'banana' - /'banana']; tight), fd=()-->(3)] diff --git a/pkg/sql/opt/memo/testdata/stats/scan b/pkg/sql/opt/memo/testdata/stats/scan index d751358d8158..43d5691c1ea6 100644 --- a/pkg/sql/opt/memo/testdata/stats/scan +++ b/pkg/sql/opt/memo/testdata/stats/scan @@ -2217,10 +2217,8 @@ select │ │ ├── ["B\xfd\x10\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x10\x00\x00\x00\x00\x00\x00\x00"] │ │ ├── ["B\xfd\x10\x00\x00\x00\x00\x00\x00\x01", "B\xfd\x12\x00\x00\x00\x00\x00\x00\x00") │ │ └── ["B\xfd\x14\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x14\x00\x00\x00\x00\x00\x00\x00"] - │ ├── stats: [rows=1, distinct(2)=1, null(2)=0, distinct(5)=1, null(5)=0] - │ │ histogram(5)= - │ ├── key: (2) - │ └── fd: (2)-->(5) + │ └── stats: [rows=1, distinct(2)=1, null(2)=0, distinct(5)=1, null(5)=0] + │ histogram(5)= └── filters └── st_coveredby(geom:1, '0101000000000000000000F03F000000000000F03F') [type=bool, outer=(1), immutable, constraints=(/1: (/NULL - ])] diff --git a/pkg/sql/opt/norm/BUILD.bazel b/pkg/sql/opt/norm/BUILD.bazel index 4b2c94c8d4d7..f90ebaf5055b 100644 --- a/pkg/sql/opt/norm/BUILD.bazel +++ b/pkg/sql/opt/norm/BUILD.bazel @@ -91,6 +91,7 @@ go_test( "//pkg/sql/sem/catconstants", "//pkg/sql/sem/eval", "//pkg/sql/sem/tree", + "//pkg/sql/sessiondatapb", "//pkg/sql/types", "//pkg/testutils/datapathutils", "//pkg/util/leaktest", diff --git a/pkg/sql/opt/norm/factory_test.go b/pkg/sql/opt/norm/factory_test.go index 756f45e8a7b6..d8adff216cda 100644 --- 
a/pkg/sql/opt/norm/factory_test.go +++ b/pkg/sql/opt/norm/factory_test.go @@ -24,6 +24,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sem/catconstants" "github.com/cockroachdb/cockroach/pkg/sql/sem/eval" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" + "github.com/cockroachdb/cockroach/pkg/sql/sessiondatapb" "github.com/cockroachdb/cockroach/pkg/sql/types" ) @@ -84,6 +85,7 @@ func TestCopyAndReplace(t *testing.T) { } evalCtx := eval.MakeTestingEvalContext(cluster.MakeTestingClusterSettings()) + evalCtx.SessionData().PlanCacheMode = sessiondatapb.PlanCacheModeAuto var o xform.Optimizer testutils.BuildQuery(t, &o, cat, &evalCtx, "SELECT * FROM ab WHERE a = $1") diff --git a/pkg/sql/opt/norm/testdata/rules/comp b/pkg/sql/opt/norm/testdata/rules/comp index 0c594f77eab3..f4b30db924ff 100644 --- a/pkg/sql/opt/norm/testdata/rules/comp +++ b/pkg/sql/opt/norm/testdata/rules/comp @@ -1025,12 +1025,10 @@ scalar-group-by │ │ ├── key: (4) │ │ └── scan geom_geog@geom_geog_geom_idx,inverted │ │ ├── columns: rowid:4!null geom_inverted_key:7!null - │ │ ├── inverted constraint: /7/4 - │ │ │ └── spans - │ │ │ ├── ["B\x89", "B\xfd \x00\x00\x00\x00\x00\x00\x00") - │ │ │ └── ["B\xfd\xff\xff\xff\xff\xff\xff\xff\xff", "B\xfd\xff\xff\xff\xff\xff\xff\xff\xff"] - │ │ ├── key: (4) - │ │ └── fd: (4)-->(7) + │ │ └── inverted constraint: /7/4 + │ │ └── spans + │ │ ├── ["B\x89", "B\xfd \x00\x00\x00\x00\x00\x00\x00") + │ │ └── ["B\xfd\xff\xff\xff\xff\xff\xff\xff\xff", "B\xfd\xff\xff\xff\xff\xff\xff\xff\xff"] │ └── filters │ └── geom:1 && '0103000020E610000001000000050000000000000000000000000000000000000000000000000000000000000000005940000000000000594000000000000059400000000000005940000000000000000000000000000000000000000000000000' [outer=(1), immutable, constraints=(/1: (/NULL - ])] └── aggregations diff --git a/pkg/sql/opt/ops/relational.opt b/pkg/sql/opt/ops/relational.opt index 43be81ae6e81..e62ce6f5aef9 100644 --- a/pkg/sql/opt/ops/relational.opt +++ b/pkg/sql/opt/ops/relational.opt @@ -229,6 +229,11 @@ define InvertedFilterPrivate { # PreFiltererState represents the optional pre-filtering state. PreFiltererState PreFiltererState + # PKCols is the set of primary key columns in the underlying table of the + # input inverted index scan. It is used to build FDs for the + # InvertedFilter. + PKCols ColSet + # The InvertedColumn is the id of the inverted column in the input. It is # used during execution to map rows from the input to their corresponding # spans in the SpanExpression.
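The PKCols field added above is the hook for deriving functional dependencies on the InvertedFilter. Below is a minimal sketch of the idea, using toy stand-ins rather than the real opt.ColSet and props.FuncDepSet types (the names colSet, fd, and invertedFilterFD are illustrative only, not CockroachDB APIs): once the inverted filter has deduplicated its input rows, the surviving primary key columns again form a key that determines every other output column.

	package main

	import "fmt"

	// colSet is a toy stand-in for opt.ColSet.
	type colSet map[int]bool

	func (s colSet) subsetOf(t colSet) bool {
		for c := range s {
			if !t[c] {
				return false
			}
		}
		return true
	}

	// fd is a toy functional dependency: key determines cols.
	type fd struct{ key, cols colSet }

	// invertedFilterFD sketches how PKCols yields an FD for the InvertedFilter:
	// if every primary key column is among the filter's output columns, the PK
	// is a key of the (deduplicated) output and determines the other columns.
	func invertedFilterFD(pkCols, outCols colSet) (fd, bool) {
		if len(pkCols) == 0 || !pkCols.subsetOf(outCols) {
			return fd{}, false
		}
		return fd{key: pkCols, cols: outCols}, true
	}

	func main() {
		pk := colSet{1: true}           // e.g. the PK column k:1
		out := colSet{1: true, 5: true} // k:1 plus another output column
		if d, ok := invertedFilterFD(pk, out); ok {
			fmt.Printf("FD: %v --> %v\n", d.key, d.cols)
		}
	}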
diff --git a/pkg/sql/opt/optbuilder/create_function.go b/pkg/sql/opt/optbuilder/create_function.go index 82842714292f..94b042e1ecae 100644 --- a/pkg/sql/opt/optbuilder/create_function.go +++ b/pkg/sql/opt/optbuilder/create_function.go @@ -192,8 +192,13 @@ func (b *Builder) buildCreateFunction(cf *tree.CreateRoutine, inScope *scope) (o if param.Class == tree.RoutineParamInOut && param.Name == "" { panic(unimplemented.NewWithIssue(121251, "unnamed INOUT parameters are not yet supported")) } - if param.IsInParam() && typ.IsPolymorphicType() { - sawPolymorphicInParam = true + if param.IsInParam() { + if typ.Family() == types.VoidFamily { + panic(pgerror.Newf(pgcode.InvalidFunctionDefinition, "SQL functions cannot have arguments of type VOID")) + } + if typ.IsPolymorphicType() { + sawPolymorphicInParam = true + } } if param.IsOutParam() { outParamTypes = append(outParamTypes, typ) diff --git a/pkg/sql/opt/optgen/cmd/optgen/rule_gen.go b/pkg/sql/opt/optgen/cmd/optgen/rule_gen.go index dcf604eb2a93..1b530d9fc311 100644 --- a/pkg/sql/opt/optgen/cmd/optgen/rule_gen.go +++ b/pkg/sql/opt/optgen/cmd/optgen/rule_gen.go @@ -894,7 +894,7 @@ func (g *newRuleGen) genBoundStatements(e lang.Expr) { // that genNestedExpr can generate references to those statements. func (g *newRuleGen) genNestedExpr(e lang.Expr) { if label, ok := g.boundStmts[e]; ok { - g.w.write(label) + g.w.write("%s", label) return } @@ -915,10 +915,10 @@ func (g *newRuleGen) genNestedExpr(e lang.Expr) { // untyped version. varName = typed } - g.w.write(varName) + g.w.write("%s", varName) case *lang.LetExpr: - g.w.write(string(t.Result.Label)) + g.w.write("%s", string(t.Result.Label)) case *lang.StringExpr: // Literal string expressions construct DString datums. diff --git a/pkg/sql/opt/props/histogram.go b/pkg/sql/opt/props/histogram.go index e4a6e3c83151..4ff581784ede 100644 --- a/pkg/sql/opt/props/histogram.go +++ b/pkg/sql/opt/props/histogram.go @@ -182,6 +182,17 @@ func (h *Histogram) maxDistinctValuesCount() float64 { return count } +// MaxFrequency returns the maximum value of NumEq across all histogram buckets. +func (h *Histogram) MaxFrequency() float64 { + var mf float64 + for i := range h.buckets { + if numEq := h.numEq(i); numEq > mf { + mf = numEq + } + } + return mf +} + // maxDistinctValuesInRange returns the maximum number of distinct values in // the range (lowerBound, upperBound). 
It returns ok=false when it is not // possible to determine a finite value (which is the case for all types other diff --git a/pkg/sql/opt/props/histogram_test.go b/pkg/sql/opt/props/histogram_test.go index c2798b058b17..b5387c950acd 100644 --- a/pkg/sql/opt/props/histogram_test.go +++ b/pkg/sql/opt/props/histogram_test.go @@ -139,27 +139,34 @@ func TestHistogram(t *testing.T) { if distinct != expected { t.Fatalf("expected %f but found %f", expected, distinct) } + maxFrequency, expected := h.MaxFrequency(), float64(35) + if maxFrequency != expected { + t.Fatalf("expected %f but found %f", expected, maxFrequency) + } testData := []struct { - constraint string - buckets []cat.HistogramBucket - count float64 - maxDistinct float64 - distinct float64 + constraint string + buckets []cat.HistogramBucket + count float64 + maxDistinct float64 + distinct float64 + maxFrequency float64 }{ { - constraint: "/1: [/0 - /0]", - buckets: []cat.HistogramBucket{}, - count: 0, - maxDistinct: 0, - distinct: 0, + constraint: "/1: [/0 - /0]", + buckets: []cat.HistogramBucket{}, + count: 0, + maxDistinct: 0, + distinct: 0, + maxFrequency: 0, }, { - constraint: "/1: [/50 - /100]", - buckets: []cat.HistogramBucket{}, - count: 0, - maxDistinct: 0, - distinct: 0, + constraint: "/1: [/50 - /100]", + buckets: []cat.HistogramBucket{}, + count: 0, + maxDistinct: 0, + distinct: 0, + maxFrequency: 0, }, { constraint: "/1: [ - /1] [/11 - /24] [/30 - /45]", @@ -172,9 +179,10 @@ func TestHistogram(t *testing.T) { {NumRange: 0, NumEq: 0, DistinctRange: 0, UpperBound: tree.NewDInt(30)}, {NumRange: 40, NumEq: 35, DistinctRange: 7, UpperBound: tree.NewDInt(42)}, }, - count: 80, - maxDistinct: 17, - distinct: 11.14, + count: 80, + maxDistinct: 17, + distinct: 11.14, + maxFrequency: 35, }, { constraint: "/1: [/5 - /10] [/15 - /32] [/34 - /36] [/38 - ]", @@ -192,9 +200,10 @@ func TestHistogram(t *testing.T) { {NumRange: 0, NumEq: 0, DistinctRange: 0, UpperBound: tree.NewDInt(37)}, {NumRange: 14.55, NumEq: 35, DistinctRange: 2.55, UpperBound: tree.NewDInt(42)}, }, - count: 80.46, - maxDistinct: 16.73, - distinct: 12.13, + count: 80.46, + maxDistinct: 16.73, + distinct: 12.13, + maxFrequency: 35, }, { constraint: "/1: [ - /41]", @@ -207,9 +216,10 @@ func TestHistogram(t *testing.T) { {NumRange: 0, NumEq: 0, DistinctRange: 0, UpperBound: tree.NewDInt(30)}, {NumRange: 36.36, NumEq: 3.64, DistinctRange: 6.36, UpperBound: tree.NewDInt(41)}, }, - count: 56, - maxDistinct: 21, - distinct: 14.36, + count: 56, + maxDistinct: 21, + distinct: 14.36, + maxFrequency: 5, }, { constraint: "/1: [/1 - ]", @@ -222,9 +232,10 @@ func TestHistogram(t *testing.T) { {NumRange: 0, NumEq: 0, DistinctRange: 0, UpperBound: tree.NewDInt(30)}, {NumRange: 40, NumEq: 35, DistinctRange: 7, UpperBound: tree.NewDInt(42)}, }, - count: 91, - maxDistinct: 22, - distinct: 15, + count: 91, + maxDistinct: 22, + distinct: 15, + maxFrequency: 35, }, { constraint: "/1: [/40 - /40]", @@ -233,9 +244,10 @@ func TestHistogram(t *testing.T) { buckets: []cat.HistogramBucket{ {NumRange: 0, NumEq: 5.71, DistinctRange: 0, UpperBound: tree.NewDInt(40)}, }, - count: 5.71, - maxDistinct: 1, - distinct: 1, + count: 5.71, + maxDistinct: 1, + distinct: 1, + maxFrequency: 5.71, }, { constraint: "/1: [/0 - /100]", @@ -248,9 +260,10 @@ func TestHistogram(t *testing.T) { {NumRange: 0, DistinctRange: 0, NumEq: 0, UpperBound: tree.NewDInt(30)}, {NumRange: 40, DistinctRange: 7, NumEq: 35, UpperBound: tree.NewDInt(42)}, }, - count: 91, - maxDistinct: 22, - distinct: 15, + count: 91, + maxDistinct: 
22, + distinct: 15, + maxFrequency: 35, }, // Tests with multiple columns. @@ -265,9 +278,10 @@ func TestHistogram(t *testing.T) { {NumRange: 0, NumEq: 0, DistinctRange: 0, UpperBound: tree.NewDInt(30)}, {NumRange: 40, NumEq: 35, DistinctRange: 7, UpperBound: tree.NewDInt(42)}, }, - count: 80, - maxDistinct: 17, - distinct: 11.14, + count: 80, + maxDistinct: 17, + distinct: 11.14, + maxFrequency: 35, }, { constraint: "/2/1: [/3 - /3/1] [/3/11 - /3/24] [/3/30 - /3/45]", @@ -280,9 +294,10 @@ func TestHistogram(t *testing.T) { {NumRange: 0, NumEq: 0, DistinctRange: 0, UpperBound: tree.NewDInt(30)}, {NumRange: 40, NumEq: 35, DistinctRange: 7, UpperBound: tree.NewDInt(42)}, }, - count: 80, - maxDistinct: 17, - distinct: 11.14, + count: 80, + maxDistinct: 17, + distinct: 11.14, + maxFrequency: 35, }, { constraint: "/2/1/3: [/1/40/2 - /1/40/3]", @@ -291,9 +306,10 @@ func TestHistogram(t *testing.T) { buckets: []cat.HistogramBucket{ {NumRange: 0, NumEq: 5.71, DistinctRange: 0, UpperBound: tree.NewDInt(40)}, }, - count: 5.71, - maxDistinct: 1, - distinct: 1, + count: 5.71, + maxDistinct: 1, + distinct: 1, + maxFrequency: 5.71, }, { constraint: "/2/1/3: [/1/40/2 - /1/40/2] [/1/40/4 - /1/40/4] [/1/40/6 - /1/40/6]", @@ -302,9 +318,10 @@ func TestHistogram(t *testing.T) { buckets: []cat.HistogramBucket{ {NumRange: 0, NumEq: 5.71, DistinctRange: 0, UpperBound: tree.NewDInt(40)}, }, - count: 5.71, - maxDistinct: 1, - distinct: 1, + count: 5.71, + maxDistinct: 1, + distinct: 1, + maxFrequency: 5.71, }, } @@ -330,6 +347,10 @@ func TestHistogram(t *testing.T) { if testData[i].distinct != distinct { t.Fatalf("expected %f but found %f", testData[i].distinct, distinct) } + maxFrequency := roundVal(filtered.MaxFrequency()) + if testData[i].maxFrequency != maxFrequency { + t.Fatalf("expected %f but found %f", testData[i].maxFrequency, maxFrequency) + } roundHistogram(filtered) if !reflect.DeepEqual(testData[i].buckets, filtered.buckets) { t.Fatalf("expected %v but found %v", testData[i].buckets, filtered.buckets) @@ -1162,6 +1183,11 @@ func BenchmarkHistogram(b *testing.B) { h.DistinctValuesCount() } }) + b.Run("MaxFrequency", func(b *testing.B) { + for i := 0; i < b.N; i++ { + h.MaxFrequency() + } + }) b.Run("Filter", func(b *testing.B) { for i := 0; i < b.N; i++ { h.Filter(ctx, c) diff --git a/pkg/sql/opt/testutils/opttester/BUILD.bazel b/pkg/sql/opt/testutils/opttester/BUILD.bazel index 0ae32ad6f3b2..17368a6684c0 100644 --- a/pkg/sql/opt/testutils/opttester/BUILD.bazel +++ b/pkg/sql/opt/testutils/opttester/BUILD.bazel @@ -45,6 +45,7 @@ go_library( "//pkg/sql/sem/eval", "//pkg/sql/sem/tree", "//pkg/sql/sem/volatility", + "//pkg/sql/sessiondatapb", "//pkg/sql/stats", "//pkg/testutils/datapathutils", "//pkg/testutils/floatcmp", diff --git a/pkg/sql/opt/testutils/opttester/opt_tester.go b/pkg/sql/opt/testutils/opttester/opt_tester.go index 08dd81b80d20..b5424907d8b7 100644 --- a/pkg/sql/opt/testutils/opttester/opt_tester.go +++ b/pkg/sql/opt/testutils/opttester/opt_tester.go @@ -60,6 +60,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sem/eval" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sem/volatility" + "github.com/cockroachdb/cockroach/pkg/sql/sessiondatapb" "github.com/cockroachdb/cockroach/pkg/sql/stats" "github.com/cockroachdb/cockroach/pkg/testutils/datapathutils" "github.com/cockroachdb/cockroach/pkg/testutils/floatcmp" @@ -168,6 +169,9 @@ type Flags struct { // memo/check_expr.go. 
DisableCheckExpr bool + // Generic enables optimizations for generic query plans. + Generic bool + // ExploreTraceRule restricts the ExploreTrace output to only show the effects // of a specific rule. ExploreTraceRule opt.RuleName @@ -477,6 +481,11 @@ func New(catalog cat.Catalog, sqlStr string) *OptTester { // // - disable-check-expr: skips the assertions in memo/check_expr.go. // +// - generic: enables optimizations for generic query plans. +// NOTE: This flag sets the plan_cache_mode session setting to "auto", which +// cannot be done via the "set" flag because it requires a CCL license, +// which optimizer tests are not set up to utilize. +// // - rule: used with exploretrace; the value is the name of a rule. When // specified, the exploretrace output is filtered to only show expression // changes due to that specific rule. @@ -502,12 +511,6 @@ func New(catalog cat.Catalog, sqlStr string) *OptTester { // - table: used to set the current table used by the command. This is used by // the inject-stats command. // -// - stats-quality-prefix: must be used with the stats-quality command. If -// rewriteActualFlag=true, indicates that a table should be created with the -// given prefix for the output of each subexpression in the query. Otherwise, -// outputs the name of the table that would be created for each -// subexpression. -// // - ignore-tables: specifies the set of stats tables for which stats quality // comparisons should not be outputted. Only used with the stats-quality // command. Note that tables can always be added to the `ignore-tables` set @@ -523,16 +526,6 @@ func New(catalog cat.Catalog, sqlStr string) *OptTester { // // - inject-stats: the file path is relative to the test file. // -// - join-limit: sets the value for SessionData.ReorderJoinsLimit, which -// indicates the number of joins at which the optimizer should stop -// attempting to reorder. -// -// - prefer-lookup-joins-for-fks: sets SessionData.PreferLookupJoinsForFKs to -// true, causing foreign key operations to prefer lookup joins. -// -// - null-ordered-last: sets SessionData.NullOrderedLast to true, which orders -// NULL values last in ascending order. -// // - cascade-levels: used to limit the depth of recursive cascades for // build-cascades. // @@ -1003,6 +996,9 @@ func (f *Flags) Set(arg datadriven.CmdArg) error { case "disable-check-expr": f.DisableCheckExpr = true + case "generic": + f.evalCtx.SessionData().PlanCacheMode = sessiondatapb.PlanCacheModeAuto + case "rule": if len(arg.Vals) != 1 { return fmt.Errorf("rule requires one argument") diff --git a/pkg/sql/opt/testutils/opttester/reorder_joins.go b/pkg/sql/opt/testutils/opttester/reorder_joins.go index 637fd0d176f0..d1d6856e3472 100644 --- a/pkg/sql/opt/testutils/opttester/reorder_joins.go +++ b/pkg/sql/opt/testutils/opttester/reorder_joins.go @@ -55,11 +55,11 @@ func (ot *OptTester) ReorderJoins() (string, error) { if treeNum > 1 { // This isn't the first Reorder call. Output the number of joins added to // the memo by the last call to Reorder. 
- ot.output(fmt.Sprintf("Joins Considered: %v\n", joinsConsidered)) + ot.output("Joins Considered: %v\n", joinsConsidered) joinsConsidered = 0 } ot.separator("-") - ot.output(fmt.Sprintf("Join Tree #%d\n", treeNum)) + ot.output("Join Tree #%d\n", treeNum) ot.separator("-") ot.indent(o.FormatExpr(join, memo.ExprFmtHideAll, false /* redactableValues */)) ot.output("Vertexes\n") @@ -78,7 +78,7 @@ func (ot *OptTester) ReorderJoins() (string, error) { func(left, right, all, joinRefs, selRefs []memo.RelExpr, op opt.Operator) { relsToJoin := jof.formatVertexSet(all) if relsToJoin != relsJoinedLast { - ot.output(fmt.Sprintf("Joining %s\n", relsToJoin)) + ot.output("Joining %s\n", relsToJoin) relsJoinedLast = relsToJoin } var selString string @@ -102,11 +102,11 @@ func (ot *OptTester) ReorderJoins() (string, error) { if err != nil { return "", err } - ot.output(fmt.Sprintf("Joins Considered: %d\n", joinsConsidered)) + ot.output("Joins Considered: %d\n", joinsConsidered) ot.separator("=") ot.output("Final Plan\n") ot.separator("=") - ot.output(ot.FormatExpr(expr)) + ot.output("%s", ot.FormatExpr(expr)) return ot.builder.String(), err } diff --git a/pkg/sql/opt/testutils/testcat/vtable.go b/pkg/sql/opt/testutils/testcat/vtable.go index 8cb7f7a0cbea..eea2d7912cd5 100644 --- a/pkg/sql/opt/testutils/testcat/vtable.go +++ b/pkg/sql/opt/testutils/testcat/vtable.go @@ -138,6 +138,7 @@ var systemTables = []string{ systemschema.SystemMVCCStatisticsSchema, systemschema.TxnExecutionStatsTableSchema, systemschema.StatementExecutionStatsTableSchema, + systemschema.TableMetadataTableSchema, } func init() { diff --git a/pkg/sql/opt/xform/BUILD.bazel b/pkg/sql/opt/xform/BUILD.bazel index 83415511a021..987994e8ef6a 100644 --- a/pkg/sql/opt/xform/BUILD.bazel +++ b/pkg/sql/opt/xform/BUILD.bazel @@ -50,6 +50,7 @@ go_library( "//pkg/sql/sem/eval", "//pkg/sql/sem/tree", "//pkg/sql/sem/volatility", + "//pkg/sql/sessiondatapb", "//pkg/sql/types", "//pkg/util/buildutil", "//pkg/util/cancelchecker", diff --git a/pkg/sql/opt/xform/generic_funcs.go b/pkg/sql/opt/xform/generic_funcs.go index 362e7d34ef78..805d431f728f 100644 --- a/pkg/sql/opt/xform/generic_funcs.go +++ b/pkg/sql/opt/xform/generic_funcs.go @@ -17,9 +17,16 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/opt/memo" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sem/volatility" + "github.com/cockroachdb/cockroach/pkg/sql/sessiondatapb" "github.com/cockroachdb/cockroach/pkg/sql/types" ) +// GenericRulesEnabled returns true if rules for optimizing generic query plans +// are enabled, based on the plan_cache_mode session setting. +func (c *CustomFuncs) GenericRulesEnabled() bool { + return c.e.evalCtx.SessionData().PlanCacheMode != sessiondatapb.PlanCacheModeForceCustom +} + // HasPlaceholdersOrStableExprs returns true if the given relational expression's subtree has // at least one placeholder. 
func (c *CustomFuncs) HasPlaceholdersOrStableExprs(e memo.RelExpr) bool { diff --git a/pkg/sql/opt/xform/index_scan_builder.go b/pkg/sql/opt/xform/index_scan_builder.go index 097c2c0e4ca6..07ff6c8a53f2 100644 --- a/pkg/sql/opt/xform/index_scan_builder.go +++ b/pkg/sql/opt/xform/index_scan_builder.go @@ -93,6 +93,7 @@ func (b *indexScanBuilder) AddConstProjections(proj memo.ProjectionsExpr) { func (b *indexScanBuilder) AddInvertedFilter( spanExpr *inverted.SpanExpression, pfState *invertedexpr.PreFiltererStateForInvertedFilterer, + pkCols opt.ColSet, invertedCol opt.ColumnID, ) { if spanExpr != nil { @@ -105,6 +106,7 @@ func (b *indexScanBuilder) AddInvertedFilter( b.invertedFilterPrivate = memo.InvertedFilterPrivate{ InvertedExpression: spanExpr, PreFiltererState: pfState, + PKCols: pkCols, InvertedColumn: invertedCol, } } diff --git a/pkg/sql/opt/xform/limit_funcs.go b/pkg/sql/opt/xform/limit_funcs.go index eb6bad33441f..9b28a44c7247 100644 --- a/pkg/sql/opt/xform/limit_funcs.go +++ b/pkg/sql/opt/xform/limit_funcs.go @@ -74,6 +74,12 @@ func (c *CustomFuncs) CanLimitFilteredScan( return ok } +// PushLimitIntoProjectFilteredScanEnabled returns true if its eponymous rule is +// enabled via its session setting. +func (c *CustomFuncs) PushLimitIntoProjectFilteredScanEnabled() bool { + return c.e.evalCtx.SessionData().OptimizerPushLimitIntoProjectFilteredScan +} + // GenerateLimitedScans enumerates all non-inverted and non-partial secondary // indexes on the Scan operator's table and tries to create new limited Scan // operators from them. Since this only needs to be done once per table, diff --git a/pkg/sql/opt/xform/rules/generic.opt b/pkg/sql/opt/xform/rules/generic.opt index c49631511e65..4b89e4ab00c9 100644 --- a/pkg/sql/opt/xform/rules/generic.opt +++ b/pkg/sql/opt/xform/rules/generic.opt @@ -29,7 +29,9 @@ # [GenerateParameterizedJoin, Explore] (Select - $scan:(Scan $scanPrivate:*) & (IsCanonicalScan $scanPrivate) + $scan:(Scan $scanPrivate:*) & + (GenericRulesEnabled) & + (IsCanonicalScan $scanPrivate) $filters:* & (HasPlaceholdersOrStableExprs (Root)) & (Let diff --git a/pkg/sql/opt/xform/rules/limit.opt b/pkg/sql/opt/xform/rules/limit.opt index daad3cc25bef..b57f1abc1cd8 100644 --- a/pkg/sql/opt/xform/rules/limit.opt +++ b/pkg/sql/opt/xform/rules/limit.opt @@ -35,6 +35,59 @@ => (Scan (LimitScanPrivate $scanPrivate $limit $ordering)) +# PushLimitIntoProjectFilteredScan is similar to PushLimitIntoFilteredScan, but +# matches when there is a Project expression between the Limit and the filtered +# Scan. +# +# This rule is useful when GenerateConstrainedScans generates a Project above a +# partial index Scan that produces column(s) held constant by the partial +# index predicate. For example, consider the schema and query: +# +# CREATE TABLE t (a INT PRIMARY KEY, b INT, INDEX a_b_idx (a) WHERE b = 1) +# SELECT * FROM t WHERE a > 0 AND b = 1 LIMIT 5 +# +# After GenerateConstrainedScans fires the memo will contain an expression tree +# like: +# +# limit +# ├── columns: a:1 b:2 +# ├── project +# │ ├── columns: b:2 a:1 +# │ ├── scan t@a_b_idx,partial +# │ │ ├── columns: a:1 +# │ │ └── constraint: /1: [/1 - ] +# │ └── projections +# │ └── 1 [as=b:2] +# └── 5 +# +# While the Project producing b:2 is beneficial because it eliminates the need +# for an IndexJoin, it also prevents PushLimitIntoFilteredScan from pushing the +# limit into the scan. PushLimitIntoProjectFilteredScan matches this specific +# pattern to solve the issue.
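+#
+# For contrast, here is a sketch of the tree after the rule fires (the
+# testdata cases for PushLimitIntoProjectFilteredScan below show the same
+# shape): the limit moves below the Project and becomes a hard limit on the
+# scan:
+#
+# project
+# ├── columns: a:1 b:2
+# ├── scan t@a_b_idx,partial
+# │ ├── columns: a:1
+# │ ├── constraint: /1: [/1 - ]
+# │ └── limit: 5
+# └── projections
+# └── 1 [as=b:2]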
+# +# This rule is similar to the PushLimitIntoProject normalization rule. +# Unfortunately, that rule does not apply here because normalization rules +# cannot fire on parent expressions when alternative expressions are +# generated for their child groups during exploration. So, an exploration rule +# with similar behavior is necessary. +[PushLimitIntoProjectFilteredScan, Explore] +(Limit + (Project + (Scan $scanPrivate:*) + $projections:* + $passthrough:* + ) & + (PushLimitIntoProjectFilteredScanEnabled) + (Const $limit:* & (IsPositiveInt $limit)) + $ordering:* & (CanLimitFilteredScan $scanPrivate $ordering) +) +=> +(Project + (Scan (LimitScanPrivate $scanPrivate $limit $ordering)) + $projections + $passthrough +) + # PushLimitIntoIndexJoin pushes a limit through an index join. Since index # lookup can be expensive, it's always better to discard rows beforehand. [PushLimitIntoIndexJoin, Explore] diff --git a/pkg/sql/opt/xform/select_funcs.go b/pkg/sql/opt/xform/select_funcs.go index e206d48df977..182aab238659 100644 --- a/pkg/sql/opt/xform/select_funcs.go +++ b/pkg/sql/opt/xform/select_funcs.go @@ -921,7 +921,7 @@ func (c *CustomFuncs) GenerateInvertedIndexScans( // Add an inverted filter if needed. if needInvertedFilter { - sb.AddInvertedFilter(spanExpr, pfState, invertedCol) + sb.AddInvertedFilter(spanExpr, pfState, pkCols, invertedCol) } // If remaining filters exists, split them into two parts: one that can diff --git a/pkg/sql/opt/xform/testdata/external/activerecord b/pkg/sql/opt/xform/testdata/external/activerecord index 85f36cbf3b52..10afc7344f4e 100644 --- a/pkg/sql/opt/xform/testdata/external/activerecord +++ b/pkg/sql/opt/xform/testdata/external/activerecord @@ -201,7 +201,7 @@ sort │ │ └── filters │ │ ├── column86:86 = object_id:82 [outer=(82,86), constraints=(/82: (/NULL - ]; /86: (/NULL - ]), fd=(82)==(86), (86)==(82)] │ │ ├── sub_id:83 = attnum:6 [outer=(6,83), constraints=(/6: (/NULL - ]; /83: (/NULL - ]), fd=(6)==(83), (83)==(6)] - │ │ └── attrelid:1 < 4294966970 [outer=(1), constraints=(/1: (/NULL - /4294966969]; tight)] + │ │ └── attrelid:1 < 4294966969 [outer=(1), constraints=(/1: (/NULL - /4294966968]; tight)] │ └── aggregations │ ├── const-agg [as=attname:2, outer=(2)] │ │ └── attname:2 diff --git a/pkg/sql/opt/xform/testdata/external/hibernate b/pkg/sql/opt/xform/testdata/external/hibernate index 6a1d589183df..37ec682218cb 100644 --- a/pkg/sql/opt/xform/testdata/external/hibernate +++ b/pkg/sql/opt/xform/testdata/external/hibernate @@ -886,7 +886,7 @@ project └── filters └── min:14 = $1 [outer=(14), constraints=(/14: (/NULL - ]), fd=()-->(14)] -opt +opt generic select person0_.id as id1_2_, person0_.address as address2_2_, @@ -951,7 +951,7 @@ project └── filters └── max:16 = 0 [outer=(16), constraints=(/16: [/0 - /0]; tight), fd=()-->(16)] -opt +opt generic select person0_.id as id1_2_, person0_.address as address2_2_, @@ -1016,7 +1016,7 @@ project │ └── filters (true) └── filters (true) -opt +opt generic select person0_.id as id1_2_, person0_.address as address2_2_, diff --git a/pkg/sql/opt/xform/testdata/external/liquibase b/pkg/sql/opt/xform/testdata/external/liquibase index 0cfad963062f..790605fcbe03 100644 --- a/pkg/sql/opt/xform/testdata/external/liquibase +++ b/pkg/sql/opt/xform/testdata/external/liquibase @@ -206,7 +206,7 @@ project │ │ │ │ │ │ │ │ ├── scan kv_catalog_comments │ │ │ │ │ │ │ │ │ └── columns: crdb_internal.kv_catalog_comments.classoid:176!null crdb_internal.kv_catalog_comments.objoid:177!null
crdb_internal.kv_catalog_comments.objsubid:178!null crdb_internal.kv_catalog_comments.description:179!null │ │ │ │ │ │ │ │ └── filters - │ │ │ │ │ │ │ │ └── crdb_internal.kv_catalog_comments.classoid:176 != 4294967085 [outer=(176), constraints=(/176: (/NULL - /4294967084] [/4294967086 - ]; tight)] + │ │ │ │ │ │ │ │ └── crdb_internal.kv_catalog_comments.classoid:176 != 4294967084 [outer=(176), constraints=(/176: (/NULL - /4294967083] [/4294967085 - ]; tight)] │ │ │ │ │ │ │ └── projections │ │ │ │ │ │ │ └── crdb_internal.kv_catalog_comments.objsubid:178::INT8 [as=objsubid:185, outer=(178), immutable] │ │ │ │ │ │ └── filters diff --git a/pkg/sql/opt/xform/testdata/external/navicat b/pkg/sql/opt/xform/testdata/external/navicat index 69542b82d1f2..d9e3528e4e4c 100644 --- a/pkg/sql/opt/xform/testdata/external/navicat +++ b/pkg/sql/opt/xform/testdata/external/navicat @@ -210,7 +210,7 @@ sort │ │ │ │ │ │ │ │ ├── scan kv_catalog_comments │ │ │ │ │ │ │ │ │ └── columns: crdb_internal.kv_catalog_comments.classoid:176!null crdb_internal.kv_catalog_comments.objoid:177!null crdb_internal.kv_catalog_comments.objsubid:178!null crdb_internal.kv_catalog_comments.description:179!null │ │ │ │ │ │ │ │ └── filters - │ │ │ │ │ │ │ │ └── crdb_internal.kv_catalog_comments.classoid:176 != 4294967085 [outer=(176), constraints=(/176: (/NULL - /4294967084] [/4294967086 - ]; tight)] + │ │ │ │ │ │ │ │ └── crdb_internal.kv_catalog_comments.classoid:176 != 4294967084 [outer=(176), constraints=(/176: (/NULL - /4294967083] [/4294967085 - ]; tight)] │ │ │ │ │ │ │ └── projections │ │ │ │ │ │ │ └── crdb_internal.kv_catalog_comments.objsubid:178::INT8 [as=objsubid:185, outer=(178), immutable] │ │ │ │ │ │ └── filters diff --git a/pkg/sql/opt/xform/testdata/external/nova b/pkg/sql/opt/xform/testdata/external/nova index db8f578f49fb..a0d420805bcd 100644 --- a/pkg/sql/opt/xform/testdata/external/nova +++ b/pkg/sql/opt/xform/testdata/external/nova @@ -107,7 +107,7 @@ create table instance_type_extra_specs ) ---- -opt +opt generic select anon_1.flavors_created_at as anon_1_flavors_created_at, anon_1.flavors_updated_at as anon_1_flavors_updated_at, anon_1.flavors_id as anon_1_flavors_id, @@ -710,7 +710,7 @@ sort └── filters └── instance_type_extra_specs_1.instance_type_id:22 = instance_types.id:1 [outer=(1,22), constraints=(/1: (/NULL - ]; /22: (/NULL - ]), fd=(1)==(22), (22)==(1)] -opt +opt generic select anon_1.instance_types_created_at as anon_1_instance_types_created_at, anon_1.instance_types_updated_at as anon_1_instance_types_updated_at, anon_1.instance_types_deleted_at as anon_1_instance_types_deleted_at, @@ -861,7 +861,7 @@ project │ └── instance_type_extra_specs_1.deleted:37 = $7 [outer=(37), constraints=(/37: (/NULL - ]), fd=()-->(37)] └── filters (true) -opt +opt generic select anon_1.instance_types_created_at as anon_1_instance_types_created_at, anon_1.instance_types_updated_at as anon_1_instance_types_updated_at, anon_1.instance_types_deleted_at as anon_1_instance_types_deleted_at, @@ -1026,7 +1026,7 @@ project │ └── instance_type_extra_specs_1.deleted:37 = $7 [outer=(37), constraints=(/37: (/NULL - ]), fd=()-->(37)] └── filters (true) -opt +opt generic select anon_1.flavors_created_at as anon_1_flavors_created_at, anon_1.flavors_updated_at as anon_1_flavors_updated_at, anon_1.flavors_id as anon_1_flavors_id, @@ -1166,7 +1166,7 @@ project │ └── filters (true) └── filters (true) -opt +opt generic select anon_1.flavors_created_at as anon_1_flavors_created_at, anon_1.flavors_updated_at as anon_1_flavors_updated_at, 
anon_1.flavors_id as anon_1_flavors_id, @@ -1609,7 +1609,7 @@ sort └── filters └── instance_type_extra_specs_1.instance_type_id:36 = instance_types.id:1 [outer=(1,36), constraints=(/1: (/NULL - ]; /36: (/NULL - ]), fd=(1)==(36), (36)==(1)] -opt +opt generic select anon_1.instance_types_created_at as anon_1_instance_types_created_at, anon_1.instance_types_updated_at as anon_1_instance_types_updated_at, anon_1.instance_types_deleted_at as anon_1_instance_types_deleted_at, @@ -2357,7 +2357,7 @@ sort └── filters └── instance_type_extra_specs_1.instance_type_id:50 = instance_types.id:1 [outer=(1,50), constraints=(/1: (/NULL - ]; /50: (/NULL - ]), fd=(1)==(50), (50)==(1)] -opt +opt generic select anon_1.instance_types_created_at as anon_1_instance_types_created_at, anon_1.instance_types_updated_at as anon_1_instance_types_updated_at, anon_1.instance_types_deleted_at as anon_1_instance_types_deleted_at, @@ -2511,7 +2511,7 @@ project │ └── instance_type_extra_specs_1.deleted:37 = $7 [outer=(37), constraints=(/37: (/NULL - ]), fd=()-->(37)] └── filters (true) -opt +opt generic select anon_1.flavors_created_at as anon_1_flavors_created_at, anon_1.flavors_updated_at as anon_1_flavors_updated_at, anon_1.flavors_id as anon_1_flavors_id, diff --git a/pkg/sql/opt/xform/testdata/external/pgjdbc b/pkg/sql/opt/xform/testdata/external/pgjdbc index 6b56b9b06272..f6faab2fc1f1 100644 --- a/pkg/sql/opt/xform/testdata/external/pgjdbc +++ b/pkg/sql/opt/xform/testdata/external/pgjdbc @@ -99,7 +99,7 @@ project │ │ │ │ │ │ │ │ │ ├── scan kv_catalog_comments │ │ │ │ │ │ │ │ │ │ └── columns: crdb_internal.kv_catalog_comments.classoid:76!null crdb_internal.kv_catalog_comments.objoid:77!null crdb_internal.kv_catalog_comments.objsubid:78!null crdb_internal.kv_catalog_comments.description:79!null │ │ │ │ │ │ │ │ │ └── filters - │ │ │ │ │ │ │ │ │ └── crdb_internal.kv_catalog_comments.classoid:76 != 4294967085 [outer=(76), constraints=(/76: (/NULL - /4294967084] [/4294967086 - ]; tight)] + │ │ │ │ │ │ │ │ │ └── crdb_internal.kv_catalog_comments.classoid:76 != 4294967084 [outer=(76), constraints=(/76: (/NULL - /4294967083] [/4294967085 - ]; tight)] │ │ │ │ │ │ │ │ └── projections │ │ │ │ │ │ │ │ └── crdb_internal.kv_catalog_comments.objsubid:78::INT8 [as=objsubid:85, outer=(78), immutable] │ │ │ │ │ │ │ └── filters @@ -110,7 +110,7 @@ project │ │ │ │ │ │ ├── scan kv_builtin_function_comments │ │ │ │ │ │ │ └── columns: crdb_internal.kv_builtin_function_comments.oid:81!null crdb_internal.kv_builtin_function_comments.description:82!null │ │ │ │ │ │ └── projections - │ │ │ │ │ │ └── 4294967053 [as=classoid:83] + │ │ │ │ │ │ └── 4294967052 [as=classoid:83] │ │ │ │ │ ├── inner-join (hash) │ │ │ │ │ │ ├── columns: c.oid:91!null relname:92!null relnamespace:93!null n.oid:128!null nspname:129!null │ │ │ │ │ │ ├── fd: ()-->(92,129), (93)==(128), (128)==(93) @@ -293,7 +293,7 @@ sort │ │ │ │ │ │ │ │ │ │ ├── scan kv_catalog_comments │ │ │ │ │ │ │ │ │ │ │ └── columns: crdb_internal.kv_catalog_comments.classoid:109!null crdb_internal.kv_catalog_comments.objoid:110!null crdb_internal.kv_catalog_comments.objsubid:111!null crdb_internal.kv_catalog_comments.description:112!null │ │ │ │ │ │ │ │ │ │ └── filters - │ │ │ │ │ │ │ │ │ │ └── crdb_internal.kv_catalog_comments.classoid:109 != 4294967085 [outer=(109), constraints=(/109: (/NULL - /4294967084] [/4294967086 - ]; tight)] + │ │ │ │ │ │ │ │ │ │ └── crdb_internal.kv_catalog_comments.classoid:109 != 4294967084 [outer=(109), constraints=(/109: (/NULL - /4294967083] [/4294967085 - ]; tight)] │ │ │ 
│ │ │ │ │ │ └── projections │ │ │ │ │ │ │ │ │ └── crdb_internal.kv_catalog_comments.objsubid:111::INT8 [as=objsubid:118, outer=(111), immutable] │ │ │ │ │ │ │ │ └── filters diff --git a/pkg/sql/opt/xform/testdata/rules/generic b/pkg/sql/opt/xform/testdata/rules/generic index 93df29d979a5..fed232cac6d0 100644 --- a/pkg/sql/opt/xform/testdata/rules/generic +++ b/pkg/sql/opt/xform/testdata/rules/generic @@ -15,7 +15,7 @@ CREATE TABLE t ( # GenerateParameterizedJoin # -------------------------------------------------- -opt expect=GenerateParameterizedJoin +opt generic expect=GenerateParameterizedJoin SELECT * FROM t WHERE k = $1 ---- project @@ -42,7 +42,7 @@ project │ └── ($1,) └── filters (true) -opt expect=GenerateParameterizedJoin +opt generic expect=GenerateParameterizedJoin SELECT * FROM t WHERE k = $1::INT ---- project @@ -69,7 +69,7 @@ project │ └── ($1,) └── filters (true) -opt expect=GenerateParameterizedJoin +opt generic expect=GenerateParameterizedJoin SELECT * FROM t WHERE i = $1 AND s = $2 AND b = $3 ---- project @@ -103,7 +103,7 @@ project # A placeholder referenced multiple times in the filters should only appear once # in the Values expression. -opt expect=GenerateParameterizedJoin +opt generic expect=GenerateParameterizedJoin SELECT * FROM t WHERE k = $1 AND i = $1 ---- project @@ -133,7 +133,7 @@ project # The generated join should not be reordered and merge joins should not be # explored on it. -opt expect=GenerateParameterizedJoin expect-not=(ReorderJoins,GenerateMergeJoins) +opt generic expect=GenerateParameterizedJoin expect-not=(ReorderJoins,GenerateMergeJoins) SELECT * FROM t WHERE i = $1 ---- project @@ -165,7 +165,7 @@ project │ └── filters (true) └── filters (true) -opt expect=GenerateParameterizedJoin +opt generic expect=GenerateParameterizedJoin SELECT * FROM t WHERE k = (SELECT i FROM t WHERE k = $1) ---- project @@ -209,7 +209,7 @@ project # TODO(mgartner): The rule doesn't apply because the filters do not reference # the placeholder directly. Consider ways to handle cases like this. 
-opt +opt generic SELECT * FROM t WHERE k = (SELECT $1::INT) ---- project @@ -242,7 +242,7 @@ exec-ddl CREATE INDEX partial_idx ON t(t) WHERE t IS NOT NULL ---- -opt expect=GenerateParameterizedJoin +opt generic expect=GenerateParameterizedJoin SELECT * FROM t WHERE t = $1 ---- project @@ -282,7 +282,7 @@ exec-ddl CREATE INDEX partial_idx ON t(i, t) WHERE i IS NOT NULL AND t IS NOT NULL ---- -opt expect=GenerateParameterizedJoin +opt generic expect=GenerateParameterizedJoin SELECT * FROM t WHERE i = $1 AND t = $2 ---- project @@ -322,7 +322,7 @@ exec-ddl CREATE INDEX partial_idx ON t(s) WHERE k = i ---- -opt +opt generic SELECT * FROM t@partial_idx WHERE s = $1 AND k = $2 AND i = $2 ---- project @@ -362,7 +362,7 @@ exec-ddl DROP INDEX partial_idx ---- -opt no-stable-folds expect=GenerateParameterizedJoin +opt generic no-stable-folds expect=GenerateParameterizedJoin SELECT * FROM t WHERE t = now() ---- project @@ -394,7 +394,7 @@ project │ └── filters (true) └── filters (true) -opt no-stable-folds expect=GenerateParameterizedJoin +opt generic no-stable-folds expect=GenerateParameterizedJoin SELECT * FROM t WHERE i = $1 AND t = now() ---- project @@ -426,7 +426,7 @@ project │ └── filters (true) └── filters (true) -opt no-stable-folds expect=GenerateParameterizedJoin +opt generic no-stable-folds expect=GenerateParameterizedJoin SELECT * FROM t WHERE i = $1 AND t > now() ---- project @@ -461,7 +461,7 @@ project │ └── filters (true) └── filters (true) -opt no-stable-folds expect=GenerateParameterizedJoin +opt generic no-stable-folds expect=GenerateParameterizedJoin SELECT * FROM t WHERE i = $1 AND t = now() + $2 ---- project @@ -506,7 +506,7 @@ project │ └── filters (true) └── filters (true) -opt no-stable-folds expect=GenerateParameterizedJoin +opt generic no-stable-folds expect=GenerateParameterizedJoin SELECT * FROM t WHERE i = $1 AND t = now() + '1 hr'::INTERVAL ---- project @@ -552,7 +552,7 @@ project └── filters (true) # TODO(mgartner): Apply the rule to stable, non-leaf expressions. -opt no-stable-folds +opt generic no-stable-folds SELECT * FROM t WHERE t = '2024-01-01 12:00:00'::TIMESTAMP::TIMESTAMPTZ ---- select @@ -571,7 +571,7 @@ select # arguments. # TODO(mgartner): We should be able to relax this restriction as long as all the # arguments are constants or placeholders. -opt no-stable-folds expect=GenerateParameterizedJoin +opt generic no-stable-folds expect=GenerateParameterizedJoin SELECT * FROM t WHERE i = $1 AND s = quote_literal(1::INT) ---- project @@ -607,7 +607,7 @@ project # A stable function is not included in the Values expression if its arguments # reference a column from the table. This would create an illegal outer column # reference in a non-apply-join. -opt no-stable-folds expect=GenerateParameterizedJoin +opt generic no-stable-folds expect=GenerateParameterizedJoin SELECT * FROM t WHERE i = $1 AND s = quote_literal(i) ---- project @@ -642,7 +642,7 @@ project # The rule does not match if there are no placeholders or stable expressions in # the filters. -opt expect-not=GenerateParameterizedJoin +opt generic expect-not=GenerateParameterizedJoin SELECT * FROM t WHERE i = 1 AND s = 'foo' ---- index-join t @@ -654,3 +654,21 @@ index-join t ├── constraint: /2/3/4/1: [/1/'foo' - /1/'foo'] ├── key: (1) └── fd: ()-->(2,3), (1)-->(4) + +# The rule does not match if generic optimizations are disabled. 
+opt expect-not=GenerateParameterizedJoin +SELECT * FROM t WHERE k = $1 AND s = quote_literal(1::INT) +---- +select + ├── columns: k:1!null i:2 s:3!null b:4 t:5 + ├── cardinality: [0 - 1] + ├── has-placeholder + ├── key: () + ├── fd: ()-->(1-5) + ├── scan t + │ ├── columns: k:1!null i:2 s:3 b:4 t:5 + │ ├── key: (1) + │ └── fd: (1)-->(2-5) + └── filters + ├── k:1 = $1 [outer=(1), constraints=(/1: (/NULL - ]), fd=()-->(1)] + └── s:3 = e'\'1\'' [outer=(3), constraints=(/3: [/e'\'1\'' - /e'\'1\'']; tight), fd=()-->(3)] diff --git a/pkg/sql/opt/xform/testdata/rules/join b/pkg/sql/opt/xform/testdata/rules/join index b6d7974b867d..6d3cf4f5f192 100644 --- a/pkg/sql/opt/xform/testdata/rules/join +++ b/pkg/sql/opt/xform/testdata/rules/join @@ -7579,9 +7579,7 @@ project │ │ │ ├── columns: n.gid:14!null n.geom_inverted_key:20!null │ │ │ ├── inverted constraint: /20/14 │ │ │ │ └── spans: ["B\x89", "B\xfd \x00\x00\x00\x00\x00\x00\x00") - │ │ │ ├── flags: force-index=nyc_neighborhoods_geo_idx - │ │ │ ├── key: (14) - │ │ │ └── fd: (14)-->(20) + │ │ │ └── flags: force-index=nyc_neighborhoods_geo_idx │ │ └── filters │ │ └── st_covers('0102000000020000000000000000000000000000000000000000000000000000000000000000000040', n.geom:17) [outer=(17), immutable, constraints=(/17: (/NULL - ])] │ └── filters (true) diff --git a/pkg/sql/opt/xform/testdata/rules/limit b/pkg/sql/opt/xform/testdata/rules/limit index 2445d02f300e..ca8c89b24295 100644 --- a/pkg/sql/opt/xform/testdata/rules/limit +++ b/pkg/sql/opt/xform/testdata/rules/limit @@ -65,6 +65,17 @@ CREATE TABLE partial_index_tab ) ---- +exec-ddl +CREATE TABLE partial_index_const +( + a INT, + b INT, + c INT, + INDEX (a) STORING (c) WHERE b = 1, + INDEX (a) STORING (c) WHERE b IS NULL +) +---- + # Insert statistics for index_tab. Histogram buckets are included for the # latitude column in order to make the optimizer choose specific plans for # SplitLimitedScanIntoUnionScans tests. @@ -273,6 +284,193 @@ scan a@s_idx ├── key: () └── fd: ()-->(4) +# -------------------------------------------------- +# PushLimitIntoProjectFilteredScan +# -------------------------------------------------- + +opt expect=PushLimitIntoProjectFilteredScan +SELECT * FROM partial_index_const WHERE a > 0 AND b = 1 LIMIT 5 +---- +project + ├── columns: a:1!null b:2!null c:3 + ├── cardinality: [0 - 5] + ├── fd: ()-->(2) + ├── scan partial_index_const@partial_index_const_a_idx,partial + │ ├── columns: a:1!null c:3 + │ ├── constraint: /1/4: [/1 - ] + │ └── limit: 5 + └── projections + └── 1 [as=b:2] + +opt expect=PushLimitIntoProjectFilteredScan +SELECT * FROM partial_index_const WHERE a > 0 AND b IS NULL LIMIT 5 +---- +project + ├── columns: a:1!null b:2 c:3 + ├── cardinality: [0 - 5] + ├── fd: ()-->(2) + ├── scan partial_index_const@partial_index_const_a_idx1,partial + │ ├── columns: a:1!null c:3 + │ ├── constraint: /1/4: [/1 - ] + │ └── limit: 5 + └── projections + └── CAST(NULL AS INT8) [as=b:2] + +opt expect=PushLimitIntoProjectFilteredScan +SELECT * FROM partial_index_const WHERE a > 0 AND b = 1 LIMIT 5 OFFSET 10 +---- +offset + ├── columns: a:1!null b:2!null c:3 + ├── cardinality: [0 - 5] + ├── fd: ()-->(2) + ├── project + │ ├── columns: b:2!null a:1!null c:3 + │ ├── cardinality: [0 - 15] + │ ├── fd: ()-->(2) + │ ├── scan partial_index_const@partial_index_const_a_idx,partial + │ │ ├── columns: a:1!null c:3 + │ │ ├── constraint: /1/4: [/1 - ] + │ │ └── limit: 15 + │ └── projections + │ └── 1 [as=b:2] + └── 10 + +# PushLimitIntoProjectFilteredScan propagates row-level locking information. 
+opt expect=PushLimitIntoProjectFilteredScan +SELECT * FROM partial_index_const WHERE a > 0 AND b = 1 LIMIT 5 FOR UPDATE +---- +project + ├── columns: a:1!null b:2!null c:3 + ├── cardinality: [0 - 5] + ├── volatile + ├── fd: ()-->(2) + ├── scan partial_index_const@partial_index_const_a_idx,partial + │ ├── columns: a:1!null c:3 + │ ├── constraint: /1/4: [/1 - ] + │ ├── limit: 5 + │ ├── locking: for-update + │ └── volatile + └── projections + └── 1 [as=b:2] + +opt set=(optimizer_use_lock_op_for_serializable=true) expect=PushLimitIntoProjectFilteredScan +SELECT * FROM partial_index_const WHERE a > 0 AND b = 1 LIMIT 5 FOR UPDATE +---- +lock partial_index_const + ├── columns: a:1!null b:2!null c:3 [hidden: rowid:4!null] + ├── locking: for-update + ├── cardinality: [0 - 5] + ├── volatile, mutations + ├── key: (4) + ├── fd: ()-->(2), (4)-->(1,3) + └── project + ├── columns: b:2!null a:1!null c:3 rowid:4!null + ├── cardinality: [0 - 5] + ├── key: (4) + ├── fd: ()-->(2), (4)-->(1,3) + ├── scan partial_index_const@partial_index_const_a_idx,partial + │ ├── columns: a:1!null c:3 rowid:4!null + │ ├── constraint: /1/4: [/1 - ] + │ ├── limit: 5 + │ ├── key: (4) + │ └── fd: (4)-->(1,3) + └── projections + └── 1 [as=b:2] + +opt set=(optimizer_push_limit_into_project_filtered_scan=off) expect-not=PushLimitIntoProjectFilteredScan +SELECT * FROM partial_index_const WHERE a > 0 AND b = 1 LIMIT 5 +---- +limit + ├── columns: a:1!null b:2!null c:3 + ├── cardinality: [0 - 5] + ├── fd: ()-->(2) + ├── project + │ ├── columns: b:2!null a:1!null c:3 + │ ├── fd: ()-->(2) + │ ├── limit hint: 5.00 + │ ├── scan partial_index_const@partial_index_const_a_idx,partial + │ │ ├── columns: a:1!null c:3 + │ │ ├── constraint: /1/4: [/1 - ] + │ │ └── limit hint: 5.00 + │ └── projections + │ └── 1 [as=b:2] + └── 5 + +# The rule does not apply when the limit is non-positive. +opt expect-not=PushLimitIntoProjectFilteredScan +SELECT * FROM partial_index_const WHERE a > 0 AND b = 1 LIMIT -5 +---- +limit + ├── columns: a:1!null b:2!null c:3 + ├── cardinality: [0 - 0] + ├── immutable + ├── key: () + ├── fd: ()-->(1-3) + ├── project + │ ├── columns: b:2!null a:1!null c:3 + │ ├── fd: ()-->(2) + │ ├── limit hint: 1.00 + │ ├── scan partial_index_const@partial_index_const_a_idx,partial + │ │ ├── columns: a:1!null c:3 + │ │ ├── constraint: /1/4: [/1 - ] + │ │ └── limit hint: 1.00 + │ └── projections + │ └── 1 [as=b:2] + └── -5 + +# The rule does not apply to non-filtered scans. +opt disable=PushLimitIntoProject expect-not=PushLimitIntoProjectFilteredScan +SELECT a, b, a+1 FROM partial_index_const LIMIT 5 +---- +limit + ├── columns: a:1 b:2 "?column?":7 + ├── cardinality: [0 - 5] + ├── immutable + ├── fd: (1)-->(7) + ├── project + │ ├── columns: "?column?":7 a:1 b:2 + │ ├── immutable + │ ├── fd: (1)-->(7) + │ ├── limit hint: 5.00 + │ ├── scan partial_index_const + │ │ ├── columns: a:1 b:2 + │ │ ├── partial index predicates + │ │ │ ├── partial_index_const_a_idx: filters + │ │ │ │ └── b:2 = 1 [outer=(2), constraints=(/2: [/1 - /1]; tight), fd=()-->(2)] + │ │ │ └── partial_index_const_a_idx1: filters + │ │ │ └── b:2 IS NULL [outer=(2), constraints=(/2: [/NULL - /NULL]; tight), fd=()-->(2)] + │ │ └── limit hint: 5.00 + │ └── projections + │ └── a:1 + 1 [as="?column?":7, outer=(1), immutable] + └── 5 + +# The rule does not apply to filtered scans that cannot provide the desired +# ordering. 
+opt disable=GenerateTopK expect-not=PushLimitIntoProjectFilteredScan +SELECT * FROM partial_index_const WHERE a > 0 AND b = 1 ORDER BY c LIMIT 5 +---- +limit + ├── columns: a:1!null b:2!null c:3 + ├── internal-ordering: +3 opt(2) + ├── cardinality: [0 - 5] + ├── fd: ()-->(2) + ├── ordering: +3 opt(2) [actual: +3] + ├── project + │ ├── columns: b:2!null a:1!null c:3 + │ ├── fd: ()-->(2) + │ ├── ordering: +3 opt(2) [actual: +3] + │ ├── limit hint: 5.00 + │ ├── sort + │ │ ├── columns: a:1!null c:3 + │ │ ├── ordering: +3 + │ │ ├── limit hint: 5.00 + │ │ └── scan partial_index_const@partial_index_const_a_idx,partial + │ │ ├── columns: a:1!null c:3 + │ │ └── constraint: /1/4: [/1 - ] + │ └── projections + │ └── 1 [as=b:2] + └── 5 + # -------------------------------------------------- # PushLimitIntoIndexJoin # -------------------------------------------------- @@ -1652,13 +1850,11 @@ select │ ├── key: (1) │ └── scan index_tab@geomidx,inverted │ ├── columns: id:1!null geom_inverted_key:11!null - │ ├── inverted constraint: /11/1 - │ │ └── spans - │ │ ├── ["B\xfd\x10\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x10\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfd\x14\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x14\x00\x00\x00\x00\x00\x00\x00"] - │ │ └── ["B\xfd\x14\x00\x00\x00\x00\x00\x00\x01", "B\xfd\x16\x00\x00\x00\x00\x00\x00\x00") - │ ├── key: (1) - │ └── fd: (1)-->(11) + │ └── inverted constraint: /11/1 + │ └── spans + │ ├── ["B\xfd\x10\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x10\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfd\x14\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x14\x00\x00\x00\x00\x00\x00\x00"] + │ └── ["B\xfd\x14\x00\x00\x00\x00\x00\x00\x01", "B\xfd\x16\x00\x00\x00\x00\x00\x00\x00") └── filters └── st_intersects('010100000000000000000008400000000000000840', geom:8) [outer=(8), immutable, constraints=(/8: (/NULL - ])] diff --git a/pkg/sql/opt/xform/testdata/rules/select b/pkg/sql/opt/xform/testdata/rules/select index d363cbff9b54..76ae399c4f16 100644 --- a/pkg/sql/opt/xform/testdata/rules/select +++ b/pkg/sql/opt/xform/testdata/rules/select @@ -2932,11 +2932,10 @@ index-join b ├── fd: (1)-->(2-4), (3)~~>(1,2,4) └── scan b@j_inv_idx,inverted ├── columns: k:1!null - ├── inverted constraint: /7/1 - │ └── spans - │ ├── ["7\x00\x01*\x04\x00", "7\x00\x01*\x04\x00"] - │ └── ["7\x00\x03\x00\x01*\x04\x00", "7\x00\x03\x00\x01*\x04\x00"] - └── key: (1) + └── inverted constraint: /7/1 + └── spans + ├── ["7\x00\x01*\x04\x00", "7\x00\x01*\x04\x00"] + └── ["7\x00\x03\x00\x01*\x04\x00", "7\x00\x03\x00\x01*\x04\x00"] # Disjunction. opt expect=GenerateInvertedIndexScans @@ -2958,14 +2957,12 @@ project ├── key: (1) └── scan b@j_inv_idx,inverted ├── columns: k:1!null j_inverted_key:7!null - ├── inverted constraint: /7/1 - │ └── spans - │ ├── ["7\x00\x01*\x02\x00", "7\x00\x01*\x02\x00"] - │ ├── ["7\x00\x01*\x04\x00", "7\x00\x01*\x04\x00"] - │ ├── ["7\x00\x03\x00\x01*\x02\x00", "7\x00\x03\x00\x01*\x02\x00"] - │ └── ["7\x00\x03\x00\x01*\x04\x00", "7\x00\x03\x00\x01*\x04\x00"] - ├── key: (1) - └── fd: (1)-->(7) + └── inverted constraint: /7/1 + └── spans + ├── ["7\x00\x01*\x02\x00", "7\x00\x01*\x02\x00"] + ├── ["7\x00\x01*\x04\x00", "7\x00\x01*\x04\x00"] + ├── ["7\x00\x03\x00\x01*\x02\x00", "7\x00\x03\x00\x01*\x02\x00"] + └── ["7\x00\x03\x00\x01*\x04\x00", "7\x00\x03\x00\x01*\x04\x00"] # Disjunction with non-tight predicate. 
opt expect=GenerateInvertedIndexScans @@ -3009,14 +3006,12 @@ select │ ├── key: (1) │ └── scan b@j_inv_idx,inverted │ ├── columns: k:1!null j_inverted_key:7!null - │ ├── inverted constraint: /7/1 - │ │ └── spans - │ │ ├── ["7\x00\x03\x00\x03\x00\x01*\x02\x00", "7\x00\x03\x00\x03\x00\x01*\x02\x00"] - │ │ ├── ["7\x00\x03\x00\x03\x00\x01*\x04\x00", "7\x00\x03\x00\x03\x00\x01*\x04\x00"] - │ │ ├── ["7\x00\x03\x00\x03\x00\x01*\x06\x00", "7\x00\x03\x00\x03\x00\x01*\x06\x00"] - │ │ └── ["7\x00\x03\x00\x03\x00\x01*\b\x00", "7\x00\x03\x00\x03\x00\x01*\b\x00"] - │ ├── key: (1) - │ └── fd: (1)-->(7) + │ └── inverted constraint: /7/1 + │ └── spans + │ ├── ["7\x00\x03\x00\x03\x00\x01*\x02\x00", "7\x00\x03\x00\x03\x00\x01*\x02\x00"] + │ ├── ["7\x00\x03\x00\x03\x00\x01*\x04\x00", "7\x00\x03\x00\x03\x00\x01*\x04\x00"] + │ ├── ["7\x00\x03\x00\x03\x00\x01*\x06\x00", "7\x00\x03\x00\x03\x00\x01*\x06\x00"] + │ └── ["7\x00\x03\x00\x03\x00\x01*\b\x00", "7\x00\x03\x00\x03\x00\x01*\b\x00"] └── filters └── (j:4 @> '[[1, 2]]') OR (j:4 @> '[[3, 4]]') [outer=(4), immutable, constraints=(/4: (/NULL - ])] @@ -3038,12 +3033,10 @@ index-join b ├── key: (1) └── scan b@j_inv_idx,inverted ├── columns: k:1!null j_inverted_key:7!null - ├── inverted constraint: /7/1 - │ └── spans - │ ├── ["7\x00\x03\x00\x019", "7\x00\x03\x00\x019"] - │ └── ["7\x00\x03\x00\xff", "7\x00\x04") - ├── key: (1) - └── fd: (1)-->(7) + └── inverted constraint: /7/1 + └── spans + ├── ["7\x00\x03\x00\x019", "7\x00\x03\x00\x019"] + └── ["7\x00\x03\x00\xff", "7\x00\x04") opt expect=GenerateInvertedIndexScans SELECT * FROM b WHERE j @> '{"a": {}}' @@ -3063,12 +3056,10 @@ index-join b ├── key: (1) └── scan b@j_inv_idx,inverted ├── columns: k:1!null j_inverted_key:7!null - ├── inverted constraint: /7/1 - │ └── spans - │ ├── ["7a\x00\x019", "7a\x00\x019"] - │ └── ["7a\x00\x02\x00\xff", "7a\x00\x03") - ├── key: (1) - └── fd: (1)-->(7) + └── inverted constraint: /7/1 + └── spans + ├── ["7a\x00\x019", "7a\x00\x019"] + └── ["7a\x00\x02\x00\xff", "7a\x00\x03") opt expect=GenerateInvertedIndexScans SELECT * FROM b WHERE j @> '{"a": []}' @@ -3088,12 +3079,10 @@ index-join b ├── key: (1) └── scan b@j_inv_idx,inverted ├── columns: k:1!null j_inverted_key:7!null - ├── inverted constraint: /7/1 - │ └── spans - │ ├── ["7a\x00\x018", "7a\x00\x018"] - │ └── ["7a\x00\x02\x00\x03", "7a\x00\x02\x00\x03"] - ├── key: (1) - └── fd: (1)-->(7) + └── inverted constraint: /7/1 + └── spans + ├── ["7a\x00\x018", "7a\x00\x018"] + └── ["7a\x00\x02\x00\x03", "7a\x00\x02\x00\x03"] opt expect=GenerateInvertedIndexScans SELECT * FROM b WHERE j @> '{"a":[[{"b":{"c":[{"d":"e"}]}}]]}' @@ -3192,12 +3181,10 @@ select │ ├── key: (1) │ └── scan b@j_inv_idx,inverted │ ├── columns: k:1!null j_inverted_key:7!null - │ ├── inverted constraint: /7/1 - │ │ └── spans - │ │ ├── ["7\x00\x019", "7\x00\x019"] - │ │ └── ["7a\x00\x018", "7a\x00\x018"] - │ ├── key: (1) - │ └── fd: (1)-->(7) + │ └── inverted constraint: /7/1 + │ └── spans + │ ├── ["7\x00\x019", "7\x00\x019"] + │ └── ["7a\x00\x018", "7a\x00\x018"] └── filters └── j:4 <@ '{"a": []}' [outer=(4), immutable] @@ -3229,18 +3216,16 @@ select │ ├── key: (1) │ └── scan b@j_inv_idx,inverted │ ├── columns: k:1!null j_inverted_key:7!null - │ ├── inverted constraint: /7/1 - │ │ └── spans - │ │ ├── ["7\x00\x01\x12b\x00\x01", "7\x00\x01\x12b\x00\x01"] - │ │ ├── ["7\x00\x01*\x02\x00", "7\x00\x01*\x02\x00"] - │ │ ├── ["7\x00\x018", "7\x00\x018"] - │ │ ├── ["7\x00\x03\x00\x01\x12b\x00\x01", "7\x00\x03\x00\x01\x12b\x00\x01"] - │ │ ├── ["7\x00\x03\x00\x01*\x02\x00", 
"7\x00\x03\x00\x01*\x02\x00"] - │ │ ├── ["7\x00\x03\x00\x019", "7\x00\x03\x00\x019"] - │ │ ├── ["7\x00\x03a\x00\x019", "7\x00\x03a\x00\x019"] - │ │ └── ["7\x00\x03a\x00\x02d\x00\x01\n", "7\x00\x03a\x00\x02d\x00\x01\n"] - │ ├── key: (1) - │ └── fd: (1)-->(7) + │ └── inverted constraint: /7/1 + │ └── spans + │ ├── ["7\x00\x01\x12b\x00\x01", "7\x00\x01\x12b\x00\x01"] + │ ├── ["7\x00\x01*\x02\x00", "7\x00\x01*\x02\x00"] + │ ├── ["7\x00\x018", "7\x00\x018"] + │ ├── ["7\x00\x03\x00\x01\x12b\x00\x01", "7\x00\x03\x00\x01\x12b\x00\x01"] + │ ├── ["7\x00\x03\x00\x01*\x02\x00", "7\x00\x03\x00\x01*\x02\x00"] + │ ├── ["7\x00\x03\x00\x019", "7\x00\x03\x00\x019"] + │ ├── ["7\x00\x03a\x00\x019", "7\x00\x03a\x00\x019"] + │ └── ["7\x00\x03a\x00\x02d\x00\x01\n", "7\x00\x03a\x00\x02d\x00\x01\n"] └── filters └── j:4 <@ '[{"a": {"d": true}}, 1, "b"]' [outer=(4), immutable] @@ -3274,19 +3259,17 @@ select │ ├── key: (1) │ └── scan b@j_inv_idx,inverted │ ├── columns: k:1!null j_inverted_key:7!null - │ ├── inverted constraint: /7/1 - │ │ └── spans - │ │ ├── ["7\x00\x01*\x02\x00", "7\x00\x01*\x02\x00"] - │ │ ├── ["7\x00\x01*\x04\x00", "7\x00\x01*\x04\x00"] - │ │ ├── ["7\x00\x01*\x06\x00", "7\x00\x01*\x06\x00"] - │ │ ├── ["7\x00\x018", "7\x00\x01:") - │ │ ├── ["7\x00\x03\x00\x01*\x02\x00", "7\x00\x03\x00\x01*\x02\x00"] - │ │ ├── ["7\x00\x03\x00\x01*\x04\x00", "7\x00\x03\x00\x01*\x04\x00"] - │ │ ├── ["7\x00\x03\x00\x01*\x06\x00", "7\x00\x03\x00\x01*\x06\x00"] - │ │ ├── ["7a\x00\x018", "7a\x00\x018"] - │ │ └── ["7a\x00\x02\x00\x03\x00\x01*\x06\x00", "7a\x00\x02\x00\x03\x00\x01*\x06\x00"] - │ ├── key: (1) - │ └── fd: (1)-->(7) + │ └── inverted constraint: /7/1 + │ └── spans + │ ├── ["7\x00\x01*\x02\x00", "7\x00\x01*\x02\x00"] + │ ├── ["7\x00\x01*\x04\x00", "7\x00\x01*\x04\x00"] + │ ├── ["7\x00\x01*\x06\x00", "7\x00\x01*\x06\x00"] + │ ├── ["7\x00\x018", "7\x00\x01:") + │ ├── ["7\x00\x03\x00\x01*\x02\x00", "7\x00\x03\x00\x01*\x02\x00"] + │ ├── ["7\x00\x03\x00\x01*\x04\x00", "7\x00\x03\x00\x01*\x04\x00"] + │ ├── ["7\x00\x03\x00\x01*\x06\x00", "7\x00\x03\x00\x01*\x06\x00"] + │ ├── ["7a\x00\x018", "7a\x00\x018"] + │ └── ["7a\x00\x02\x00\x03\x00\x01*\x06\x00", "7a\x00\x02\x00\x03\x00\x01*\x06\x00"] └── filters └── (j:4 <@ '{"a": [3]}') OR (j:4 <@ '[1, 2, 3]') [outer=(4), immutable] @@ -3328,19 +3311,17 @@ select │ ├── key: (1) │ └── scan b@j_inv_idx,inverted │ ├── columns: k:1!null j_inverted_key:7!null - │ ├── inverted constraint: /7/1 - │ │ └── spans - │ │ ├── ["7\x00\x01*\x02\x00", "7\x00\x01*\x02\x00"] - │ │ ├── ["7\x00\x01*\x04\x00", "7\x00\x01*\x04\x00"] - │ │ ├── ["7\x00\x01*\x06\x00", "7\x00\x01*\x06\x00"] - │ │ ├── ["7\x00\x018", "7\x00\x01:") - │ │ ├── ["7\x00\x03\x00\x01*\x02\x00", "7\x00\x03\x00\x01*\x02\x00"] - │ │ ├── ["7\x00\x03\x00\x01*\x04\x00", "7\x00\x03\x00\x01*\x04\x00"] - │ │ ├── ["7\x00\x03\x00\x01*\x06\x00", "7\x00\x03\x00\x01*\x06\x00"] - │ │ ├── ["7a\x00\x018", "7a\x00\x018"] - │ │ └── ["7a\x00\x02\x00\x03\x00\x01*\x06\x00", "7a\x00\x02\x00\x03\x00\x01*\x06\x00"] - │ ├── key: (1) - │ └── fd: (1)-->(7) + │ └── inverted constraint: /7/1 + │ └── spans + │ ├── ["7\x00\x01*\x02\x00", "7\x00\x01*\x02\x00"] + │ ├── ["7\x00\x01*\x04\x00", "7\x00\x01*\x04\x00"] + │ ├── ["7\x00\x01*\x06\x00", "7\x00\x01*\x06\x00"] + │ ├── ["7\x00\x018", "7\x00\x01:") + │ ├── ["7\x00\x03\x00\x01*\x02\x00", "7\x00\x03\x00\x01*\x02\x00"] + │ ├── ["7\x00\x03\x00\x01*\x04\x00", "7\x00\x03\x00\x01*\x04\x00"] + │ ├── ["7\x00\x03\x00\x01*\x06\x00", "7\x00\x03\x00\x01*\x06\x00"] + │ ├── ["7a\x00\x018", "7a\x00\x018"] + │ └── 
["7a\x00\x02\x00\x03\x00\x01*\x06\x00", "7a\x00\x02\x00\x03\x00\x01*\x06\x00"] └── filters ├── j:4 <@ '{"a": [3]}' [outer=(4), immutable] └── j:4 <@ '[1, 2, 3]' [outer=(4), immutable] @@ -3365,15 +3346,13 @@ project ├── fd: ()-->(4) ├── index-join b │ ├── columns: k:1!null j:4 - │ ├── key: (1) │ ├── fd: (1)-->(4) │ └── scan b@j_inv_idx,inverted │ ├── columns: k:1!null - │ ├── inverted constraint: /7/1 - │ │ └── spans - │ │ ├── ["7\x00\x01\x00", "7\x00\x01\x00"] - │ │ └── ["7\x00\x03\x00\x01\x00", "7\x00\x03\x00\x01\x00"] - │ └── key: (1) + │ └── inverted constraint: /7/1 + │ └── spans + │ ├── ["7\x00\x01\x00", "7\x00\x01\x00"] + │ └── ["7\x00\x03\x00\x01\x00", "7\x00\x03\x00\x01\x00"] └── filters └── j:4 = 'null' [outer=(4), immutable, constraints=(/4: [/'null' - /'null']; tight), fd=()-->(4)] @@ -3392,15 +3371,13 @@ project ├── fd: ()-->(4) ├── index-join b │ ├── columns: k:1!null j:4 - │ ├── key: (1) │ ├── fd: (1)-->(4) │ └── scan b@j_inv_idx,inverted │ ├── columns: k:1!null - │ ├── inverted constraint: /7/1 - │ │ └── spans - │ │ ├── ["7\x00\x01\x12b\x00\x01", "7\x00\x01\x12b\x00\x01"] - │ │ └── ["7\x00\x03\x00\x01\x12b\x00\x01", "7\x00\x03\x00\x01\x12b\x00\x01"] - │ └── key: (1) + │ └── inverted constraint: /7/1 + │ └── spans + │ ├── ["7\x00\x01\x12b\x00\x01", "7\x00\x01\x12b\x00\x01"] + │ └── ["7\x00\x03\x00\x01\x12b\x00\x01", "7\x00\x03\x00\x01\x12b\x00\x01"] └── filters └── j:4 = '"b"' [outer=(4), immutable, constraints=(/4: [/'"b"' - /'"b"']; tight), fd=()-->(4)] @@ -3419,15 +3396,13 @@ project ├── fd: ()-->(4) ├── index-join b │ ├── columns: k:1!null j:4 - │ ├── key: (1) │ ├── fd: (1)-->(4) │ └── scan b@j_inv_idx,inverted │ ├── columns: k:1!null - │ ├── inverted constraint: /7/1 - │ │ └── spans - │ │ ├── ["7\x00\x01*\x02\x00", "7\x00\x01*\x02\x00"] - │ │ └── ["7\x00\x03\x00\x01*\x02\x00", "7\x00\x03\x00\x01*\x02\x00"] - │ └── key: (1) + │ └── inverted constraint: /7/1 + │ └── spans + │ ├── ["7\x00\x01*\x02\x00", "7\x00\x01*\x02\x00"] + │ └── ["7\x00\x03\x00\x01*\x02\x00", "7\x00\x03\x00\x01*\x02\x00"] └── filters └── j:4 = '1' [outer=(4), immutable, constraints=(/4: [/'1' - /'1']; tight), fd=()-->(4)] @@ -3446,15 +3421,13 @@ project ├── fd: ()-->(4) ├── index-join b │ ├── columns: k:1!null j:4 - │ ├── key: (1) │ ├── fd: (1)-->(4) │ └── scan b@j_inv_idx,inverted │ ├── columns: k:1!null - │ ├── inverted constraint: /7/1 - │ │ └── spans - │ │ ├── ["7\x00\x01\x12\x00\x01", "7\x00\x01\x12\x00\x01"] - │ │ └── ["7\x00\x03\x00\x01\x12\x00\x01", "7\x00\x03\x00\x01\x12\x00\x01"] - │ └── key: (1) + │ └── inverted constraint: /7/1 + │ └── spans + │ ├── ["7\x00\x01\x12\x00\x01", "7\x00\x01\x12\x00\x01"] + │ └── ["7\x00\x03\x00\x01\x12\x00\x01", "7\x00\x03\x00\x01\x12\x00\x01"] └── filters └── j:4 = '""' [outer=(4), immutable, constraints=(/4: [/'""' - /'""']; tight), fd=()-->(4)] @@ -3510,12 +3483,10 @@ project │ ├── key: (1) │ └── scan b@j_inv_idx,inverted │ ├── columns: k:1!null j_inverted_key:7!null - │ ├── inverted constraint: /7/1 - │ │ └── spans - │ │ ├── ["7\x00\x018", "7\x00\x018"] - │ │ └── ["7\x00\x03", "7\x00\x03"] - │ ├── key: (1) - │ └── fd: (1)-->(7) + │ └── inverted constraint: /7/1 + │ └── spans + │ ├── ["7\x00\x018", "7\x00\x018"] + │ └── ["7\x00\x03", "7\x00\x03"] └── filters └── j:4 = '[]' [outer=(4), immutable, constraints=(/4: [/'[]' - /'[]']; tight), fd=()-->(4)] @@ -3546,12 +3517,10 @@ project │ ├── key: (1) │ └── scan b@j_inv_idx,inverted │ ├── columns: k:1!null j_inverted_key:7!null - │ ├── inverted constraint: /7/1 - │ │ └── spans - │ │ ├── ["7\x00\x019", "7\x00\x019"] 
- │ │ └── ["7\x00\xff", "8") - │ ├── key: (1) - │ └── fd: (1)-->(7) + │ └── inverted constraint: /7/1 + │ └── spans + │ ├── ["7\x00\x019", "7\x00\x019"] + │ └── ["7\x00\xff", "8") └── filters └── j:4 = '{}' [outer=(4), immutable, constraints=(/4: [/'{}' - /'{}']; tight), fd=()-->(4)] @@ -3832,13 +3801,11 @@ project │ ├── key: (1) │ └── scan b@j_inv_idx,inverted │ ├── columns: k:1!null j_inverted_key:7!null - │ ├── inverted constraint: /7/1 - │ │ └── spans - │ │ ├── ["7\x00\x018", "7\x00\x018"] - │ │ ├── ["7\x00\x03\x00\x019", "7\x00\x03\x00\x019"] - │ │ └── ["7\x00\x03b\x00\x01\x12c\x00\x01", "7\x00\x03b\x00\x01\x12c\x00\x01"] - │ ├── key: (1) - │ └── fd: (1)-->(7) + │ └── inverted constraint: /7/1 + │ └── spans + │ ├── ["7\x00\x018", "7\x00\x018"] + │ ├── ["7\x00\x03\x00\x019", "7\x00\x03\x00\x019"] + │ └── ["7\x00\x03b\x00\x01\x12c\x00\x01", "7\x00\x03b\x00\x01\x12c\x00\x01"] └── filters └── (j:4->0) <@ '{"b": "c"}' [outer=(4), immutable] @@ -3876,18 +3843,16 @@ project │ ├── key: (1) │ └── scan b@j_inv_idx,inverted │ ├── columns: k:1!null j_inverted_key:7!null - │ ├── inverted constraint: /7/1 - │ │ └── spans - │ │ ├── ["7\x00\x01*\x02\x00", "7\x00\x01*\x02\x00"] - │ │ ├── ["7\x00\x01*\x04\x00", "7\x00\x01*\x04\x00"] - │ │ ├── ["7\x00\x018", "7\x00\x018"] - │ │ ├── ["7\x00\x03\x00\x01*\x02\x00", "7\x00\x03\x00\x01*\x02\x00"] - │ │ ├── ["7\x00\x03\x00\x01*\x04\x00", "7\x00\x03\x00\x01*\x04\x00"] - │ │ ├── ["7\x00\x03\x00\x018", "7\x00\x03\x00\x018"] - │ │ ├── ["7\x00\x03\x00\x03\x00\x01*\x02\x00", "7\x00\x03\x00\x03\x00\x01*\x02\x00"] - │ │ └── ["7\x00\x03\x00\x03\x00\x01*\x04\x00", "7\x00\x03\x00\x03\x00\x01*\x04\x00"] - │ ├── key: (1) - │ └── fd: (1)-->(7) + │ └── inverted constraint: /7/1 + │ └── spans + │ ├── ["7\x00\x01*\x02\x00", "7\x00\x01*\x02\x00"] + │ ├── ["7\x00\x01*\x04\x00", "7\x00\x01*\x04\x00"] + │ ├── ["7\x00\x018", "7\x00\x018"] + │ ├── ["7\x00\x03\x00\x01*\x02\x00", "7\x00\x03\x00\x01*\x02\x00"] + │ ├── ["7\x00\x03\x00\x01*\x04\x00", "7\x00\x03\x00\x01*\x04\x00"] + │ ├── ["7\x00\x03\x00\x018", "7\x00\x03\x00\x018"] + │ ├── ["7\x00\x03\x00\x03\x00\x01*\x02\x00", "7\x00\x03\x00\x03\x00\x01*\x02\x00"] + │ └── ["7\x00\x03\x00\x03\x00\x01*\x04\x00", "7\x00\x03\x00\x03\x00\x01*\x04\x00"] └── filters └── (j:4->0) <@ '[1, 2]' [outer=(4), immutable] @@ -3945,12 +3910,10 @@ project │ ├── key: (1) │ └── scan b@j_inv_idx,inverted │ ├── columns: k:1!null j_inverted_key:7!null - │ ├── inverted constraint: /7/1 - │ │ └── spans - │ │ ├── ["7\x00\x03\x00\x01\x12a\x00\x01", "7\x00\x03\x00\x01\x12a\x00\x01"] - │ │ └── ["7\x00\x03\x00\x03\x00\x01\x12a\x00\x01", "7\x00\x03\x00\x03\x00\x01\x12a\x00\x01"] - │ ├── key: (1) - │ └── fd: (1)-->(7) + │ └── inverted constraint: /7/1 + │ └── spans + │ ├── ["7\x00\x03\x00\x01\x12a\x00\x01", "7\x00\x03\x00\x01\x12a\x00\x01"] + │ └── ["7\x00\x03\x00\x03\x00\x01\x12a\x00\x01", "7\x00\x03\x00\x03\x00\x01\x12a\x00\x01"] └── filters └── (j:4->0) @> '"a"' [outer=(4), immutable] @@ -3983,13 +3946,11 @@ project │ ├── key: (1) │ └── scan b@j_inv_idx,inverted │ ├── columns: k:1!null j_inverted_key:7!null - │ ├── inverted constraint: /7/1 - │ │ └── spans - │ │ ├── ["7\x00\x01\x12a\x00\x01", "7\x00\x01\x12a\x00\x01"] - │ │ ├── ["7\x00\x018", "7\x00\x018"] - │ │ └── ["7\x00\x03\x00\x01\x12a\x00\x01", "7\x00\x03\x00\x01\x12a\x00\x01"] - │ ├── key: (1) - │ └── fd: (1)-->(7) + │ └── inverted constraint: /7/1 + │ └── spans + │ ├── ["7\x00\x01\x12a\x00\x01", "7\x00\x01\x12a\x00\x01"] + │ ├── ["7\x00\x018", "7\x00\x018"] + │ └── ["7\x00\x03\x00\x01\x12a\x00\x01", 
"7\x00\x03\x00\x01\x12a\x00\x01"] └── filters └── (j:4->0) <@ '"a"' [outer=(4), immutable] @@ -4021,12 +3982,10 @@ project │ ├── key: (1) │ └── scan b@j_inv_idx,inverted │ ├── columns: k:1!null j_inverted_key:7!null - │ ├── inverted constraint: /7/1 - │ │ └── spans - │ │ ├── ["7\x00\x03\x00\x01*\x02\x00", "7\x00\x03\x00\x01*\x02\x00"] - │ │ └── ["7\x00\x03\x00\x03\x00\x01*\x02\x00", "7\x00\x03\x00\x03\x00\x01*\x02\x00"] - │ ├── key: (1) - │ └── fd: (1)-->(7) + │ └── inverted constraint: /7/1 + │ └── spans + │ ├── ["7\x00\x03\x00\x01*\x02\x00", "7\x00\x03\x00\x01*\x02\x00"] + │ └── ["7\x00\x03\x00\x03\x00\x01*\x02\x00", "7\x00\x03\x00\x03\x00\x01*\x02\x00"] └── filters └── (j:4->0) @> '1' [outer=(4), immutable] @@ -4059,13 +4018,11 @@ project │ ├── key: (1) │ └── scan b@j_inv_idx,inverted │ ├── columns: k:1!null j_inverted_key:7!null - │ ├── inverted constraint: /7/1 - │ │ └── spans - │ │ ├── ["7\x00\x01*\x02\x00", "7\x00\x01*\x02\x00"] - │ │ ├── ["7\x00\x018", "7\x00\x018"] - │ │ └── ["7\x00\x03\x00\x01*\x02\x00", "7\x00\x03\x00\x01*\x02\x00"] - │ ├── key: (1) - │ └── fd: (1)-->(7) + │ └── inverted constraint: /7/1 + │ └── spans + │ ├── ["7\x00\x01*\x02\x00", "7\x00\x01*\x02\x00"] + │ ├── ["7\x00\x018", "7\x00\x018"] + │ └── ["7\x00\x03\x00\x01*\x02\x00", "7\x00\x03\x00\x01*\x02\x00"] └── filters └── (j:4->0) <@ '1' [outer=(4), immutable] @@ -4092,12 +4049,10 @@ project ├── key: (1) └── scan b@j_inv_idx,inverted ├── columns: k:1!null j_inverted_key:7!null - ├── inverted constraint: /7/1 - │ └── spans - │ ├── ["7a\x00\x01\x12b\x00\x01", "7a\x00\x01\x12b\x00\x01"] - │ └── ["7c\x00\x01\x12d\x00\x01", "7c\x00\x01\x12d\x00\x01"] - ├── key: (1) - └── fd: (1)-->(7) + └── inverted constraint: /7/1 + └── spans + ├── ["7a\x00\x01\x12b\x00\x01", "7a\x00\x01\x12b\x00\x01"] + └── ["7c\x00\x01\x12d\x00\x01", "7c\x00\x01\x12d\x00\x01"] # Query using the fetch val and equality operators in a disjunction. 
opt expect=GenerateInvertedIndexScans @@ -4117,12 +4072,10 @@ project ├── key: (1) └── scan b@j_inv_idx,inverted ├── columns: k:1!null j_inverted_key:7!null - ├── inverted constraint: /7/1 - │ └── spans - │ ├── ["7a\x00\x01\x12b\x00\x01", "7a\x00\x01\x12b\x00\x01"] - │ └── ["7c\x00\x01\x12d\x00\x01", "7c\x00\x01\x12d\x00\x01"] - ├── key: (1) - └── fd: (1)-->(7) + └── inverted constraint: /7/1 + └── spans + ├── ["7a\x00\x01\x12b\x00\x01", "7a\x00\x01\x12b\x00\x01"] + └── ["7c\x00\x01\x12d\x00\x01", "7c\x00\x01\x12d\x00\x01"] # Generate an inverted scan when the index of the fetch val operator is # a string along with the IN operator consisting of JSON strings in @@ -4146,13 +4099,11 @@ project ├── key: (1) └── scan b@j_inv_idx,inverted ├── columns: k:1!null j_inverted_key:7!null - ├── inverted constraint: /7/1 - │ └── spans - │ ├── ["7a\x00\x01*\x02\x00", "7a\x00\x01*\x02\x00"] - │ ├── ["7a\x00\x01*\x04\x00", "7a\x00\x01*\x04\x00"] - │ └── ["7a\x00\x01*\x06\x00", "7a\x00\x01*\x06\x00"] - ├── key: (1) - └── fd: (1)-->(7) + └── inverted constraint: /7/1 + └── spans + ├── ["7a\x00\x01*\x02\x00", "7a\x00\x01*\x02\x00"] + ├── ["7a\x00\x01*\x04\x00", "7a\x00\x01*\x04\x00"] + └── ["7a\x00\x01*\x06\x00", "7a\x00\x01*\x06\x00"] # Testing the IN operator, without the fetch val operator, @@ -4187,16 +4138,14 @@ project │ ├── key: (1) │ └── scan b@j_inv_idx,inverted │ ├── columns: k:1!null j_inverted_key:7!null - │ ├── inverted constraint: /7/1 - │ │ └── spans - │ │ ├── ["7\x00\x01*\x02\x00", "7\x00\x01*\x02\x00"] - │ │ ├── ["7\x00\x01*\x04\x00", "7\x00\x01*\x04\x00"] - │ │ ├── ["7\x00\x01*\x06\x00", "7\x00\x01*\x06\x00"] - │ │ ├── ["7\x00\x03\x00\x01*\x02\x00", "7\x00\x03\x00\x01*\x02\x00"] - │ │ ├── ["7\x00\x03\x00\x01*\x04\x00", "7\x00\x03\x00\x01*\x04\x00"] - │ │ └── ["7\x00\x03\x00\x01*\x06\x00", "7\x00\x03\x00\x01*\x06\x00"] - │ ├── key: (1) - │ └── fd: (1)-->(7) + │ └── inverted constraint: /7/1 + │ └── spans + │ ├── ["7\x00\x01*\x02\x00", "7\x00\x01*\x02\x00"] + │ ├── ["7\x00\x01*\x04\x00", "7\x00\x01*\x04\x00"] + │ ├── ["7\x00\x01*\x06\x00", "7\x00\x01*\x06\x00"] + │ ├── ["7\x00\x03\x00\x01*\x02\x00", "7\x00\x03\x00\x01*\x02\x00"] + │ ├── ["7\x00\x03\x00\x01*\x04\x00", "7\x00\x03\x00\x01*\x04\x00"] + │ └── ["7\x00\x03\x00\x01*\x06\x00", "7\x00\x03\x00\x01*\x06\x00"] └── filters └── j:4 IN ('1', '2', '3') [outer=(4), constraints=(/4: [/'1' - /'1'] [/'2' - /'2'] [/'3' - /'3']; tight)] @@ -4230,16 +4179,14 @@ project │ ├── key: (1) │ └── scan b@j_inv_idx,inverted │ ├── columns: k:1!null j_inverted_key:7!null - │ ├── inverted constraint: /7/1 - │ │ └── spans - │ │ ├── ["7\x00\x01\x12a\x00\x01", "7\x00\x01\x12a\x00\x01"] - │ │ ├── ["7\x00\x01\x12b\x00\x01", "7\x00\x01\x12b\x00\x01"] - │ │ ├── ["7\x00\x01\x12c\x00\x01", "7\x00\x01\x12c\x00\x01"] - │ │ ├── ["7\x00\x03\x00\x01\x12a\x00\x01", "7\x00\x03\x00\x01\x12a\x00\x01"] - │ │ ├── ["7\x00\x03\x00\x01\x12b\x00\x01", "7\x00\x03\x00\x01\x12b\x00\x01"] - │ │ └── ["7\x00\x03\x00\x01\x12c\x00\x01", "7\x00\x03\x00\x01\x12c\x00\x01"] - │ ├── key: (1) - │ └── fd: (1)-->(7) + │ └── inverted constraint: /7/1 + │ └── spans + │ ├── ["7\x00\x01\x12a\x00\x01", "7\x00\x01\x12a\x00\x01"] + │ ├── ["7\x00\x01\x12b\x00\x01", "7\x00\x01\x12b\x00\x01"] + │ ├── ["7\x00\x01\x12c\x00\x01", "7\x00\x01\x12c\x00\x01"] + │ ├── ["7\x00\x03\x00\x01\x12a\x00\x01", "7\x00\x03\x00\x01\x12a\x00\x01"] + │ ├── ["7\x00\x03\x00\x01\x12b\x00\x01", "7\x00\x03\x00\x01\x12b\x00\x01"] + │ └── ["7\x00\x03\x00\x01\x12c\x00\x01", "7\x00\x03\x00\x01\x12c\x00\x01"] └── filters └── j:4 IN ('"a"', 
'"b"', '"c"') [outer=(4), constraints=(/4: [/'"a"' - /'"a"'] [/'"b"' - /'"b"'] [/'"c"' - /'"c"']; tight)] @@ -4297,12 +4244,10 @@ project │ ├── key: (1) │ └── scan b@j_inv_idx,inverted │ ├── columns: k:1!null j_inverted_key:7!null - │ ├── inverted constraint: /7/1 - │ │ └── spans - │ │ ├── ["7\x00\x018", "7\x00\x018"] - │ │ └── ["7\x00\x03", "7\x00\x03"] - │ ├── key: (1) - │ └── fd: (1)-->(7) + │ └── inverted constraint: /7/1 + │ └── spans + │ ├── ["7\x00\x018", "7\x00\x018"] + │ └── ["7\x00\x03", "7\x00\x03"] └── filters └── j:4 IN ('[]', '[1]', '[1, 2]', '[1, 2, 3]') [outer=(4), constraints=(/4: [/'[]' - /'[]'] [/'[1]' - /'[1]'] [/'[1, 2]' - /'[1, 2]'] [/'[1, 2, 3]' - /'[1, 2, 3]']; tight)] @@ -4359,12 +4304,10 @@ project │ ├── key: (1) │ └── scan b@j_inv_idx,inverted │ ├── columns: k:1!null j_inverted_key:7!null - │ ├── inverted constraint: /7/1 - │ │ └── spans - │ │ ├── ["7\x00\x019", "7\x00\x019"] - │ │ └── ["7\x00\xff", "8") - │ ├── key: (1) - │ └── fd: (1)-->(7) + │ └── inverted constraint: /7/1 + │ └── spans + │ ├── ["7\x00\x019", "7\x00\x019"] + │ └── ["7\x00\xff", "8") └── filters └── j:4 IN ('{}', '{"a": [1, 2, 3]}', '{"a": "b", "c": "d"}') [outer=(4), constraints=(/4: [/'{}' - /'{}'] [/'{"a": [1, 2, 3]}' - /'{"a": [1, 2, 3]}'] [/'{"a": "b", "c": "d"}' - /'{"a": "b", "c": "d"}']; tight)] @@ -4406,18 +4349,16 @@ project │ ├── key: (1) │ └── scan b@j_inv_idx,inverted │ ├── columns: k:1!null j_inverted_key:7!null - │ ├── inverted constraint: /7/1 - │ │ └── spans - │ │ ├── ["7\x00\x01\x00", "7\x00\x01\x00"] - │ │ ├── ["7\x00\x01\x12a\x00\x01", "7\x00\x01\x12a\x00\x01"] - │ │ ├── ["7\x00\x01*\x02\x00", "7\x00\x01*\x02\x00"] - │ │ ├── ["7\x00\x019", "7\x00\x019"] - │ │ ├── ["7\x00\x03\x00\x01\x00", "7\x00\x03\x00\x01\x00"] - │ │ ├── ["7\x00\x03\x00\x01\x12a\x00\x01", "7\x00\x03\x00\x01\x12a\x00\x01"] - │ │ ├── ["7\x00\x03\x00\x01*\x02\x00", "7\x00\x03\x00\x01*\x02\x00"] - │ │ └── ["7\x00\xff", "8") - │ ├── key: (1) - │ └── fd: (1)-->(7) + │ └── inverted constraint: /7/1 + │ └── spans + │ ├── ["7\x00\x01\x00", "7\x00\x01\x00"] + │ ├── ["7\x00\x01\x12a\x00\x01", "7\x00\x01\x12a\x00\x01"] + │ ├── ["7\x00\x01*\x02\x00", "7\x00\x01*\x02\x00"] + │ ├── ["7\x00\x019", "7\x00\x019"] + │ ├── ["7\x00\x03\x00\x01\x00", "7\x00\x03\x00\x01\x00"] + │ ├── ["7\x00\x03\x00\x01\x12a\x00\x01", "7\x00\x03\x00\x01\x12a\x00\x01"] + │ ├── ["7\x00\x03\x00\x01*\x02\x00", "7\x00\x03\x00\x01*\x02\x00"] + │ └── ["7\x00\xff", "8") └── filters └── j:4 IN ('null', '"a"', '1', '{}', '{"a": "b", "c": "d"}') [outer=(4), constraints=(/4: [/'null' - /'null'] [/'"a"' - /'"a"'] [/'1' - /'1'] [/'{}' - /'{}'] [/'{"a": "b", "c": "d"}' - /'{"a": "b", "c": "d"}']; tight)] @@ -4442,12 +4383,10 @@ project ├── key: (1) └── scan b@j_inv_idx,inverted ├── columns: k:1!null j_inverted_key:7!null - ├── inverted constraint: /7/1 - │ └── spans - │ ├── ["7a\x00\x01\x12a\x00\x01", "7a\x00\x01\x12a\x00\x01"] - │ └── ["7a\x00\x01\x12b\x00\x01", "7a\x00\x01\x12b\x00\x01"] - ├── key: (1) - └── fd: (1)-->(7) + └── inverted constraint: /7/1 + └── spans + ├── ["7a\x00\x01\x12a\x00\x01", "7a\x00\x01\x12a\x00\x01"] + └── ["7a\x00\x01\x12b\x00\x01", "7a\x00\x01\x12b\x00\x01"] # Generate an inverted scan when the index of the fetch val operator is @@ -4480,12 +4419,10 @@ project │ ├── key: (1) │ └── scan b@j_inv_idx,inverted │ ├── columns: k:1!null j_inverted_key:7!null - │ ├── inverted constraint: /7/1 - │ │ └── spans - │ │ ├── ["7a\x00\x02a\x00\x01\x12b\x00\x01", "7a\x00\x02a\x00\x01\x12b\x00\x01"] - │ │ └── ["7a\x00\x02c\x00\x01\x12d\x00\x01", 
"7a\x00\x02c\x00\x01\x12d\x00\x01"] - │ ├── key: (1) - │ └── fd: (1)-->(7) + │ └── inverted constraint: /7/1 + │ └── spans + │ ├── ["7a\x00\x02a\x00\x01\x12b\x00\x01", "7a\x00\x02a\x00\x01\x12b\x00\x01"] + │ └── ["7a\x00\x02c\x00\x01\x12d\x00\x01", "7a\x00\x02c\x00\x01\x12d\x00\x01"] └── filters └── (j:4->'a') IN ('{"a": "b"}', '{"c": "d"}') [outer=(4), immutable] @@ -4521,14 +4458,12 @@ project │ ├── key: (1) │ └── scan b@j_inv_idx,inverted │ ├── columns: k:1!null j_inverted_key:7!null - │ ├── inverted constraint: /7/1 - │ │ └── spans - │ │ ├── ["7a\x00\x01\x00", "7a\x00\x01\x00"] - │ │ ├── ["7a\x00\x01\x12a\x00\x01", "7a\x00\x01\x12a\x00\x01"] - │ │ ├── ["7a\x00\x01*\x02\x00", "7a\x00\x01*\x02\x00"] - │ │ └── ["7a\x00\x02a\x00\x01\x12b\x00\x01", "7a\x00\x02a\x00\x01\x12b\x00\x01"] - │ ├── key: (1) - │ └── fd: (1)-->(7) + │ └── inverted constraint: /7/1 + │ └── spans + │ ├── ["7a\x00\x01\x00", "7a\x00\x01\x00"] + │ ├── ["7a\x00\x01\x12a\x00\x01", "7a\x00\x01\x12a\x00\x01"] + │ ├── ["7a\x00\x01*\x02\x00", "7a\x00\x01*\x02\x00"] + │ └── ["7a\x00\x02a\x00\x01\x12b\x00\x01", "7a\x00\x02a\x00\x01\x12b\x00\x01"] └── filters └── (j:4->'a') IN ('null', '"a"', '1', '{"a": "b"}') [outer=(4), immutable] @@ -4578,17 +4513,15 @@ project │ ├── key: (1) │ └── scan b@j_inv_idx,inverted │ ├── columns: k:1!null j_inverted_key:7!null - │ ├── inverted constraint: /7/1 - │ │ └── spans - │ │ ├── ["7a\x00\x01\x00", "7a\x00\x01\x00"] - │ │ ├── ["7a\x00\x01\x12a\x00\x01", "7a\x00\x01\x12a\x00\x01"] - │ │ ├── ["7a\x00\x01*\x02\x00", "7a\x00\x01*\x02\x00"] - │ │ ├── ["7a\x00\x02\x00\x03\x00\x01*\x02\x00", "7a\x00\x02\x00\x03\x00\x01*\x02\x00"] - │ │ ├── ["7a\x00\x02\x00\x03\x00\x01*\x04\x00", "7a\x00\x02\x00\x03\x00\x01*\x04\x00"] - │ │ ├── ["7a\x00\x02\x00\x03\x00\x01*\x06\x00", "7a\x00\x02\x00\x03\x00\x01*\x06\x00"] - │ │ └── ["7a\x00\x02a\x00\x01\x12b\x00\x01", "7a\x00\x02a\x00\x01\x12b\x00\x01"] - │ ├── key: (1) - │ └── fd: (1)-->(7) + │ └── inverted constraint: /7/1 + │ └── spans + │ ├── ["7a\x00\x01\x00", "7a\x00\x01\x00"] + │ ├── ["7a\x00\x01\x12a\x00\x01", "7a\x00\x01\x12a\x00\x01"] + │ ├── ["7a\x00\x01*\x02\x00", "7a\x00\x01*\x02\x00"] + │ ├── ["7a\x00\x02\x00\x03\x00\x01*\x02\x00", "7a\x00\x02\x00\x03\x00\x01*\x02\x00"] + │ ├── ["7a\x00\x02\x00\x03\x00\x01*\x04\x00", "7a\x00\x02\x00\x03\x00\x01*\x04\x00"] + │ ├── ["7a\x00\x02\x00\x03\x00\x01*\x06\x00", "7a\x00\x02\x00\x03\x00\x01*\x06\x00"] + │ └── ["7a\x00\x02a\x00\x01\x12b\x00\x01", "7a\x00\x02a\x00\x01\x12b\x00\x01"] └── filters └── (j:4->'a') IN ('null', '"a"', '1', '[1, 2, 3]', '{"a": "b"}') [outer=(4), immutable] @@ -4649,13 +4582,11 @@ project │ ├── key: (1) │ └── scan b@j_inv_idx,inverted │ ├── columns: k:1!null j_inverted_key:7!null - │ ├── inverted constraint: /7/1 - │ │ └── spans - │ │ ├── ["7a\x00\x02\x00\x03\x00\x01*\x02\x00", "7a\x00\x02\x00\x03\x00\x01*\x02\x00"] - │ │ ├── ["7a\x00\x02\x00\x03\x00\x01*\x04\x00", "7a\x00\x02\x00\x03\x00\x01*\x04\x00"] - │ │ └── ["7a\x00\x02\x00\x03\x00\x01*\x06\x00", "7a\x00\x02\x00\x03\x00\x01*\x06\x00"] - │ ├── key: (1) - │ └── fd: (1)-->(7) + │ └── inverted constraint: /7/1 + │ └── spans + │ ├── ["7a\x00\x02\x00\x03\x00\x01*\x02\x00", "7a\x00\x02\x00\x03\x00\x01*\x02\x00"] + │ ├── ["7a\x00\x02\x00\x03\x00\x01*\x04\x00", "7a\x00\x02\x00\x03\x00\x01*\x04\x00"] + │ └── ["7a\x00\x02\x00\x03\x00\x01*\x06\x00", "7a\x00\x02\x00\x03\x00\x01*\x06\x00"] └── filters └── (j:4->'a') IN ('[1]', '[1, 2]', '[1, 2, 3]') [outer=(4), immutable] @@ -4690,12 +4621,10 @@ project │ ├── key: (1) │ └── scan b@j_inv_idx,inverted │ 
├── columns: k:1!null j_inverted_key:7!null - │ ├── inverted constraint: /7/1 - │ │ └── spans - │ │ ├── ["7\x00\x03\x00\x01*\x02\x00", "7\x00\x03\x00\x01*\x02\x00"] - │ │ └── ["7\x00\x03\x00\x01*\x04\x00", "7\x00\x03\x00\x01*\x04\x00"] - │ ├── key: (1) - │ └── fd: (1)-->(7) + │ └── inverted constraint: /7/1 + │ └── spans + │ ├── ["7\x00\x03\x00\x01*\x02\x00", "7\x00\x03\x00\x01*\x02\x00"] + │ └── ["7\x00\x03\x00\x01*\x04\x00", "7\x00\x03\x00\x01*\x04\x00"] └── filters └── (j:4->0) IN ('1', '2') [outer=(4), immutable] @@ -4729,12 +4658,10 @@ project │ ├── key: (1) │ └── scan b@j_inv_idx,inverted │ ├── columns: k:1!null j_inverted_key:7!null - │ ├── inverted constraint: /7/1 - │ │ └── spans - │ │ ├── ["7\x00\x03\x00\x01\x12a\x00\x01", "7\x00\x03\x00\x01\x12a\x00\x01"] - │ │ └── ["7\x00\x03\x00\x01\x12b\x00\x01", "7\x00\x03\x00\x01\x12b\x00\x01"] - │ ├── key: (1) - │ └── fd: (1)-->(7) + │ └── inverted constraint: /7/1 + │ └── spans + │ ├── ["7\x00\x03\x00\x01\x12a\x00\x01", "7\x00\x03\x00\x01\x12a\x00\x01"] + │ └── ["7\x00\x03\x00\x01\x12b\x00\x01", "7\x00\x03\x00\x01\x12b\x00\x01"] └── filters └── (j:4->0) IN ('"a"', '"b"') [outer=(4), immutable] @@ -4768,12 +4695,10 @@ project │ ├── key: (1) │ └── scan b@j_inv_idx,inverted │ ├── columns: k:1!null j_inverted_key:7!null - │ ├── inverted constraint: /7/1 - │ │ └── spans - │ │ ├── ["7\x00\x03a\x00\x01\x12b\x00\x01", "7\x00\x03a\x00\x01\x12b\x00\x01"] - │ │ └── ["7\x00\x03c\x00\x01\x12d\x00\x01", "7\x00\x03c\x00\x01\x12d\x00\x01"] - │ ├── key: (1) - │ └── fd: (1)-->(7) + │ └── inverted constraint: /7/1 + │ └── spans + │ ├── ["7\x00\x03a\x00\x01\x12b\x00\x01", "7\x00\x03a\x00\x01\x12b\x00\x01"] + │ └── ["7\x00\x03c\x00\x01\x12d\x00\x01", "7\x00\x03c\x00\x01\x12d\x00\x01"] └── filters └── (j:4->0) IN ('{"a": "b"}', '{"c": "d"}') [outer=(4), immutable] @@ -4976,14 +4901,12 @@ project │ ├── key: (1) │ └── scan b@j_inv_idx,inverted │ ├── columns: k:1!null j_inverted_key:7!null - │ ├── inverted constraint: /7/1 - │ │ └── spans - │ │ ├── ["7\x00\x03\x00\x01\x00", "7\x00\x03\x00\x01\x00"] - │ │ ├── ["7\x00\x03\x00\x01\x12a\x00\x01", "7\x00\x03\x00\x01\x12a\x00\x01"] - │ │ ├── ["7\x00\x03\x00\x01*\x02\x00", "7\x00\x03\x00\x01*\x02\x00"] - │ │ └── ["7\x00\x03a\x00\x01\x12b\x00\x01", "7\x00\x03a\x00\x01\x12b\x00\x01"] - │ ├── key: (1) - │ └── fd: (1)-->(7) + │ └── inverted constraint: /7/1 + │ └── spans + │ ├── ["7\x00\x03\x00\x01\x00", "7\x00\x03\x00\x01\x00"] + │ ├── ["7\x00\x03\x00\x01\x12a\x00\x01", "7\x00\x03\x00\x01\x12a\x00\x01"] + │ ├── ["7\x00\x03\x00\x01*\x02\x00", "7\x00\x03\x00\x01*\x02\x00"] + │ └── ["7\x00\x03a\x00\x01\x12b\x00\x01", "7\x00\x03a\x00\x01\x12b\x00\x01"] └── filters └── (j:4->0) IN ('null', '"a"', '1', '{"a": "b"}') [outer=(4), immutable] @@ -5044,13 +4967,11 @@ project │ ├── key: (1) │ └── scan b@j_inv_idx,inverted │ ├── columns: k:1!null j_inverted_key:7!null - │ ├── inverted constraint: /7/1 - │ │ └── spans - │ │ ├── ["7\x00\x03\x00\x03\x00\x01*\x02\x00", "7\x00\x03\x00\x03\x00\x01*\x02\x00"] - │ │ ├── ["7\x00\x03\x00\x03\x00\x01*\x04\x00", "7\x00\x03\x00\x03\x00\x01*\x04\x00"] - │ │ └── ["7\x00\x03\x00\x03\x00\x01*\x06\x00", "7\x00\x03\x00\x03\x00\x01*\x06\x00"] - │ ├── key: (1) - │ └── fd: (1)-->(7) + │ └── inverted constraint: /7/1 + │ └── spans + │ ├── ["7\x00\x03\x00\x03\x00\x01*\x02\x00", "7\x00\x03\x00\x03\x00\x01*\x02\x00"] + │ ├── ["7\x00\x03\x00\x03\x00\x01*\x04\x00", "7\x00\x03\x00\x03\x00\x01*\x04\x00"] + │ └── ["7\x00\x03\x00\x03\x00\x01*\x06\x00", "7\x00\x03\x00\x03\x00\x01*\x06\x00"] └── filters └── (j:4->0) IN 
('[1]', '[1, 2]', '[1, 2, 3]') [outer=(4), immutable] @@ -5087,14 +5008,12 @@ project │ ├── key: (1) │ └── scan b@j_inv_idx,inverted │ ├── columns: k:1!null j_inverted_key:7!null - │ ├── inverted constraint: /7/1 - │ │ └── spans - │ │ ├── ["7\x00\x03a\x00\x01\x00", "7\x00\x03a\x00\x01\x00"] - │ │ ├── ["7\x00\x03a\x00\x01\x12a\x00\x01", "7\x00\x03a\x00\x01\x12a\x00\x01"] - │ │ ├── ["7\x00\x03a\x00\x01*\x02\x00", "7\x00\x03a\x00\x01*\x02\x00"] - │ │ └── ["7\x00\x03a\x00\x02a\x00\x01\x12b\x00\x01", "7\x00\x03a\x00\x02a\x00\x01\x12b\x00\x01"] - │ ├── key: (1) - │ └── fd: (1)-->(7) + │ └── inverted constraint: /7/1 + │ └── spans + │ ├── ["7\x00\x03a\x00\x01\x00", "7\x00\x03a\x00\x01\x00"] + │ ├── ["7\x00\x03a\x00\x01\x12a\x00\x01", "7\x00\x03a\x00\x01\x12a\x00\x01"] + │ ├── ["7\x00\x03a\x00\x01*\x02\x00", "7\x00\x03a\x00\x01*\x02\x00"] + │ └── ["7\x00\x03a\x00\x02a\x00\x01\x12b\x00\x01", "7\x00\x03a\x00\x02a\x00\x01\x12b\x00\x01"] └── filters └── ((j:4->0)->'a') IN ('null', '"a"', '1', '{"a": "b"}') [outer=(4), immutable] @@ -5118,12 +5037,10 @@ project ├── key: (1) └── scan b@j_inv_idx,inverted ├── columns: k:1!null j_inverted_key:7!null - ├── inverted constraint: /7/1 - │ └── spans - │ ├── ["7a\x00\x01\x12b\x00\x01", "7a\x00\x01\x12b\x00\x01"] - │ └── ["7c\x00\x01\x12d\x00\x01", "7c\x00\x01\x12d\x00\x01"] - ├── key: (1) - └── fd: (1)-->(7) + └── inverted constraint: /7/1 + └── spans + ├── ["7a\x00\x01\x12b\x00\x01", "7a\x00\x01\x12b\x00\x01"] + └── ["7c\x00\x01\x12d\x00\x01", "7c\x00\x01\x12d\x00\x01"] # Query using the fetch val and containment operators. opt expect=GenerateInvertedIndexScans @@ -5143,12 +5060,10 @@ project ├── key: (1) └── scan b@j_inv_idx,inverted ├── columns: k:1!null j_inverted_key:7!null - ├── inverted constraint: /7/1 - │ └── spans - │ ├── ["7a\x00\x01\x12b\x00\x01", "7a\x00\x01\x12b\x00\x01"] - │ └── ["7a\x00\x02\x00\x03\x00\x01\x12b\x00\x01", "7a\x00\x02\x00\x03\x00\x01\x12b\x00\x01"] - ├── key: (1) - └── fd: (1)-->(7) + └── inverted constraint: /7/1 + └── spans + ├── ["7a\x00\x01\x12b\x00\x01", "7a\x00\x01\x12b\x00\x01"] + └── ["7a\x00\x02\x00\x03\x00\x01\x12b\x00\x01", "7a\x00\x02\x00\x03\x00\x01\x12b\x00\x01"] opt expect=GenerateInvertedIndexScans SELECT k FROM b WHERE j->'a' <@ '"b"' @@ -5176,12 +5091,10 @@ project │ ├── key: (1) │ └── scan b@j_inv_idx,inverted │ ├── columns: k:1!null j_inverted_key:7!null - │ ├── inverted constraint: /7/1 - │ │ └── spans - │ │ ├── ["7\x00\x019", "7\x00\x019"] - │ │ └── ["7a\x00\x01\x12b\x00\x01", "7a\x00\x01\x12b\x00\x01"] - │ ├── key: (1) - │ └── fd: (1)-->(7) + │ └── inverted constraint: /7/1 + │ └── spans + │ ├── ["7\x00\x019", "7\x00\x019"] + │ └── ["7a\x00\x01\x12b\x00\x01", "7a\x00\x01\x12b\x00\x01"] └── filters └── (j:4->'a') <@ '"b"' [outer=(4), immutable] @@ -5203,12 +5116,10 @@ project ├── key: (1) └── scan b@j_inv_idx,inverted ├── columns: k:1!null j_inverted_key:7!null - ├── inverted constraint: /7/1 - │ └── spans - │ ├── ["7a\x00\x02b\x00\x01\x12c\x00\x01", "7a\x00\x02b\x00\x01\x12c\x00\x01"] - │ └── ["7a\x00\x02b\x00\x02\x00\x03\x00\x01\x12c\x00\x01", "7a\x00\x02b\x00\x02\x00\x03\x00\x01\x12c\x00\x01"] - ├── key: (1) - └── fd: (1)-->(7) + └── inverted constraint: /7/1 + └── spans + ├── ["7a\x00\x02b\x00\x01\x12c\x00\x01", "7a\x00\x02b\x00\x01\x12c\x00\x01"] + └── ["7a\x00\x02b\x00\x02\x00\x03\x00\x01\x12c\x00\x01", "7a\x00\x02b\x00\x02\x00\x03\x00\x01\x12c\x00\x01"] opt expect=ConvertJSONSubscriptToFetchValue SELECT k FROM b WHERE j['a']['b'] @> '"c"' @@ -5227,12 +5138,10 @@ project ├── key: (1) └── scan 
b@j_inv_idx,inverted ├── columns: k:1!null j_inverted_key:7!null - ├── inverted constraint: /7/1 - │ └── spans - │ ├── ["7a\x00\x02b\x00\x01\x12c\x00\x01", "7a\x00\x02b\x00\x01\x12c\x00\x01"] - │ └── ["7a\x00\x02b\x00\x02\x00\x03\x00\x01\x12c\x00\x01", "7a\x00\x02b\x00\x02\x00\x03\x00\x01\x12c\x00\x01"] - ├── key: (1) - └── fd: (1)-->(7) + └── inverted constraint: /7/1 + └── spans + ├── ["7a\x00\x02b\x00\x01\x12c\x00\x01", "7a\x00\x02b\x00\x01\x12c\x00\x01"] + └── ["7a\x00\x02b\x00\x02\x00\x03\x00\x01\x12c\x00\x01", "7a\x00\x02b\x00\x02\x00\x03\x00\x01\x12c\x00\x01"] opt expect=GenerateInvertedIndexScans SELECT k FROM b WHERE j->'a'->'b' <@ '"c"' @@ -5261,13 +5170,11 @@ project │ ├── key: (1) │ └── scan b@j_inv_idx,inverted │ ├── columns: k:1!null j_inverted_key:7!null - │ ├── inverted constraint: /7/1 - │ │ └── spans - │ │ ├── ["7\x00\x019", "7\x00\x019"] - │ │ ├── ["7a\x00\x019", "7a\x00\x019"] - │ │ └── ["7a\x00\x02b\x00\x01\x12c\x00\x01", "7a\x00\x02b\x00\x01\x12c\x00\x01"] - │ ├── key: (1) - │ └── fd: (1)-->(7) + │ └── inverted constraint: /7/1 + │ └── spans + │ ├── ["7\x00\x019", "7\x00\x019"] + │ ├── ["7a\x00\x019", "7a\x00\x019"] + │ └── ["7a\x00\x02b\x00\x01\x12c\x00\x01", "7a\x00\x02b\x00\x01\x12c\x00\x01"] └── filters └── ((j:4->'a')->'b') <@ '"c"' [outer=(4), immutable] @@ -5291,14 +5198,12 @@ project ├── key: (1) └── scan b@j_inv_idx,inverted ├── columns: k:1!null j_inverted_key:7!null - ├── inverted constraint: /7/1 - │ └── spans - │ ├── ["7a\x00\x01\x12b\x00\x01", "7a\x00\x01\x12b\x00\x01"] - │ ├── ["7a\x00\x02\x00\x03\x00\x01\x12b\x00\x01", "7a\x00\x02\x00\x03\x00\x01\x12b\x00\x01"] - │ ├── ["7c\x00\x01\x12d\x00\x01", "7c\x00\x01\x12d\x00\x01"] - │ └── ["7c\x00\x02\x00\x03\x00\x01\x12d\x00\x01", "7c\x00\x02\x00\x03\x00\x01\x12d\x00\x01"] - ├── key: (1) - └── fd: (1)-->(7) + └── inverted constraint: /7/1 + └── spans + ├── ["7a\x00\x01\x12b\x00\x01", "7a\x00\x01\x12b\x00\x01"] + ├── ["7a\x00\x02\x00\x03\x00\x01\x12b\x00\x01", "7a\x00\x02\x00\x03\x00\x01\x12b\x00\x01"] + ├── ["7c\x00\x01\x12d\x00\x01", "7c\x00\x01\x12d\x00\x01"] + └── ["7c\x00\x02\x00\x03\x00\x01\x12d\x00\x01", "7c\x00\x02\x00\x03\x00\x01\x12d\x00\x01"] # Query using the fetch val and contains operators in a disjunction with a # contained by operator. 
@@ -5329,13 +5234,11 @@ project │ ├── key: (1) │ └── scan b@j_inv_idx,inverted │ ├── columns: k:1!null j_inverted_key:7!null - │ ├── inverted constraint: /7/1 - │ │ └── spans - │ │ ├── ["7\x00\x019", "7\x00\x019"] - │ │ ├── ["7a\x00\x02\x00\x03\x00\x01\x12b\x00\x01", "7a\x00\x02\x00\x03\x00\x01\x12b\x00\x01"] - │ │ └── ["7c\x00\x01\x12d\x00\x01", "7c\x00\x01\x12d\x00\x01"] - │ ├── key: (1) - │ └── fd: (1)-->(7) + │ └── inverted constraint: /7/1 + │ └── spans + │ ├── ["7\x00\x019", "7\x00\x019"] + │ ├── ["7a\x00\x02\x00\x03\x00\x01\x12b\x00\x01", "7a\x00\x02\x00\x03\x00\x01\x12b\x00\x01"] + │ └── ["7c\x00\x01\x12d\x00\x01", "7c\x00\x01\x12d\x00\x01"] └── filters └── ((j:4->'a') @> '["b"]') OR (j:4 <@ '{"c": "d"}') [outer=(4), immutable] @@ -5366,14 +5269,12 @@ project ├── key: (1) └── scan b@j_inv_idx,inverted ├── columns: k:1!null j_inverted_key:7!null - ├── inverted constraint: /7/1 - │ └── spans - │ ├── ["7a\x00\x01\x12b\x00\x01", "7a\x00\x01\x12b\x00\x01"] - │ ├── ["7a\x00\x02\x00\x03\x00\x01\x12b\x00\x01", "7a\x00\x02\x00\x03\x00\x01\x12b\x00\x01"] - │ ├── ["7c\x00\x01\x12d\x00\x01", "7c\x00\x01\x12d\x00\x01"] - │ └── ["7c\x00\x02\x00\x03\x00\x01\x12d\x00\x01", "7c\x00\x02\x00\x03\x00\x01\x12d\x00\x01"] - ├── key: (1) - └── fd: (1)-->(7) + └── inverted constraint: /7/1 + └── spans + ├── ["7a\x00\x01\x12b\x00\x01", "7a\x00\x01\x12b\x00\x01"] + ├── ["7a\x00\x02\x00\x03\x00\x01\x12b\x00\x01", "7a\x00\x02\x00\x03\x00\x01\x12b\x00\x01"] + ├── ["7c\x00\x01\x12d\x00\x01", "7c\x00\x01\x12d\x00\x01"] + └── ["7c\x00\x02\x00\x03\x00\x01\x12d\x00\x01", "7c\x00\x02\x00\x03\x00\x01\x12d\x00\x01"] # Query using the fetch val and contains operators in conjunction with a # contained by operator. @@ -5410,13 +5311,11 @@ project │ ├── key: (1) │ └── scan b@j_inv_idx,inverted │ ├── columns: k:1!null j_inverted_key:7!null - │ ├── inverted constraint: /7/1 - │ │ └── spans - │ │ ├── ["7\x00\x019", "7\x00\x019"] - │ │ ├── ["7a\x00\x02\x00\x03\x00\x01\x12b\x00\x01", "7a\x00\x02\x00\x03\x00\x01\x12b\x00\x01"] - │ │ └── ["7c\x00\x01\x12d\x00\x01", "7c\x00\x01\x12d\x00\x01"] - │ ├── key: (1) - │ └── fd: (1)-->(7) + │ └── inverted constraint: /7/1 + │ └── spans + │ ├── ["7\x00\x019", "7\x00\x019"] + │ ├── ["7a\x00\x02\x00\x03\x00\x01\x12b\x00\x01", "7a\x00\x02\x00\x03\x00\x01\x12b\x00\x01"] + │ └── ["7c\x00\x01\x12d\x00\x01", "7c\x00\x01\x12d\x00\x01"] └── filters └── j:4 <@ '{"c": "d"}' [outer=(4), immutable] @@ -5466,10 +5365,8 @@ project ├── key: (1) └── scan c@a_inv_idx,inverted ├── columns: k:1!null a_inverted_key:6!null - ├── inverted constraint: /6/1 - │ └── spans: ["\x89", "\x8b") - ├── key: (1) - └── fd: (1)-->(6) + └── inverted constraint: /6/1 + └── spans: ["\x89", "\x8b") opt expect=GenerateInvertedIndexScans SELECT k FROM c WHERE a @> ARRAY[]::INT[] @@ -5530,12 +5427,10 @@ select │ ├── key: (1) │ └── scan c@a_inv_idx,inverted │ ├── columns: k:1!null a_inverted_key:6!null - │ ├── inverted constraint: /6/1 - │ │ └── spans - │ │ ├── ["C", "C"] - │ │ └── ["\x89", "\x89"] - │ ├── key: (1) - │ └── fd: (1)-->(6) + │ └── inverted constraint: /6/1 + │ └── spans + │ ├── ["C", "C"] + │ └── ["\x89", "\x89"] └── filters └── a:2 <@ ARRAY[1] [outer=(2), immutable] @@ -5601,12 +5496,10 @@ select │ ├── key: (1) │ └── scan c@a_inv_idx,inverted │ ├── columns: k:1!null a_inverted_key:6!null - │ ├── inverted constraint: /6/1 - │ │ └── spans - │ │ ├── ["C", "C"] - │ │ └── ["\x89", "\x8b") - │ ├── key: (1) - │ └── fd: (1)-->(6) + │ └── inverted constraint: /6/1 + │ └── spans + │ ├── ["C", "C"] + │ └── ["\x89", "\x8b") └── 
filters └── (a:2 <@ ARRAY[1]) OR (a:2 <@ ARRAY[2]) [outer=(2), immutable] @@ -5637,12 +5530,10 @@ select │ ├── key: (1) │ └── scan c@a_inv_idx,inverted │ ├── columns: k:1!null a_inverted_key:6!null - │ ├── inverted constraint: /6/1 - │ │ └── spans - │ │ ├── ["C", "C"] - │ │ └── ["\x89", "\x8b") - │ ├── key: (1) - │ └── fd: (1)-->(6) + │ └── inverted constraint: /6/1 + │ └── spans + │ ├── ["C", "C"] + │ └── ["\x89", "\x8b") └── filters ├── a:2 <@ ARRAY[1] [outer=(2), immutable] └── a:2 <@ ARRAY[2] [outer=(2), immutable] @@ -5680,9 +5571,7 @@ index-join c ├── columns: k:1!null a_inverted_key:6!null ├── inverted constraint: /6/1 │ └── spans: ["\x89", "\x8b") - ├── flags: force-index=a_inv_idx - ├── key: (1) - └── fd: (1)-->(6) + └── flags: force-index=a_inv_idx opt expect=GenerateInvertedIndexScans SELECT * FROM c WHERE a && ARRAY[1] AND a && ARRAY[2] @@ -5808,12 +5697,10 @@ index-join b ├── key: (1) └── scan b@j_inv_idx,inverted ├── columns: k:1!null j_inverted_key:8!null - ├── inverted constraint: /8/1 - │ └── spans - │ ├── ["7\x00\x018", "7\x00\x018"] - │ └── ["7\x00\x03", "7\x00\x03"] - ├── key: (1) - └── fd: (1)-->(8) + └── inverted constraint: /8/1 + └── spans + ├── ["7\x00\x018", "7\x00\x018"] + └── ["7\x00\x03", "7\x00\x03"] # The inverted index will never be used for <@ expressions if the index version # does not have empty arrays in the inverted index. @@ -5878,13 +5765,11 @@ project │ ├── key: (1) │ └── scan g@geog_idx,inverted │ ├── columns: k:1!null geog_inverted_key:8!null - │ ├── inverted constraint: /8/1 - │ │ └── spans - │ │ ├── ["B\xfdL\x00\x00\x00\x00\x00\x00\x00", "B\xfdL\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfdN\x00\x00\x00\x00\x00\x00\x01", "B\xfdP\x00\x00\x00\x00\x00\x00\x00") - │ │ └── ["B\xfdP\x00\x00\x00\x00\x00\x00\x00", "B\xfdP\x00\x00\x00\x00\x00\x00\x00"] - │ ├── key: (1) - │ └── fd: (1)-->(8) + │ └── inverted constraint: /8/1 + │ └── spans + │ ├── ["B\xfdL\x00\x00\x00\x00\x00\x00\x00", "B\xfdL\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfdN\x00\x00\x00\x00\x00\x00\x01", "B\xfdP\x00\x00\x00\x00\x00\x00\x00") + │ └── ["B\xfdP\x00\x00\x00\x00\x00\x00\x00", "B\xfdP\x00\x00\x00\x00\x00\x00\x00"] └── filters └── st_intersects('0101000020E61000009279E40F069E45C0BEE36FD63B1D5240', geog:4) [outer=(4), immutable, constraints=(/4: (/NULL - ])] @@ -5918,13 +5803,11 @@ project │ ├── key: (1) │ └── scan g@geog_idx,inverted │ ├── columns: k:1!null geog_inverted_key:8!null - │ ├── inverted constraint: /8/1 - │ │ └── spans - │ │ ├── ["B\xfdL\x00\x00\x00\x00\x00\x00\x00", "B\xfdL\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfdN\x00\x00\x00\x00\x00\x00\x01", "B\xfdP\x00\x00\x00\x00\x00\x00\x00") - │ │ └── ["B\xfdP\x00\x00\x00\x00\x00\x00\x00", "B\xfdP\x00\x00\x00\x00\x00\x00\x00"] - │ ├── key: (1) - │ └── fd: (1)-->(8) + │ └── inverted constraint: /8/1 + │ └── spans + │ ├── ["B\xfdL\x00\x00\x00\x00\x00\x00\x00", "B\xfdL\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfdN\x00\x00\x00\x00\x00\x00\x01", "B\xfdP\x00\x00\x00\x00\x00\x00\x00") + │ └── ["B\xfdP\x00\x00\x00\x00\x00\x00\x00", "B\xfdP\x00\x00\x00\x00\x00\x00\x00"] └── filters └── st_intersects(geog:4, '0101000020E61000009279E40F069E45C0BEE36FD63B1D5240') [outer=(4), immutable, constraints=(/4: (/NULL - ])] @@ -5957,13 +5840,11 @@ project │ ├── key: (1) │ └── scan g@geom_idx,inverted │ ├── columns: k:1!null geom_inverted_key:7!null - │ ├── inverted constraint: /7/1 - │ │ └── spans - │ │ ├── ["B\xfd\x10\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x10\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfd\x11\x00\x00\x00\x00\x00\x00\x00", 
"B\xfd\x11\x00\x00\x00\x00\x00\x00\x00"] - │ │ └── ["B\xfd\x14\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x14\x00\x00\x00\x00\x00\x00\x00"] - │ ├── key: (1) - │ └── fd: (1)-->(7) + │ └── inverted constraint: /7/1 + │ └── spans + │ ├── ["B\xfd\x10\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x10\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfd\x11\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x11\x00\x00\x00\x00\x00\x00\x00"] + │ └── ["B\xfd\x14\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x14\x00\x00\x00\x00\x00\x00\x00"] └── filters └── st_coveredby('0103000000010000000500000000000000000000000000000000000000000000000000F03F0000000000000000000000000000F03F000000000000F03F0000000000000000000000000000F03F00000000000000000000000000000000', geom:3) [outer=(3), immutable, constraints=(/3: (/NULL - ])] @@ -5994,10 +5875,8 @@ project │ ├── key: (1) │ └── scan g@geom_idx,inverted │ ├── columns: k:1!null geom_inverted_key:7!null - │ ├── inverted constraint: /7/1 - │ │ └── spans: ["B\x89", "B\xfd \x00\x00\x00\x00\x00\x00\x00") - │ ├── key: (1) - │ └── fd: (1)-->(7) + │ └── inverted constraint: /7/1 + │ └── spans: ["B\x89", "B\xfd \x00\x00\x00\x00\x00\x00\x00") └── filters └── st_coveredby(geom:3, '0103000000010000000500000000000000000000000000000000000000000000000000F03F0000000000000000000000000000F03F000000000000F03F0000000000000000000000000000F03F00000000000000000000000000000000') [outer=(3), immutable, constraints=(/3: (/NULL - ])] @@ -6027,10 +5906,8 @@ project │ ├── key: (1) │ └── scan g@geom_idx,inverted │ ├── columns: k:1!null geom_inverted_key:7!null - │ ├── inverted constraint: /7/1 - │ │ └── spans: ["B\x89", "B\xfd \x00\x00\x00\x00\x00\x00\x00") - │ ├── key: (1) - │ └── fd: (1)-->(7) + │ └── inverted constraint: /7/1 + │ └── spans: ["B\x89", "B\xfd \x00\x00\x00\x00\x00\x00\x00") └── filters └── st_dwithin('0103000000010000000500000000000000000000000000000000000000000000000000F03F0000000000000000000000000000F03F000000000000F03F0000000000000000000000000000F03F00000000000000000000000000000000', geom:3, 2.0) [outer=(3), immutable, constraints=(/3: (/NULL - ])] @@ -6060,10 +5937,8 @@ project │ ├── key: (1) │ └── scan g@geom_idx,inverted │ ├── columns: k:1!null geom_inverted_key:7!null - │ ├── inverted constraint: /7/1 - │ │ └── spans: ["B\x89", "B\xfd \x00\x00\x00\x00\x00\x00\x00") - │ ├── key: (1) - │ └── fd: (1)-->(7) + │ └── inverted constraint: /7/1 + │ └── spans: ["B\x89", "B\xfd \x00\x00\x00\x00\x00\x00\x00") └── filters └── st_dfullywithin('0103000000010000000500000000000000000000000000000000000000000000000000F03F0000000000000000000000000000F03F000000000000F03F0000000000000000000000000000F03F00000000000000000000000000000000', geom:3, 2.0) [outer=(3), immutable, constraints=(/3: (/NULL - ])] @@ -6103,20 +5978,18 @@ project │ ├── key: (1) │ └── scan g@geog_idx,inverted │ ├── columns: k:1!null geog_inverted_key:8!null - │ ├── inverted constraint: /8/1 - │ │ └── spans - │ │ ├── ["B\xfdD\x00\x00\x00\x00\x00\x00\x00", "B\xfdD\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfdD\x00\x00\x00\x00\x00\x00\x01", "B\xfdF\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfdF\x00\x00\x00\x00\x00\x00\x01", "B\xfdH\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfdH\x00\x00\x00\x00\x00\x00\x01", "B\xfdP\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfdP\x00\x00\x00\x00\x00\x00\x00", "B\xfdP\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfdP\x00\x00\x00\x00\x00\x00\x01", "B\xfdR\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfdR\x00\x00\x00\x00\x00\x00\x01", "B\xfdT\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfdT\x00\x00\x00\x00\x00\x00\x00", 
"B\xfdT\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfdZ\x00\x00\x00\x00\x00\x00\x01", "B\xfd\\\x00\x00\x00\x00\x00\x00\x00") - │ │ └── ["B\xfd\\\x00\x00\x00\x00\x00\x00\x00", "B\xfd\\\x00\x00\x00\x00\x00\x00\x00"] - │ ├── key: (1) - │ └── fd: (1)-->(8) + │ └── inverted constraint: /8/1 + │ └── spans + │ ├── ["B\xfdD\x00\x00\x00\x00\x00\x00\x00", "B\xfdD\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfdD\x00\x00\x00\x00\x00\x00\x01", "B\xfdF\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfdF\x00\x00\x00\x00\x00\x00\x01", "B\xfdH\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfdH\x00\x00\x00\x00\x00\x00\x01", "B\xfdP\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfdP\x00\x00\x00\x00\x00\x00\x00", "B\xfdP\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfdP\x00\x00\x00\x00\x00\x00\x01", "B\xfdR\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfdR\x00\x00\x00\x00\x00\x00\x01", "B\xfdT\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfdT\x00\x00\x00\x00\x00\x00\x00", "B\xfdT\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfdZ\x00\x00\x00\x00\x00\x00\x01", "B\xfd\\\x00\x00\x00\x00\x00\x00\x00") + │ └── ["B\xfd\\\x00\x00\x00\x00\x00\x00\x00", "B\xfd\\\x00\x00\x00\x00\x00\x00\x00"] └── filters └── st_dwithin('0101000020E61000009279E40F069E45C0BEE36FD63B1D5240', geog:4, 2000.0) [outer=(4), immutable, constraints=(/4: (/NULL - ])] @@ -6156,20 +6029,18 @@ project │ ├── key: (1) │ └── scan g@geog_idx,inverted │ ├── columns: k:1!null geog_inverted_key:8!null - │ ├── inverted constraint: /8/1 - │ │ └── spans - │ │ ├── ["B\xfdD\x00\x00\x00\x00\x00\x00\x00", "B\xfdD\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfdD\x00\x00\x00\x00\x00\x00\x01", "B\xfdF\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfdF\x00\x00\x00\x00\x00\x00\x01", "B\xfdH\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfdH\x00\x00\x00\x00\x00\x00\x01", "B\xfdP\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfdP\x00\x00\x00\x00\x00\x00\x00", "B\xfdP\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfdP\x00\x00\x00\x00\x00\x00\x01", "B\xfdR\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfdR\x00\x00\x00\x00\x00\x00\x01", "B\xfdT\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfdT\x00\x00\x00\x00\x00\x00\x00", "B\xfdT\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfdZ\x00\x00\x00\x00\x00\x00\x01", "B\xfd\\\x00\x00\x00\x00\x00\x00\x00") - │ │ └── ["B\xfd\\\x00\x00\x00\x00\x00\x00\x00", "B\xfd\\\x00\x00\x00\x00\x00\x00\x00"] - │ ├── key: (1) - │ └── fd: (1)-->(8) + │ └── inverted constraint: /8/1 + │ └── spans + │ ├── ["B\xfdD\x00\x00\x00\x00\x00\x00\x00", "B\xfdD\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfdD\x00\x00\x00\x00\x00\x00\x01", "B\xfdF\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfdF\x00\x00\x00\x00\x00\x00\x01", "B\xfdH\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfdH\x00\x00\x00\x00\x00\x00\x01", "B\xfdP\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfdP\x00\x00\x00\x00\x00\x00\x00", "B\xfdP\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfdP\x00\x00\x00\x00\x00\x00\x01", "B\xfdR\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfdR\x00\x00\x00\x00\x00\x00\x01", "B\xfdT\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfdT\x00\x00\x00\x00\x00\x00\x00", "B\xfdT\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfdZ\x00\x00\x00\x00\x00\x00\x01", "B\xfd\\\x00\x00\x00\x00\x00\x00\x00") + │ └── ["B\xfd\\\x00\x00\x00\x00\x00\x00\x00", "B\xfd\\\x00\x00\x00\x00\x00\x00\x00"] └── filters └── st_dwithin('0101000020E61000009279E40F069E45C0BEE36FD63B1D5240', geog:4, 2000.0, false) [outer=(4), immutable, constraints=(/4: (/NULL - ])] @@ -6200,10 +6071,8 @@ project │ ├── key: (1) │ └── scan g@geom_idx,inverted │ ├── columns: k:1!null 
geom_inverted_key:7!null - │ ├── inverted constraint: /7/1 - │ │ └── spans: ["B\x89", "B\xfd \x00\x00\x00\x00\x00\x00\x00") - │ ├── key: (1) - │ └── fd: (1)-->(7) + │ └── inverted constraint: /7/1 + │ └── spans: ["B\x89", "B\xfd \x00\x00\x00\x00\x00\x00\x00") └── filters └── st_dwithinexclusive('0103000000010000000500000000000000000000000000000000000000000000000000F03F0000000000000000000000000000F03F000000000000F03F0000000000000000000000000000F03F00000000000000000000000000000000', geom:3, 2.0) [outer=(3), immutable, constraints=(/3: (/NULL - ])] @@ -6234,10 +6103,8 @@ project │ ├── key: (1) │ └── scan g@geom_idx,inverted │ ├── columns: k:1!null geom_inverted_key:7!null - │ ├── inverted constraint: /7/1 - │ │ └── spans: ["B\x89", "B\xfd \x00\x00\x00\x00\x00\x00\x00") - │ ├── key: (1) - │ └── fd: (1)-->(7) + │ └── inverted constraint: /7/1 + │ └── spans: ["B\x89", "B\xfd \x00\x00\x00\x00\x00\x00\x00") └── filters └── st_dwithinexclusive(geom:3, '0103000000010000000500000000000000000000000000000000000000000000000000F03F0000000000000000000000000000F03F000000000000F03F0000000000000000000000000000F03F00000000000000000000000000000000', 2.0) [outer=(3), immutable, constraints=(/3: (/NULL - ])] @@ -6267,10 +6134,8 @@ project │ ├── key: (1) │ └── scan g@geom_idx,inverted │ ├── columns: k:1!null geom_inverted_key:7!null - │ ├── inverted constraint: /7/1 - │ │ └── spans: ["B\x89", "B\xfd \x00\x00\x00\x00\x00\x00\x00") - │ ├── key: (1) - │ └── fd: (1)-->(7) + │ └── inverted constraint: /7/1 + │ └── spans: ["B\x89", "B\xfd \x00\x00\x00\x00\x00\x00\x00") └── filters └── st_dfullywithinexclusive('0103000000010000000500000000000000000000000000000000000000000000000000F03F0000000000000000000000000000F03F000000000000F03F0000000000000000000000000000F03F00000000000000000000000000000000', geom:3, 2.0) [outer=(3), immutable, constraints=(/3: (/NULL - ])] @@ -6308,13 +6173,11 @@ project │ ├── key: (1) │ └── scan g@geog_idx,inverted │ ├── columns: k:1!null geog_inverted_key:8!null - │ ├── inverted constraint: /8/1 - │ │ └── spans - │ │ ├── ["B\xfdL\x00\x00\x00\x00\x00\x00\x00", "B\xfdL\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfdO\x00\x00\x00\x00\x00\x00\x00", "B\xfdO\x00\x00\x00\x00\x00\x00\x00"] - │ │ └── ["B\xfdP\x00\x00\x00\x00\x00\x00\x00", "B\xfdP\x00\x00\x00\x00\x00\x00\x00"] - │ ├── key: (1) - │ └── fd: (1)-->(8) + │ └── inverted constraint: /8/1 + │ └── spans + │ ├── ["B\xfdL\x00\x00\x00\x00\x00\x00\x00", "B\xfdL\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfdO\x00\x00\x00\x00\x00\x00\x00", "B\xfdO\x00\x00\x00\x00\x00\x00\x00"] + │ └── ["B\xfdP\x00\x00\x00\x00\x00\x00\x00", "B\xfdP\x00\x00\x00\x00\x00\x00\x00"] └── filters ├── st_covers('0101000020E61000009279E40F069E45C0BEE36FD63B1D5240', geog:4) [outer=(4), immutable, constraints=(/4: (/NULL - ])] └── st_coveredby('0101000020E61000009279E40F061E44C0BEE36FD63B9D5140', geog:4) [outer=(4), immutable, constraints=(/4: (/NULL - ])] @@ -6347,12 +6210,10 @@ project │ ├── key: (1) │ └── scan g@geog_idx,inverted │ ├── columns: k:1!null geog_inverted_key:8!null - │ ├── inverted constraint: /8/1 - │ │ └── spans - │ │ ├── ["B\xfdL\x00\x00\x00\x00\x00\x00\x00", "B\xfdL\x00\x00\x00\x00\x00\x00\x00"] - │ │ └── ["B\xfdN\x00\x00\x00\x00\x00\x00\x01", "B\xfdP\x00\x00\x00\x00\x00\x00\x01") - │ ├── key: (1) - │ └── fd: (1)-->(8) + │ └── inverted constraint: /8/1 + │ └── spans + │ ├── ["B\xfdL\x00\x00\x00\x00\x00\x00\x00", "B\xfdL\x00\x00\x00\x00\x00\x00\x00"] + │ └── ["B\xfdN\x00\x00\x00\x00\x00\x00\x01", "B\xfdP\x00\x00\x00\x00\x00\x00\x01") └── filters └── 
st_covers('0101000020E61000009279E40F069E45C0BEE36FD63B1D5240', geog:4) OR st_coveredby('0101000020E61000009279E40F061E44C0BEE36FD63B9D5140', geog:4) [outer=(4), immutable, constraints=(/4: (/NULL - ])] @@ -6409,17 +6270,15 @@ project │ ├── key: (1) │ └── scan g@geog_idx,inverted │ ├── columns: k:1!null geog_inverted_key:8!null - │ ├── inverted constraint: /8/1 - │ │ └── spans - │ │ ├── ["B\xfdD\x00\x00\x00\x00\x00\x00\x00", "B\xfdD\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfdG\x00\x00\x00\x00\x00\x00\x00", "B\xfdG\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfdL\x00\x00\x00\x00\x00\x00\x00", "B\xfdL\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfdN\x00\x00\x00\x00\x00\x00\x01", "B\xfdP\x00\x00\x00\x00\x00\x00\x01") - │ │ ├── ["B\xfd\x81\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x81\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfd\x84\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x84\x00\x00\x00\x00\x00\x00\x00"] - │ │ └── ["B\xfd\x90\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x90\x00\x00\x00\x00\x00\x00\x00"] - │ ├── key: (1) - │ └── fd: (1)-->(8) + │ └── inverted constraint: /8/1 + │ └── spans + │ ├── ["B\xfdD\x00\x00\x00\x00\x00\x00\x00", "B\xfdD\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfdG\x00\x00\x00\x00\x00\x00\x00", "B\xfdG\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfdL\x00\x00\x00\x00\x00\x00\x00", "B\xfdL\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfdN\x00\x00\x00\x00\x00\x00\x01", "B\xfdP\x00\x00\x00\x00\x00\x00\x01") + │ ├── ["B\xfd\x81\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x81\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfd\x84\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x84\x00\x00\x00\x00\x00\x00\x00"] + │ └── ["B\xfd\x90\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x90\x00\x00\x00\x00\x00\x00\x00"] └── filters └── st_covers('0101000020E61000009279E40F069E45C0BEE36FD63B1D5240', geog:4) OR (st_coveredby('0102000020E61000000200000075029A081B9A5DC0F085C954C1F840406DC5FEB27B720440454772F90F814840', geog:4) AND st_coveredby('0101000020E610000058569A94821E46C07BC7DFAC773A4E40', geog:4)) [outer=(4), immutable, constraints=(/4: (/NULL - ])] @@ -6452,10 +6311,8 @@ project │ ├── key: (1) │ └── scan g@geom_idx,inverted │ ├── columns: k:1!null geom_inverted_key:7!null - │ ├── inverted constraint: /7/1 - │ │ └── spans: ["B\x89", "B\xfd \x00\x00\x00\x00\x00\x00\x00") - │ ├── key: (1) - │ └── fd: (1)-->(7) + │ └── inverted constraint: /7/1 + │ └── spans: ["B\x89", "B\xfd \x00\x00\x00\x00\x00\x00\x00") └── filters ├── st_covers('0101000020E61000009279E40F069E45C0BEE36FD63B1D5240', geog:4) [outer=(4), immutable, constraints=(/4: (/NULL - ])] └── st_overlaps('0102000000020000000000000000000000000000000000000000000000000000000000000000000040', geom:3) [outer=(3), immutable, constraints=(/3: (/NULL - ])] @@ -6503,13 +6360,11 @@ project │ │ │ ├── key: (9) │ │ │ └── scan g@geog_idx,inverted │ │ │ ├── columns: k:9!null geog_inverted_key:16!null - │ │ │ ├── inverted constraint: /16/9 - │ │ │ │ └── spans - │ │ │ │ ├── ["B\xfdL\x00\x00\x00\x00\x00\x00\x00", "B\xfdL\x00\x00\x00\x00\x00\x00\x00"] - │ │ │ │ ├── ["B\xfdN\x00\x00\x00\x00\x00\x00\x01", "B\xfdP\x00\x00\x00\x00\x00\x00\x00") - │ │ │ │ └── ["B\xfdP\x00\x00\x00\x00\x00\x00\x00", "B\xfdP\x00\x00\x00\x00\x00\x00\x00"] - │ │ │ ├── key: (9) - │ │ │ └── fd: (9)-->(16) + │ │ │ └── inverted constraint: /16/9 + │ │ │ └── spans + │ │ │ ├── ["B\xfdL\x00\x00\x00\x00\x00\x00\x00", "B\xfdL\x00\x00\x00\x00\x00\x00\x00"] + │ │ │ ├── ["B\xfdN\x00\x00\x00\x00\x00\x00\x01", "B\xfdP\x00\x00\x00\x00\x00\x00\x00") + │ │ │ └── ["B\xfdP\x00\x00\x00\x00\x00\x00\x00", 
"B\xfdP\x00\x00\x00\x00\x00\x00\x00"] │ │ └── filters │ │ └── st_covers('0101000020E61000009279E40F069E45C0BEE36FD63B1D5240', geog:12) [outer=(12), immutable, constraints=(/12: (/NULL - ])] │ └── select @@ -6531,10 +6386,8 @@ project │ │ ├── key: (17) │ │ └── scan g@geom_idx,inverted │ │ ├── columns: k:17!null geom_inverted_key:23!null - │ │ ├── inverted constraint: /23/17 - │ │ │ └── spans: ["B\x89", "B\xfd \x00\x00\x00\x00\x00\x00\x00") - │ │ ├── key: (17) - │ │ └── fd: (17)-->(23) + │ │ └── inverted constraint: /23/17 + │ │ └── spans: ["B\x89", "B\xfd \x00\x00\x00\x00\x00\x00\x00") │ └── filters │ └── st_touches('0102000000020000000000000000000000000000000000000000000000000000000000000000000040', geom:19) [outer=(19), immutable, constraints=(/19: (/NULL - ])] └── aggregations @@ -6572,12 +6425,10 @@ project │ ├── key: (1) │ └── scan g@geog_idx,inverted │ ├── columns: k:1!null geog_inverted_key:8!null - │ ├── inverted constraint: /8/1 - │ │ └── spans - │ │ ├── ["B\xfdL\x00\x00\x00\x00\x00\x00\x00", "B\xfdL\x00\x00\x00\x00\x00\x00\x00"] - │ │ └── ["B\xfdN\x00\x00\x00\x00\x00\x00\x01", "B\xfdP\x00\x00\x00\x00\x00\x00\x01") - │ ├── key: (1) - │ └── fd: (1)-->(8) + │ └── inverted constraint: /8/1 + │ └── spans + │ ├── ["B\xfdL\x00\x00\x00\x00\x00\x00\x00", "B\xfdL\x00\x00\x00\x00\x00\x00\x00"] + │ └── ["B\xfdN\x00\x00\x00\x00\x00\x00\x01", "B\xfdP\x00\x00\x00\x00\x00\x00\x01") └── filters └── st_covers('0101000020E61000009279E40F069E45C0BEE36FD63B1D5240', geog:4) OR (st_intersects('0101000020E61000009279E40F061E44C0BEE36FD63B9D5140', geog:4) AND st_crosses('0102000000020000000000000000000000000000000000000000000000000000000000000000000040', geom:3)) [outer=(3,4), immutable, constraints=(/4: (/NULL - ])] @@ -6613,17 +6464,13 @@ project │ ├── key: (1) │ └── select │ ├── columns: k:1!null geog_inverted_key:8!null - │ ├── key: (1) - │ ├── fd: (1)-->(8) │ ├── scan g@geog_idx,inverted │ │ ├── columns: k:1!null geog_inverted_key:8!null - │ │ ├── inverted constraint: /8/1 - │ │ │ └── spans - │ │ │ ├── ["B\xfdL\x00\x00\x00\x00\x00\x00\x00", "B\xfdL\x00\x00\x00\x00\x00\x00\x00"] - │ │ │ ├── ["B\xfdN\x00\x00\x00\x00\x00\x00\x01", "B\xfdP\x00\x00\x00\x00\x00\x00\x00") - │ │ │ └── ["B\xfdP\x00\x00\x00\x00\x00\x00\x00", "B\xfdP\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── key: (1) - │ │ └── fd: (1)-->(8) + │ │ └── inverted constraint: /8/1 + │ │ └── spans + │ │ ├── ["B\xfdL\x00\x00\x00\x00\x00\x00\x00", "B\xfdL\x00\x00\x00\x00\x00\x00\x00"] + │ │ ├── ["B\xfdN\x00\x00\x00\x00\x00\x00\x01", "B\xfdP\x00\x00\x00\x00\x00\x00\x00") + │ │ └── ["B\xfdP\x00\x00\x00\x00\x00\x00\x00", "B\xfdP\x00\x00\x00\x00\x00\x00\x00"] │ └── filters │ └── k:1 > 100 [outer=(1), constraints=(/1: [/101 - ]; tight)] └── filters @@ -6659,12 +6506,10 @@ project │ ├── key: (1) │ └── scan g@geom_idx,inverted │ ├── columns: k:1!null geom_inverted_key:7!null - │ ├── inverted constraint: /7/1 - │ │ └── spans - │ │ ├── ["B\xfd\x10\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x10\x00\x00\x00\x00\x00\x00\x00"] - │ │ └── ["B\xfd\x10\x00\x00\x00\x00\x00\x00\x01", "B\xfd\x18\x00\x00\x00\x00\x00\x00\x00") - │ ├── key: (1) - │ └── fd: (1)-->(7) + │ └── inverted constraint: /7/1 + │ └── spans + │ ├── ["B\xfd\x10\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x10\x00\x00\x00\x00\x00\x00\x00"] + │ └── ["B\xfd\x10\x00\x00\x00\x00\x00\x00\x01", "B\xfd\x18\x00\x00\x00\x00\x00\x00\x00") └── filters └── 'BOX(1 2,3 4)' ~ geom:3 [outer=(3), immutable] @@ -6696,12 +6541,10 @@ project │ ├── key: (1) │ └── scan g@geom_idx,inverted │ ├── columns: k:1!null geom_inverted_key:7!null - │ 
├── inverted constraint: /7/1 - │ │ └── spans - │ │ ├── ["B\xfd\x10\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x10\x00\x00\x00\x00\x00\x00\x00"] - │ │ └── ["B\xfd\x14\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x14\x00\x00\x00\x00\x00\x00\x00"] - │ ├── key: (1) - │ └── fd: (1)-->(7) + │ └── inverted constraint: /7/1 + │ └── spans + │ ├── ["B\xfd\x10\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x10\x00\x00\x00\x00\x00\x00\x00"] + │ └── ["B\xfd\x14\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x14\x00\x00\x00\x00\x00\x00\x00"] └── filters └── geom:3 ~ 'BOX(1 2,3 4)' [outer=(3), immutable, constraints=(/3: (/NULL - ])] @@ -6740,14 +6583,12 @@ project │ ├── key: (1) │ └── scan g@geom_idx,inverted │ ├── columns: k:1!null geom_inverted_key:7!null - │ ├── inverted constraint: /7/1 - │ │ └── spans - │ │ ├── ["B\xfd\x10\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x10\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfd\x11\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x11\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfd\x14\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x14\x00\x00\x00\x00\x00\x00\x00"] - │ │ └── ["B\xfd\x15\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x15\x00\x00\x00\x00\x00\x00\x00"] - │ ├── key: (1) - │ └── fd: (1)-->(7) + │ └── inverted constraint: /7/1 + │ └── spans + │ ├── ["B\xfd\x10\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x10\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfd\x11\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x11\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfd\x14\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x14\x00\x00\x00\x00\x00\x00\x00"] + │ └── ["B\xfd\x15\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x15\x00\x00\x00\x00\x00\x00\x00"] └── filters └── geom:3 ~ '01040000000200000001010000009A999999999901409A99999999990140010100000000000000000008400000000000000840' [outer=(3), immutable, constraints=(/3: (/NULL - ])] @@ -6868,8 +6709,7 @@ select │ │ └── spans │ │ ├── ["7a\x00\x01\x12b\x00\x01", "7a\x00\x01\x12b\x00\x01"] │ │ └── ["7group\x00\x01*\x02\x00", "7group\x00\x01*\x02\x00"] - │ ├── key: (1) - │ └── fd: (1)-->(9) + │ └── key: (1,9) └── filters └── s:2 = 'foo' [outer=(2), constraints=(/2: [/'foo' - /'foo']; tight), fd=()-->(2)] @@ -7028,13 +6868,11 @@ select │ └── scan m@multicol,inverted │ ├── columns: k:1!null geom_inverted_key:8!null │ ├── constraint: /2: [/'foo' - /'foo'] - │ ├── inverted constraint: /8/1 - │ │ └── spans - │ │ ├── ["B\xfd\x10\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x10\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfd\x11\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x11\x00\x00\x00\x00\x00\x00\x00"] - │ │ └── ["B\xfd\x14\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x14\x00\x00\x00\x00\x00\x00\x00"] - │ ├── key: (1) - │ └── fd: (1)-->(8) + │ └── inverted constraint: /8/1 + │ └── spans + │ ├── ["B\xfd\x10\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x10\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfd\x11\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x11\x00\x00\x00\x00\x00\x00\x00"] + │ └── ["B\xfd\x14\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x14\x00\x00\x00\x00\x00\x00\x00"] └── filters └── st_coveredby('0103000000010000000500000000000000000000000000000000000000000000000000F03F0000000000000000000000000000F03F000000000000F03F0000000000000000000000000000F03F00000000000000000000000000000000', geom:5) [outer=(5), immutable, constraints=(/5: (/NULL - ])] @@ -7067,13 +6905,11 @@ select │ ├── constraint: /2 │ │ ├── [/'bar' - /'bar'] │ │ └── [/'foo' - /'foo'] - │ ├── inverted constraint: /8/1 - │ │ └── spans - │ │ ├── ["B\xfd\x10\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x10\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfd\x11\x00\x00\x00\x00\x00\x00\x00", 
"B\xfd\x11\x00\x00\x00\x00\x00\x00\x00"] - │ │ └── ["B\xfd\x14\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x14\x00\x00\x00\x00\x00\x00\x00"] - │ ├── key: (1) - │ └── fd: (1)-->(8) + │ └── inverted constraint: /8/1 + │ └── spans + │ ├── ["B\xfd\x10\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x10\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfd\x11\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x11\x00\x00\x00\x00\x00\x00\x00"] + │ └── ["B\xfd\x14\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x14\x00\x00\x00\x00\x00\x00\x00"] └── filters └── st_coveredby('0103000000010000000500000000000000000000000000000000000000000000000000F03F0000000000000000000000000000F03F000000000000F03F0000000000000000000000000000F03F00000000000000000000000000000000', geom:5) [outer=(5), immutable, constraints=(/5: (/NULL - ])] @@ -7147,13 +6983,11 @@ select │ └── scan m@multicol,inverted │ ├── columns: k:1!null geom_inverted_key:9!null │ ├── constraint: /2/3: [/'foo'/'bar' - /'foo'/'bar'] - │ ├── inverted constraint: /9/1 - │ │ └── spans - │ │ ├── ["B\xfd\x10\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x10\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfd\x11\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x11\x00\x00\x00\x00\x00\x00\x00"] - │ │ └── ["B\xfd\x14\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x14\x00\x00\x00\x00\x00\x00\x00"] - │ ├── key: (1) - │ └── fd: (1)-->(9) + │ └── inverted constraint: /9/1 + │ └── spans + │ ├── ["B\xfd\x10\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x10\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfd\x11\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x11\x00\x00\x00\x00\x00\x00\x00"] + │ └── ["B\xfd\x14\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x14\x00\x00\x00\x00\x00\x00\x00"] └── filters └── st_coveredby('0103000000010000000500000000000000000000000000000000000000000000000000F03F0000000000000000000000000000F03F000000000000F03F0000000000000000000000000000F03F00000000000000000000000000000000', geom:5) [outer=(5), immutable, constraints=(/5: (/NULL - ])] @@ -7188,13 +7022,11 @@ select │ │ ├── [/'bar'/'bob' - /'bar'/'bob'] │ │ ├── [/'foo'/'baz' - /'foo'/'baz'] │ │ └── [/'foo'/'bob' - /'foo'/'bob'] - │ ├── inverted constraint: /9/1 - │ │ └── spans - │ │ ├── ["B\xfd\x10\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x10\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfd\x11\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x11\x00\x00\x00\x00\x00\x00\x00"] - │ │ └── ["B\xfd\x14\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x14\x00\x00\x00\x00\x00\x00\x00"] - │ ├── key: (1) - │ └── fd: (1)-->(9) + │ └── inverted constraint: /9/1 + │ └── spans + │ ├── ["B\xfd\x10\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x10\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfd\x11\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x11\x00\x00\x00\x00\x00\x00\x00"] + │ └── ["B\xfd\x14\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x14\x00\x00\x00\x00\x00\x00\x00"] └── filters └── st_coveredby('0103000000010000000500000000000000000000000000000000000000000000000000F03F0000000000000000000000000000F03F000000000000F03F0000000000000000000000000000F03F00000000000000000000000000000000', geom:5) [outer=(5), immutable, constraints=(/5: (/NULL - ])] @@ -7331,13 +7163,11 @@ project │ │ ├── [/1 - /1] │ │ ├── [/2 - /2] │ │ └── [/3 - /3] - │ ├── inverted constraint: /10/1 - │ │ └── spans - │ │ ├── ["B\xfd\x10\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x10\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfd\x11\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x11\x00\x00\x00\x00\x00\x00\x00"] - │ │ └── ["B\xfd\x14\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x14\x00\x00\x00\x00\x00\x00\x00"] - │ ├── key: (1) - │ └── fd: (1)-->(10) + │ └── inverted constraint: /10/1 + │ └── spans + │ ├── 
["B\xfd\x10\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x10\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfd\x11\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x11\x00\x00\x00\x00\x00\x00\x00"] + │ └── ["B\xfd\x14\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x14\x00\x00\x00\x00\x00\x00\x00"] └── filters └── st_coveredby('0103000000010000000500000000000000000000000000000000000000000000000000F03F0000000000000000000000000000F03F000000000000F03F0000000000000000000000000000F03F00000000000000000000000000000000', geom:5) [outer=(5), immutable, constraints=(/5: (/NULL - ])] @@ -7386,13 +7216,11 @@ select │ │ ├── [/'bar' - /'bar'] │ │ ├── [/'baz' - /'baz'] │ │ └── [/'foo' - /'foo'] - │ ├── inverted constraint: /8/1 - │ │ └── spans - │ │ ├── ["B\xfd\x10\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x10\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfd\x11\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x11\x00\x00\x00\x00\x00\x00\x00"] - │ │ └── ["B\xfd\x14\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x14\x00\x00\x00\x00\x00\x00\x00"] - │ ├── key: (1) - │ └── fd: (1)-->(8) + │ └── inverted constraint: /8/1 + │ └── spans + │ ├── ["B\xfd\x10\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x10\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfd\x11\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x11\x00\x00\x00\x00\x00\x00\x00"] + │ └── ["B\xfd\x14\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x14\x00\x00\x00\x00\x00\x00\x00"] └── filters └── st_coveredby('0103000000010000000500000000000000000000000000000000000000000000000000F03F0000000000000000000000000000F03F000000000000F03F0000000000000000000000000000F03F00000000000000000000000000000000', geom:5) [outer=(5), immutable, constraints=(/5: (/NULL - ])] @@ -7434,13 +7262,11 @@ select │ │ ├── [/'x'/'bar' - /'x'/'bar'] │ │ ├── [/'x'/'baz' - /'x'/'baz'] │ │ └── [/'x'/'foo' - /'x'/'foo'] - │ ├── inverted constraint: /9/1 - │ │ └── spans - │ │ ├── ["B\xfd\x10\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x10\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfd\x11\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x11\x00\x00\x00\x00\x00\x00\x00"] - │ │ └── ["B\xfd\x14\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x14\x00\x00\x00\x00\x00\x00\x00"] - │ ├── key: (1) - │ └── fd: (1)-->(9) + │ └── inverted constraint: /9/1 + │ └── spans + │ ├── ["B\xfd\x10\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x10\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfd\x11\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x11\x00\x00\x00\x00\x00\x00\x00"] + │ └── ["B\xfd\x14\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x14\x00\x00\x00\x00\x00\x00\x00"] └── filters └── st_coveredby('0103000000010000000500000000000000000000000000000000000000000000000000F03F0000000000000000000000000000F03F000000000000F03F0000000000000000000000000000F03F00000000000000000000000000000000', geom:5) [outer=(5), immutable, constraints=(/5: (/NULL - ])] @@ -7480,13 +7306,11 @@ select │ └── scan mc@mc_idx,inverted │ ├── columns: k:1!null geom_inverted_key:10!null │ ├── constraint: /4: [/'FOO' - /'FOO'] - │ ├── inverted constraint: /10/1 - │ │ └── spans - │ │ ├── ["B\xfd\x10\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x10\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfd\x11\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x11\x00\x00\x00\x00\x00\x00\x00"] - │ │ └── ["B\xfd\x14\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x14\x00\x00\x00\x00\x00\x00\x00"] - │ ├── key: (1) - │ └── fd: (1)-->(10) + │ └── inverted constraint: /10/1 + │ └── spans + │ ├── ["B\xfd\x10\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x10\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfd\x11\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x11\x00\x00\x00\x00\x00\x00\x00"] + │ └── ["B\xfd\x14\x00\x00\x00\x00\x00\x00\x00", 
"B\xfd\x14\x00\x00\x00\x00\x00\x00\x00"] └── filters ├── a:2 = 'foo' [outer=(2), constraints=(/2: [/'foo' - /'foo']; tight), fd=()-->(2)] └── st_coveredby('0103000000010000000500000000000000000000000000000000000000000000000000F03F0000000000000000000000000000F03F000000000000F03F0000000000000000000000000000F03F00000000000000000000000000000000', geom:5) [outer=(5), immutable, constraints=(/5: (/NULL - ])] @@ -7519,13 +7343,11 @@ select │ └── scan mc@mc_idx,inverted │ ├── columns: k:1!null geom_inverted_key:10!null │ ├── constraint: /4: [/'FOO' - /'FOO'] - │ ├── inverted constraint: /10/1 - │ │ └── spans - │ │ ├── ["B\xfd\x10\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x10\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfd\x11\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x11\x00\x00\x00\x00\x00\x00\x00"] - │ │ └── ["B\xfd\x14\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x14\x00\x00\x00\x00\x00\x00\x00"] - │ ├── key: (1) - │ └── fd: (1)-->(10) + │ └── inverted constraint: /10/1 + │ └── spans + │ ├── ["B\xfd\x10\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x10\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfd\x11\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x11\x00\x00\x00\x00\x00\x00\x00"] + │ └── ["B\xfd\x14\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x14\x00\x00\x00\x00\x00\x00\x00"] └── filters └── st_coveredby('0103000000010000000500000000000000000000000000000000000000000000000000F03F0000000000000000000000000000F03F000000000000F03F0000000000000000000000000000F03F00000000000000000000000000000000', geom:5) [outer=(5), immutable, constraints=(/5: (/NULL - ])] @@ -7673,10 +7495,8 @@ select │ ├── key: (4) │ └── scan geom_geog@geom_geog_geom_idx,inverted │ ├── columns: rowid:4!null geom_inverted_key:7!null - │ ├── inverted constraint: /7/4 - │ │ └── spans: ["B\x89", "B\xfd \x00\x00\x00\x00\x00\x00\x00") - │ ├── key: (4) - │ └── fd: (4)-->(7) + │ └── inverted constraint: /7/4 + │ └── spans: ["B\x89", "B\xfd \x00\x00\x00\x00\x00\x00\x00") └── filters └── st_distance(geom:1, '010100000000000000000000000000000000000000') = 0.0 [outer=(1), immutable] @@ -7699,10 +7519,8 @@ select │ ├── key: (4) │ └── scan geom_geog@geom_geog_geog_idx,inverted │ ├── columns: rowid:4!null geog_inverted_key:8!null - │ ├── inverted constraint: /8/4 - │ │ └── spans: ["B\x89", "B\xfd \x00\x00\x00\x00\x00\x00\x00") - │ ├── key: (4) - │ └── fd: (4)-->(8) + │ └── inverted constraint: /8/4 + │ └── spans: ["B\x89", "B\xfd \x00\x00\x00\x00\x00\x00\x00") └── filters └── st_distance(geog:2, '0101000020E610000000000000000000000000000000000000', false) = 0.0 [outer=(2), immutable] @@ -7765,12 +7583,10 @@ select │ ├── key: (4) │ └── scan geom_geog@geom_geog_geom_idx,inverted │ ├── columns: rowid:4!null geom_inverted_key:7!null - │ ├── inverted constraint: /7/4 - │ │ └── spans - │ │ ├── ["B\x89", "B\xfd \x00\x00\x00\x00\x00\x00\x00") - │ │ └── ["B\xfd\xff\xff\xff\xff\xff\xff\xff\xff", "B\xfd\xff\xff\xff\xff\xff\xff\xff\xff"] - │ ├── key: (4) - │ └── fd: (4)-->(7) + │ └── inverted constraint: /7/4 + │ └── spans + │ ├── ["B\x89", "B\xfd \x00\x00\x00\x00\x00\x00\x00") + │ └── ["B\xfd\xff\xff\xff\xff\xff\xff\xff\xff", "B\xfd\xff\xff\xff\xff\xff\xff\xff\xff"] └── filters └── st_distance(geom:1, '010100000000000000000000000000000000000000') <= 5.0 [outer=(1), immutable] @@ -7795,12 +7611,10 @@ select │ ├── key: (4) │ └── scan geom_geog@geom_geog_geom_idx,inverted │ ├── columns: rowid:4!null geom_inverted_key:7!null - │ ├── inverted constraint: /7/4 - │ │ └── spans - │ │ ├── ["B\x89", "B\xfd \x00\x00\x00\x00\x00\x00\x00") - │ │ └── ["B\xfd\xff\xff\xff\xff\xff\xff\xff\xff", 
"B\xfd\xff\xff\xff\xff\xff\xff\xff\xff"] - │ ├── key: (4) - │ └── fd: (4)-->(7) + │ └── inverted constraint: /7/4 + │ └── spans + │ ├── ["B\x89", "B\xfd \x00\x00\x00\x00\x00\x00\x00") + │ └── ["B\xfd\xff\xff\xff\xff\xff\xff\xff\xff", "B\xfd\xff\xff\xff\xff\xff\xff\xff\xff"] └── filters └── st_distance('010100000000000000000000000000000000000000', geom:1) < 5.0 [outer=(1), immutable] @@ -7879,39 +7693,37 @@ select │ ├── key: (4) │ └── scan geom_geog@geom_geog_geog_idx,inverted │ ├── columns: rowid:4!null geog_inverted_key:8!null - │ ├── inverted constraint: /8/4 - │ │ └── spans - │ │ ├── ["B\x89", "B\xfd \x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfd \x00\x00\x00\x00\x00\x00\x01", "B\xfd\"\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfd\"\x00\x00\x00\x00\x00\x00\x01", "B\xfd$\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfd$\x00\x00\x00\x00\x00\x00\x00", "B\xfd$\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfd0\x00\x00\x00\x00\x00\x00\x00", "B\xfd0\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfd<\x00\x00\x00\x00\x00\x00\x00", "B\xfd<\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfd<\x00\x00\x00\x00\x00\x00\x01", "B\xfd>\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfd>\x00\x00\x00\x00\x00\x00\x01", "B\xfd@\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfd@\x00\x00\x00\x00\x00\x00\x01", "B\xfdB\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfdD\x00\x00\x00\x00\x00\x00\x00", "B\xfdD\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfdF\x00\x00\x00\x00\x00\x00\x01", "B\xfdH\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfdH\x00\x00\x00\x00\x00\x00\x01", "B\xfdJ\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfdJ\x00\x00\x00\x00\x00\x00\x01", "B\xfdL\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfdL\x00\x00\x00\x00\x00\x00\x00", "B\xfdL\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfdP\x00\x00\x00\x00\x00\x00\x00", "B\xfdP\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfd\x8a\x00\x00\x00\x00\x00\x00\x01", "B\xfd\x8c\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfd\x8c\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x8c\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfd\x8c\x00\x00\x00\x00\x00\x00\x01", "B\xfd\x8e\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfd\x90\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x90\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfd\x92\x00\x00\x00\x00\x00\x00\x01", "B\xfd\x94\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfd\x94\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x94\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfd\x94\x00\x00\x00\x00\x00\x00\x01", "B\xfd\x96\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfd\xb0\x00\x00\x00\x00\x00\x00\x00", "B\xfd\xb0\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfd\xb4\x00\x00\x00\x00\x00\x00\x00", "B\xfd\xb4\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfd\xb4\x00\x00\x00\x00\x00\x00\x01", "B\xfd\xb6\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfd\xb6\x00\x00\x00\x00\x00\x00\x01", "B\xfd\xb8\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfd\xb8\x00\x00\x00\x00\x00\x00\x01", "B\xfd\xba\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfd\xbc\x00\x00\x00\x00\x00\x00\x00", "B\xfd\xbc\x00\x00\x00\x00\x00\x00\x00"] - │ │ └── ["B\xfd\xbe\x00\x00\x00\x00\x00\x00\x01", "B\xfd\xc0\x00\x00\x00\x00\x00\x00\x00") - │ ├── key: (4) - │ └── fd: (4)-->(8) + │ └── inverted constraint: /8/4 + │ └── spans + │ ├── ["B\x89", "B\xfd \x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfd \x00\x00\x00\x00\x00\x00\x01", "B\xfd\"\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfd\"\x00\x00\x00\x00\x00\x00\x01", "B\xfd$\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfd$\x00\x00\x00\x00\x00\x00\x00", 
"B\xfd$\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfd0\x00\x00\x00\x00\x00\x00\x00", "B\xfd0\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfd<\x00\x00\x00\x00\x00\x00\x00", "B\xfd<\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfd<\x00\x00\x00\x00\x00\x00\x01", "B\xfd>\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfd>\x00\x00\x00\x00\x00\x00\x01", "B\xfd@\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfd@\x00\x00\x00\x00\x00\x00\x01", "B\xfdB\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfdD\x00\x00\x00\x00\x00\x00\x00", "B\xfdD\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfdF\x00\x00\x00\x00\x00\x00\x01", "B\xfdH\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfdH\x00\x00\x00\x00\x00\x00\x01", "B\xfdJ\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfdJ\x00\x00\x00\x00\x00\x00\x01", "B\xfdL\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfdL\x00\x00\x00\x00\x00\x00\x00", "B\xfdL\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfdP\x00\x00\x00\x00\x00\x00\x00", "B\xfdP\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfd\x8a\x00\x00\x00\x00\x00\x00\x01", "B\xfd\x8c\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfd\x8c\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x8c\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfd\x8c\x00\x00\x00\x00\x00\x00\x01", "B\xfd\x8e\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfd\x90\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x90\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfd\x92\x00\x00\x00\x00\x00\x00\x01", "B\xfd\x94\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfd\x94\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x94\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfd\x94\x00\x00\x00\x00\x00\x00\x01", "B\xfd\x96\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfd\xb0\x00\x00\x00\x00\x00\x00\x00", "B\xfd\xb0\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfd\xb4\x00\x00\x00\x00\x00\x00\x00", "B\xfd\xb4\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfd\xb4\x00\x00\x00\x00\x00\x00\x01", "B\xfd\xb6\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfd\xb6\x00\x00\x00\x00\x00\x00\x01", "B\xfd\xb8\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfd\xb8\x00\x00\x00\x00\x00\x00\x01", "B\xfd\xba\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfd\xbc\x00\x00\x00\x00\x00\x00\x00", "B\xfd\xbc\x00\x00\x00\x00\x00\x00\x00"] + │ └── ["B\xfd\xbe\x00\x00\x00\x00\x00\x00\x01", "B\xfd\xc0\x00\x00\x00\x00\x00\x00\x00") └── filters └── st_distance(geog:2, '0101000020E610000000000000000000000000000000000000') <= 5.0 [outer=(2), immutable] @@ -7963,39 +7775,37 @@ select │ ├── key: (4) │ └── scan geom_geog@geom_geog_geog_idx,inverted │ ├── columns: rowid:4!null geog_inverted_key:8!null - │ ├── inverted constraint: /8/4 - │ │ └── spans - │ │ ├── ["B\x89", "B\xfd \x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfd \x00\x00\x00\x00\x00\x00\x01", "B\xfd\"\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfd\"\x00\x00\x00\x00\x00\x00\x01", "B\xfd$\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfd$\x00\x00\x00\x00\x00\x00\x00", "B\xfd$\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfd0\x00\x00\x00\x00\x00\x00\x00", "B\xfd0\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfd<\x00\x00\x00\x00\x00\x00\x00", "B\xfd<\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfd<\x00\x00\x00\x00\x00\x00\x01", "B\xfd>\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfd>\x00\x00\x00\x00\x00\x00\x01", "B\xfd@\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfd@\x00\x00\x00\x00\x00\x00\x01", "B\xfdB\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfdD\x00\x00\x00\x00\x00\x00\x00", "B\xfdD\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfdF\x00\x00\x00\x00\x00\x00\x01", "B\xfdH\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfdH\x00\x00\x00\x00\x00\x00\x01", 
"B\xfdJ\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfdJ\x00\x00\x00\x00\x00\x00\x01", "B\xfdL\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfdL\x00\x00\x00\x00\x00\x00\x00", "B\xfdL\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfdP\x00\x00\x00\x00\x00\x00\x00", "B\xfdP\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfd\x8a\x00\x00\x00\x00\x00\x00\x01", "B\xfd\x8c\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfd\x8c\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x8c\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfd\x8c\x00\x00\x00\x00\x00\x00\x01", "B\xfd\x8e\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfd\x90\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x90\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfd\x92\x00\x00\x00\x00\x00\x00\x01", "B\xfd\x94\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfd\x94\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x94\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfd\x94\x00\x00\x00\x00\x00\x00\x01", "B\xfd\x96\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfd\xb0\x00\x00\x00\x00\x00\x00\x00", "B\xfd\xb0\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfd\xb4\x00\x00\x00\x00\x00\x00\x00", "B\xfd\xb4\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfd\xb4\x00\x00\x00\x00\x00\x00\x01", "B\xfd\xb6\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfd\xb6\x00\x00\x00\x00\x00\x00\x01", "B\xfd\xb8\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfd\xb8\x00\x00\x00\x00\x00\x00\x01", "B\xfd\xba\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfd\xbc\x00\x00\x00\x00\x00\x00\x00", "B\xfd\xbc\x00\x00\x00\x00\x00\x00\x00"] - │ │ └── ["B\xfd\xbe\x00\x00\x00\x00\x00\x00\x01", "B\xfd\xc0\x00\x00\x00\x00\x00\x00\x00") - │ ├── key: (4) - │ └── fd: (4)-->(8) + │ └── inverted constraint: /8/4 + │ └── spans + │ ├── ["B\x89", "B\xfd \x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfd \x00\x00\x00\x00\x00\x00\x01", "B\xfd\"\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfd\"\x00\x00\x00\x00\x00\x00\x01", "B\xfd$\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfd$\x00\x00\x00\x00\x00\x00\x00", "B\xfd$\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfd0\x00\x00\x00\x00\x00\x00\x00", "B\xfd0\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfd<\x00\x00\x00\x00\x00\x00\x00", "B\xfd<\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfd<\x00\x00\x00\x00\x00\x00\x01", "B\xfd>\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfd>\x00\x00\x00\x00\x00\x00\x01", "B\xfd@\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfd@\x00\x00\x00\x00\x00\x00\x01", "B\xfdB\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfdD\x00\x00\x00\x00\x00\x00\x00", "B\xfdD\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfdF\x00\x00\x00\x00\x00\x00\x01", "B\xfdH\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfdH\x00\x00\x00\x00\x00\x00\x01", "B\xfdJ\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfdJ\x00\x00\x00\x00\x00\x00\x01", "B\xfdL\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfdL\x00\x00\x00\x00\x00\x00\x00", "B\xfdL\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfdP\x00\x00\x00\x00\x00\x00\x00", "B\xfdP\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfd\x8a\x00\x00\x00\x00\x00\x00\x01", "B\xfd\x8c\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfd\x8c\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x8c\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfd\x8c\x00\x00\x00\x00\x00\x00\x01", "B\xfd\x8e\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfd\x90\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x90\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfd\x92\x00\x00\x00\x00\x00\x00\x01", "B\xfd\x94\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfd\x94\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x94\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfd\x94\x00\x00\x00\x00\x00\x00\x01", 
"B\xfd\x96\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfd\xb0\x00\x00\x00\x00\x00\x00\x00", "B\xfd\xb0\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfd\xb4\x00\x00\x00\x00\x00\x00\x00", "B\xfd\xb4\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfd\xb4\x00\x00\x00\x00\x00\x00\x01", "B\xfd\xb6\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfd\xb6\x00\x00\x00\x00\x00\x00\x01", "B\xfd\xb8\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfd\xb8\x00\x00\x00\x00\x00\x00\x01", "B\xfd\xba\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfd\xbc\x00\x00\x00\x00\x00\x00\x00", "B\xfd\xbc\x00\x00\x00\x00\x00\x00\x00"] + │ └── ["B\xfd\xbe\x00\x00\x00\x00\x00\x00\x01", "B\xfd\xc0\x00\x00\x00\x00\x00\x00\x00") └── filters └── st_distance(geog:2, '0101000020E610000000000000000000000000000000000000') < 5.0 [outer=(2), immutable] @@ -8050,39 +7860,37 @@ select │ ├── key: (4) │ └── scan geom_geog@geom_geog_geog_idx,inverted │ ├── columns: rowid:4(int!null) geog_inverted_key:8(encodedkey!null) - │ ├── inverted constraint: /8/4 - │ │ └── spans - │ │ ├── ["B\x89", "B\xfd \x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfd \x00\x00\x00\x00\x00\x00\x01", "B\xfd\"\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfd\"\x00\x00\x00\x00\x00\x00\x01", "B\xfd$\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfd$\x00\x00\x00\x00\x00\x00\x00", "B\xfd$\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfd0\x00\x00\x00\x00\x00\x00\x00", "B\xfd0\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfd<\x00\x00\x00\x00\x00\x00\x00", "B\xfd<\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfd<\x00\x00\x00\x00\x00\x00\x01", "B\xfd>\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfd>\x00\x00\x00\x00\x00\x00\x01", "B\xfd@\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfd@\x00\x00\x00\x00\x00\x00\x01", "B\xfdB\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfdD\x00\x00\x00\x00\x00\x00\x00", "B\xfdD\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfdF\x00\x00\x00\x00\x00\x00\x01", "B\xfdH\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfdH\x00\x00\x00\x00\x00\x00\x01", "B\xfdJ\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfdJ\x00\x00\x00\x00\x00\x00\x01", "B\xfdL\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfdL\x00\x00\x00\x00\x00\x00\x00", "B\xfdL\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfdP\x00\x00\x00\x00\x00\x00\x00", "B\xfdP\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfd\x8a\x00\x00\x00\x00\x00\x00\x01", "B\xfd\x8c\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfd\x8c\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x8c\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfd\x8c\x00\x00\x00\x00\x00\x00\x01", "B\xfd\x8e\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfd\x90\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x90\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfd\x92\x00\x00\x00\x00\x00\x00\x01", "B\xfd\x94\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfd\x94\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x94\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfd\x94\x00\x00\x00\x00\x00\x00\x01", "B\xfd\x96\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfd\xb0\x00\x00\x00\x00\x00\x00\x00", "B\xfd\xb0\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfd\xb4\x00\x00\x00\x00\x00\x00\x00", "B\xfd\xb4\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfd\xb4\x00\x00\x00\x00\x00\x00\x01", "B\xfd\xb6\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfd\xb6\x00\x00\x00\x00\x00\x00\x01", "B\xfd\xb8\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfd\xb8\x00\x00\x00\x00\x00\x00\x01", "B\xfd\xba\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfd\xbc\x00\x00\x00\x00\x00\x00\x00", "B\xfd\xbc\x00\x00\x00\x00\x00\x00\x00"] - │ │ └── ["B\xfd\xbe\x00\x00\x00\x00\x00\x00\x01", 
"B\xfd\xc0\x00\x00\x00\x00\x00\x00\x00") - │ ├── key: (4) - │ └── fd: (4)-->(8) + │ └── inverted constraint: /8/4 + │ └── spans + │ ├── ["B\x89", "B\xfd \x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfd \x00\x00\x00\x00\x00\x00\x01", "B\xfd\"\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfd\"\x00\x00\x00\x00\x00\x00\x01", "B\xfd$\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfd$\x00\x00\x00\x00\x00\x00\x00", "B\xfd$\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfd0\x00\x00\x00\x00\x00\x00\x00", "B\xfd0\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfd<\x00\x00\x00\x00\x00\x00\x00", "B\xfd<\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfd<\x00\x00\x00\x00\x00\x00\x01", "B\xfd>\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfd>\x00\x00\x00\x00\x00\x00\x01", "B\xfd@\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfd@\x00\x00\x00\x00\x00\x00\x01", "B\xfdB\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfdD\x00\x00\x00\x00\x00\x00\x00", "B\xfdD\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfdF\x00\x00\x00\x00\x00\x00\x01", "B\xfdH\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfdH\x00\x00\x00\x00\x00\x00\x01", "B\xfdJ\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfdJ\x00\x00\x00\x00\x00\x00\x01", "B\xfdL\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfdL\x00\x00\x00\x00\x00\x00\x00", "B\xfdL\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfdP\x00\x00\x00\x00\x00\x00\x00", "B\xfdP\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfd\x8a\x00\x00\x00\x00\x00\x00\x01", "B\xfd\x8c\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfd\x8c\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x8c\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfd\x8c\x00\x00\x00\x00\x00\x00\x01", "B\xfd\x8e\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfd\x90\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x90\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfd\x92\x00\x00\x00\x00\x00\x00\x01", "B\xfd\x94\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfd\x94\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x94\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfd\x94\x00\x00\x00\x00\x00\x00\x01", "B\xfd\x96\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfd\xb0\x00\x00\x00\x00\x00\x00\x00", "B\xfd\xb0\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfd\xb4\x00\x00\x00\x00\x00\x00\x00", "B\xfd\xb4\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfd\xb4\x00\x00\x00\x00\x00\x00\x01", "B\xfd\xb6\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfd\xb6\x00\x00\x00\x00\x00\x00\x01", "B\xfd\xb8\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfd\xb8\x00\x00\x00\x00\x00\x00\x01", "B\xfd\xba\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfd\xbc\x00\x00\x00\x00\x00\x00\x00", "B\xfd\xbc\x00\x00\x00\x00\x00\x00\x00"] + │ └── ["B\xfd\xbe\x00\x00\x00\x00\x00\x00\x01", "B\xfd\xc0\x00\x00\x00\x00\x00\x00\x00") └── filters └── lt [type=bool, outer=(2), immutable] ├── function: st_distance [type=float] @@ -8156,39 +7964,37 @@ select │ ├── key: (4) │ └── scan geom_geog@geom_geog_geog_idx,inverted │ ├── columns: rowid:4!null geog_inverted_key:8!null - │ ├── inverted constraint: /8/4 - │ │ └── spans - │ │ ├── ["B\x89", "B\xfd \x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfd \x00\x00\x00\x00\x00\x00\x01", "B\xfd\"\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfd\"\x00\x00\x00\x00\x00\x00\x01", "B\xfd$\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfd$\x00\x00\x00\x00\x00\x00\x00", "B\xfd$\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfd0\x00\x00\x00\x00\x00\x00\x00", "B\xfd0\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfd<\x00\x00\x00\x00\x00\x00\x00", "B\xfd<\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfd<\x00\x00\x00\x00\x00\x00\x01", "B\xfd>\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── 
["B\xfd>\x00\x00\x00\x00\x00\x00\x01", "B\xfd@\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfd@\x00\x00\x00\x00\x00\x00\x01", "B\xfdB\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfdD\x00\x00\x00\x00\x00\x00\x00", "B\xfdD\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfdF\x00\x00\x00\x00\x00\x00\x01", "B\xfdH\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfdH\x00\x00\x00\x00\x00\x00\x01", "B\xfdJ\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfdJ\x00\x00\x00\x00\x00\x00\x01", "B\xfdL\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfdL\x00\x00\x00\x00\x00\x00\x00", "B\xfdL\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfdP\x00\x00\x00\x00\x00\x00\x00", "B\xfdP\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfd\x8a\x00\x00\x00\x00\x00\x00\x01", "B\xfd\x8c\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfd\x8c\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x8c\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfd\x8c\x00\x00\x00\x00\x00\x00\x01", "B\xfd\x8e\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfd\x90\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x90\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfd\x92\x00\x00\x00\x00\x00\x00\x01", "B\xfd\x94\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfd\x94\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x94\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfd\x94\x00\x00\x00\x00\x00\x00\x01", "B\xfd\x96\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfd\xb0\x00\x00\x00\x00\x00\x00\x00", "B\xfd\xb0\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfd\xb4\x00\x00\x00\x00\x00\x00\x00", "B\xfd\xb4\x00\x00\x00\x00\x00\x00\x00"] - │ │ ├── ["B\xfd\xb4\x00\x00\x00\x00\x00\x00\x01", "B\xfd\xb6\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfd\xb6\x00\x00\x00\x00\x00\x00\x01", "B\xfd\xb8\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfd\xb8\x00\x00\x00\x00\x00\x00\x01", "B\xfd\xba\x00\x00\x00\x00\x00\x00\x00") - │ │ ├── ["B\xfd\xbc\x00\x00\x00\x00\x00\x00\x00", "B\xfd\xbc\x00\x00\x00\x00\x00\x00\x00"] - │ │ └── ["B\xfd\xbe\x00\x00\x00\x00\x00\x00\x01", "B\xfd\xc0\x00\x00\x00\x00\x00\x00\x00") - │ ├── key: (4) - │ └── fd: (4)-->(8) + │ └── inverted constraint: /8/4 + │ └── spans + │ ├── ["B\x89", "B\xfd \x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfd \x00\x00\x00\x00\x00\x00\x01", "B\xfd\"\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfd\"\x00\x00\x00\x00\x00\x00\x01", "B\xfd$\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfd$\x00\x00\x00\x00\x00\x00\x00", "B\xfd$\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfd0\x00\x00\x00\x00\x00\x00\x00", "B\xfd0\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfd<\x00\x00\x00\x00\x00\x00\x00", "B\xfd<\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfd<\x00\x00\x00\x00\x00\x00\x01", "B\xfd>\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfd>\x00\x00\x00\x00\x00\x00\x01", "B\xfd@\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfd@\x00\x00\x00\x00\x00\x00\x01", "B\xfdB\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfdD\x00\x00\x00\x00\x00\x00\x00", "B\xfdD\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfdF\x00\x00\x00\x00\x00\x00\x01", "B\xfdH\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfdH\x00\x00\x00\x00\x00\x00\x01", "B\xfdJ\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfdJ\x00\x00\x00\x00\x00\x00\x01", "B\xfdL\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfdL\x00\x00\x00\x00\x00\x00\x00", "B\xfdL\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfdP\x00\x00\x00\x00\x00\x00\x00", "B\xfdP\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfd\x8a\x00\x00\x00\x00\x00\x00\x01", "B\xfd\x8c\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfd\x8c\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x8c\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfd\x8c\x00\x00\x00\x00\x00\x00\x01", 
"B\xfd\x8e\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfd\x90\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x90\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfd\x92\x00\x00\x00\x00\x00\x00\x01", "B\xfd\x94\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfd\x94\x00\x00\x00\x00\x00\x00\x00", "B\xfd\x94\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfd\x94\x00\x00\x00\x00\x00\x00\x01", "B\xfd\x96\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfd\xb0\x00\x00\x00\x00\x00\x00\x00", "B\xfd\xb0\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfd\xb4\x00\x00\x00\x00\x00\x00\x00", "B\xfd\xb4\x00\x00\x00\x00\x00\x00\x00"] + │ ├── ["B\xfd\xb4\x00\x00\x00\x00\x00\x00\x01", "B\xfd\xb6\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfd\xb6\x00\x00\x00\x00\x00\x00\x01", "B\xfd\xb8\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfd\xb8\x00\x00\x00\x00\x00\x00\x01", "B\xfd\xba\x00\x00\x00\x00\x00\x00\x00") + │ ├── ["B\xfd\xbc\x00\x00\x00\x00\x00\x00\x00", "B\xfd\xbc\x00\x00\x00\x00\x00\x00\x00"] + │ └── ["B\xfd\xbe\x00\x00\x00\x00\x00\x00\x01", "B\xfd\xc0\x00\x00\x00\x00\x00\x00\x00") └── filters └── st_distance(geog:2, '0101000020E610000000000000000000000000000000000000', true) <= 5.0 [outer=(2), immutable] @@ -8287,12 +8093,10 @@ select │ ├── key: (4) │ └── scan geom_geog@geom_geog_geom_idx,inverted │ ├── columns: rowid:4!null geom_inverted_key:7!null - │ ├── inverted constraint: /7/4 - │ │ └── spans - │ │ ├── ["B\x89", "B\xfd \x00\x00\x00\x00\x00\x00\x00") - │ │ └── ["B\xfd\xff\xff\xff\xff\xff\xff\xff\xff", "B\xfd\xff\xff\xff\xff\xff\xff\xff\xff"] - │ ├── key: (4) - │ └── fd: (4)-->(7) + │ └── inverted constraint: /7/4 + │ └── spans + │ ├── ["B\x89", "B\xfd \x00\x00\x00\x00\x00\x00\x00") + │ └── ["B\xfd\xff\xff\xff\xff\xff\xff\xff\xff", "B\xfd\xff\xff\xff\xff\xff\xff\xff\xff"] └── filters └── st_maxdistance(geom:1, '010100000000000000000000000000000000000000') <= 5.0 [outer=(1), immutable] @@ -8317,12 +8121,10 @@ select │ ├── key: (4) │ └── scan geom_geog@geom_geog_geom_idx,inverted │ ├── columns: rowid:4!null geom_inverted_key:7!null - │ ├── inverted constraint: /7/4 - │ │ └── spans - │ │ ├── ["B\x89", "B\xfd \x00\x00\x00\x00\x00\x00\x00") - │ │ └── ["B\xfd\xff\xff\xff\xff\xff\xff\xff\xff", "B\xfd\xff\xff\xff\xff\xff\xff\xff\xff"] - │ ├── key: (4) - │ └── fd: (4)-->(7) + │ └── inverted constraint: /7/4 + │ └── spans + │ ├── ["B\x89", "B\xfd \x00\x00\x00\x00\x00\x00\x00") + │ └── ["B\xfd\xff\xff\xff\xff\xff\xff\xff\xff", "B\xfd\xff\xff\xff\xff\xff\xff\xff\xff"] └── filters └── st_maxdistance('010100000000000000000000000000000000000000', geom:1) < 5.0 [outer=(1), immutable] @@ -8435,14 +8237,12 @@ select │ ├── key: (1) │ └── scan trgm@s_idx,inverted │ ├── columns: k:1!null s_inverted_key:5!null - │ ├── inverted constraint: /5/1 - │ │ └── spans - │ │ ├── ["\x12 f\x00\x01", "\x12 f\x00\x01"] - │ │ ├── ["\x12 fo\x00\x01", "\x12 fo\x00\x01"] - │ │ ├── ["\x12foo\x00\x01", "\x12foo\x00\x01"] - │ │ └── ["\x12oo \x00\x01", "\x12oo \x00\x01"] - │ ├── key: (1) - │ └── fd: (1)-->(5) + │ └── inverted constraint: /5/1 + │ └── spans + │ ├── ["\x12 f\x00\x01", "\x12 f\x00\x01"] + │ ├── ["\x12 fo\x00\x01", "\x12 fo\x00\x01"] + │ ├── ["\x12foo\x00\x01", "\x12foo\x00\x01"] + │ └── ["\x12oo \x00\x01", "\x12oo \x00\x01"] └── filters └── s:2 % 'foo' [outer=(2), stable] @@ -8605,9 +8405,7 @@ project │ │ ├── fd: ()-->(9) │ │ └── select │ │ ├── columns: a:9!null b:10!null c_inverted_key:14!null - │ │ ├── cardinality: [0 - 2] - │ │ ├── key: (10) - │ │ ├── fd: ()-->(9), (10)-->(14) + │ │ ├── fd: ()-->(9) │ │ ├── scan t122733@i122733,inverted │ │ │ ├── columns: a:9!null 
b:10!null c_inverted_key:14!null │ │ │ ├── constraint: /9: [/'foo' - /'foo'] @@ -8616,8 +8414,7 @@ project │ │ │ │ ├── ["\x12bar\x00\x01", "\x12bar\x00\x01"] │ │ │ │ └── ["\x12foo\x00\x01", "\x12foo\x00\x01"] │ │ │ ├── flags: force-index=i122733 - │ │ │ ├── key: (10) - │ │ │ └── fd: ()-->(9), (10)-->(14) + │ │ │ └── fd: ()-->(9) │ │ └── filters │ │ └── (b:10 = 'foo') OR (b:10 = 'bar') [outer=(10), constraints=(/10: [/'bar' - /'bar'] [/'foo' - /'foo']; tight)] │ └── filters (true) @@ -8654,10 +8451,8 @@ project │ │ ├── fd: ()-->(9) │ │ └── select │ │ ├── columns: a:9!null b:10!null c_inverted_key:14!null - │ │ ├── cardinality: [0 - 2] │ │ ├── immutable - │ │ ├── key: (10) - │ │ ├── fd: ()-->(9), (10)-->(14) + │ │ ├── fd: ()-->(9) │ │ ├── scan t122733@i122733,inverted │ │ │ ├── columns: a:9!null b:10!null c_inverted_key:14!null │ │ │ ├── constraint: /9: [/'foo' - /'foo'] @@ -8666,8 +8461,7 @@ project │ │ │ │ ├── ["\x12bar\x00\x01", "\x12bar\x00\x01"] │ │ │ │ └── ["\x12foo\x00\x01", "\x12foo\x00\x01"] │ │ │ ├── flags: force-index=i122733 - │ │ │ ├── key: (10) - │ │ │ └── fd: ()-->(9), (10)-->(14) + │ │ │ └── fd: ()-->(9) │ │ └── filters │ │ ├── (b:10 = 'foo') OR (b:10 = 'bar') [outer=(10), constraints=(/10: [/'bar' - /'bar'] [/'foo' - /'foo']; tight)] │ │ └── b:10::INT8 > 5 [outer=(10), immutable] @@ -10064,13 +9858,11 @@ select │ ├── key: (1) │ └── scan b@j_inv_idx,inverted │ ├── columns: k:1!null j_inverted_key:9!null - │ ├── inverted constraint: /9/1 - │ │ └── spans - │ │ ├── ["7\x00\x03\x00\x01*\x06\x00", "7\x00\x03\x00\x01*\x06\x00"] - │ │ ├── ["7\x00\x03\x00\x03\x00\x01*\x02\x00", "7\x00\x03\x00\x03\x00\x01*\x02\x00"] - │ │ └── ["7\x00\x03\x00\x03\x00\x01*\x04\x00", "7\x00\x03\x00\x03\x00\x01*\x04\x00"] - │ ├── key: (1) - │ └── fd: (1)-->(9) + │ └── inverted constraint: /9/1 + │ └── spans + │ ├── ["7\x00\x03\x00\x01*\x06\x00", "7\x00\x03\x00\x01*\x06\x00"] + │ ├── ["7\x00\x03\x00\x03\x00\x01*\x02\x00", "7\x00\x03\x00\x03\x00\x01*\x02\x00"] + │ └── ["7\x00\x03\x00\x03\x00\x01*\x04\x00", "7\x00\x03\x00\x03\x00\x01*\x04\x00"] └── filters └── (j:4 @> '[3]') OR (j:4 @> '[[1, 2]]') [outer=(4), immutable, constraints=(/4: (/NULL - ])] @@ -10123,9 +9915,7 @@ project │ └── spans │ ├── ["7a\x00\x01\x12b\x00\x01", "7a\x00\x01\x12b\x00\x01"] │ └── ["7c\x00\x01\x12d\x00\x01", "7c\x00\x01\x12d\x00\x01"] - ├── flags: no-zigzag-join - ├── key: (1) - └── fd: (1)-->(9) + └── flags: no-zigzag-join exec-ddl CREATE TABLE inv_zz_partial ( @@ -10167,8 +9957,7 @@ project │ └── spans │ ├── ["7a\x00\x01*\x02\x00", "7a\x00\x01*\x02\x00"] │ └── ["7b\x00\x01*\x04\x00", "7b\x00\x01*\x04\x00"] - ├── key: (1) - └── fd: (1)-->(7) + └── key: (1,7) # Generate a zigzag join on a single index with a remaining filter. 
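# (Editorial sketch, not part of the recorded test output: a hypothetical
# query shape that could exercise this rule, assuming a table with an
# inverted index on a JSONB column j. Two containment conjuncts each
# constrain the same inverted index, and the zigzag join intersects the
# resulting span sets:
#   SELECT k FROM tbl WHERE j @> '{"a": "b"}' AND j @> '{"c": "d"}';
# )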
opt expect=GenerateInvertedIndexZigzagJoins diff --git a/pkg/sql/opt/xform/testdata/rules/select_for_update b/pkg/sql/opt/xform/testdata/rules/select_for_update index 8d42835cbe58..65c31e75c88e 100644 --- a/pkg/sql/opt/xform/testdata/rules/select_for_update +++ b/pkg/sql/opt/xform/testdata/rules/select_for_update @@ -204,9 +204,7 @@ select │ │ ├── ["C", "C"] │ │ └── ["\x89", "\x8b") │ ├── locking: for-update - │ ├── volatile - │ ├── key: (1) - │ └── fd: (1)-->(6) + │ └── volatile └── filters └── b:2 <@ ARRAY[1,2] [outer=(2), immutable] diff --git a/pkg/sql/opt_exec_factory.go b/pkg/sql/opt_exec_factory.go index add9e6fd9201..022bc30a22d6 100644 --- a/pkg/sql/opt_exec_factory.go +++ b/pkg/sql/opt_exec_factory.go @@ -26,7 +26,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/funcdesc" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/schemaexpr" "github.com/cockroachdb/cockroach/pkg/sql/inverted" "github.com/cockroachdb/cockroach/pkg/sql/opt" "github.com/cockroachdb/cockroach/pkg/sql/opt/cat" @@ -1524,10 +1523,6 @@ func (ef *execFactory) ConstructUpdate( // since it compiles tuples and subqueries into a simple sequence of target // columns. updateCols := makeColList(table, updateColOrdSet) - sourceSlots := make([]sourceSlot, len(updateCols)) - for i := range sourceSlots { - sourceSlots[i] = scalarSlot{column: updateCols[i], sourceIndex: len(fetchCols) + i} - } // Create the table updater, which does the bulk of the work. internal := ef.planner.SessionData().Internal @@ -1548,28 +1543,12 @@ func (ef *execFactory) ConstructUpdate( return nil, err } - // updateColsIdx inverts the mapping of UpdateCols to FetchCols. See - // the explanatory comments in updateRun. 
- var updateColsIdx catalog.TableColMap - for i := range ru.UpdateCols { - id := ru.UpdateCols[i].GetID() - updateColsIdx.Set(id, i) - } - upd := updateNodePool.Get().(*updateNode) *upd = updateNode{ source: input.(planNode), run: updateRun{ - tu: tableUpdater{ru: ru}, - checkOrds: checks, - iVarContainerForComputedCols: schemaexpr.RowIndexedVarContainer{ - CurSourceRow: make(tree.Datums, len(ru.FetchCols)), - Cols: ru.FetchCols, - Mapping: ru.FetchColIDtoRowIndex, - }, - sourceSlots: sourceSlots, - updateValues: make(tree.Datums, len(ru.UpdateCols)), - updateColsIdx: updateColsIdx, + tu: tableUpdater{ru: ru}, + checkOrds: checks, numPassthrough: len(passthrough), }, } diff --git a/pkg/sql/parser/sql.y b/pkg/sql/parser/sql.y index d1f79c2a21ba..98776f7e6938 100644 --- a/pkg/sql/parser/sql.y +++ b/pkg/sql/parser/sql.y @@ -872,9 +872,6 @@ func (u *sqlSymUnion) showRangesOpts() *tree.ShowRangesOptions { func (u *sqlSymUnion) tenantSpec() *tree.TenantSpec { return u.val.(*tree.TenantSpec) } -func (u *sqlSymUnion) likeTenantSpec() *tree.LikeTenantSpec { - return u.val.(*tree.LikeTenantSpec) -} func (u *sqlSymUnion) cteMaterializeClause() tree.CTEMaterializeClause { return u.val.(tree.CTEMaterializeClause) } @@ -977,7 +974,7 @@ func (u *sqlSymUnion) triggerForEach() tree.TriggerForEach { %token HAVING HASH HEADER HIGH HISTOGRAM HOLD HOUR %token IDENTITY -%token IF IFERROR IFNULL IGNORE_FOREIGN_KEYS ILIKE IMMEDIATE IMMEDIATELY IMMUTABLE IMPORT IN INCLUDE +%token IF IFERROR IFNULL IGNORE_FOREIGN_KEYS IGNORE_CDC_IGNORED_TTL_DELETES ILIKE IMMEDIATE IMMEDIATELY IMMUTABLE IMPORT IN INCLUDE %token INCLUDING INCLUDE_ALL_SECONDARY_TENANTS INCLUDE_ALL_VIRTUAL_CLUSTERS INCREMENT INCREMENTAL INCREMENTAL_LOCATION %token INET INET_CONTAINED_BY_OR_EQUALS %token INET_CONTAINS_OR_EQUALS INDEX INDEXES INHERITS INJECT INITIALLY @@ -1238,7 +1235,6 @@ func (u *sqlSymUnion) triggerForEach() tree.TriggerForEach { %type create_proc_stmt %type create_trigger_stmt -%type <*tree.LikeTenantSpec> opt_like_virtual_cluster %type logical_replication_resources, logical_replication_resources_list %type <*tree.LogicalReplicationOptions> opt_logical_replication_options logical_replication_options logical_replication_options_list @@ -1725,7 +1721,7 @@ func (u *sqlSymUnion) triggerForEach() tree.TriggerForEach { %type opt_in_schemas %type target_object_type -// User defined function relevant components. +// Routine (UDF/SP) relevant components. %type opt_or_replace opt_return_table opt_return_set opt_no %type param_name routine_as %type opt_routine_param_with_default_list routine_param_with_default_list func_params func_params_list @@ -4634,10 +4630,11 @@ create_stmt: // ON 'stream_uri' // INTO // [WITH -// < MODE = immediate | transactional > | +// < MODE = immediate | validated > | // < CURSOR = start_time > | // < DEFAULT FUNCTION = lww | dlq | udf -// < FUNCTION 'udf' FOR TABLE local_name , ... > +// < FUNCTION 'udf' FOR TABLE local_name , ... 
> | +// < IGNORE_CDC_IGNORED_TTL_DELETES > // ] create_logical_replication_stream_stmt: CREATE LOGICAL REPLICATION STREAM FROM logical_replication_resources ON string_or_placeholder INTO logical_replication_resources opt_logical_replication_options @@ -4735,53 +4732,53 @@ logical_replication_options: { $$.val = &tree.LogicalReplicationOptions{UserFunctions: map[tree.UnresolvedName]tree.RoutineName{*$5.unresolvedObjectName().ToUnresolvedName():$2.unresolvedObjectName().ToRoutineName()}} } +| IGNORE_CDC_IGNORED_TTL_DELETES + { + $$.val = &tree.LogicalReplicationOptions{IgnoreCDCIgnoredTTLDeletes: tree.MakeDBool(true)} + } // %Help: CREATE VIRTUAL CLUSTER - create a new virtual cluster // %Category: Experimental // %Text: -// CREATE VIRTUAL CLUSTER [ IF NOT EXISTS ] name [ LIKE ] [ ] +// CREATE VIRTUAL CLUSTER [ IF NOT EXISTS ] name [ ] // // Replication option: // FROM REPLICATION OF ON [ WITH OPTIONS ... ] create_virtual_cluster_stmt: - CREATE virtual_cluster d_expr opt_like_virtual_cluster + CREATE virtual_cluster d_expr { /* SKIP DOC */ $$.val = &tree.CreateTenant{ TenantSpec: &tree.TenantSpec{IsName: true, Expr: $3.expr()}, - Like: $4.likeTenantSpec(), } } -| CREATE virtual_cluster IF NOT EXISTS d_expr opt_like_virtual_cluster +| CREATE virtual_cluster IF NOT EXISTS d_expr { /* SKIP DOC */ $$.val = &tree.CreateTenant{ IfNotExists: true, TenantSpec: &tree.TenantSpec{IsName: true, Expr: $6.expr()}, - Like: $7.likeTenantSpec(), } } -| CREATE virtual_cluster d_expr opt_like_virtual_cluster FROM REPLICATION OF d_expr ON d_expr opt_with_replication_options +| CREATE virtual_cluster d_expr FROM REPLICATION OF d_expr ON d_expr opt_with_replication_options { /* SKIP DOC */ $$.val = &tree.CreateTenantFromReplication{ TenantSpec: &tree.TenantSpec{IsName: true, Expr: $3.expr()}, - ReplicationSourceTenantName: &tree.TenantSpec{IsName: true, Expr: $8.expr()}, - ReplicationSourceAddress: $10.expr(), - Options: *$11.tenantReplicationOptions(), - Like: $4.likeTenantSpec(), + ReplicationSourceTenantName: &tree.TenantSpec{IsName: true, Expr: $7.expr()}, + ReplicationSourceAddress: $9.expr(), + Options: *$10.tenantReplicationOptions(), } } -| CREATE virtual_cluster IF NOT EXISTS d_expr opt_like_virtual_cluster FROM REPLICATION OF d_expr ON d_expr opt_with_replication_options +| CREATE virtual_cluster IF NOT EXISTS d_expr FROM REPLICATION OF d_expr ON d_expr opt_with_replication_options { /* SKIP DOC */ $$.val = &tree.CreateTenantFromReplication{ IfNotExists: true, TenantSpec: &tree.TenantSpec{IsName: true, Expr: $6.expr()}, - ReplicationSourceTenantName: &tree.TenantSpec{IsName: true, Expr: $11.expr()}, - ReplicationSourceAddress: $13.expr(), - Options: *$14.tenantReplicationOptions(), - Like: $7.likeTenantSpec(), + ReplicationSourceTenantName: &tree.TenantSpec{IsName: true, Expr: $10.expr()}, + ReplicationSourceAddress: $12.expr(), + Options: *$13.tenantReplicationOptions(), } } | CREATE virtual_cluster error // SHOW HELP: CREATE VIRTUAL CLUSTER @@ -4790,19 +4787,6 @@ virtual_cluster: TENANT { /* SKIP DOC */ } | VIRTUAL CLUSTER -// opt_like_virtual_cluster defines a LIKE clause for CREATE VIRTUAL CLUSTER. -// Eventually this can grow to support INCLUDING/EXCLUDING options -// like in CREATE TABLE. -opt_like_virtual_cluster: - /* EMPTY */ - { - $$.val = &tree.LikeTenantSpec{} - } -| LIKE virtual_cluster_spec - { - $$.val = &tree.LikeTenantSpec{OtherTenant: $2.tenantSpec()} - } - // Optional tenant replication options. 
opt_with_replication_options: WITH replication_options_list @@ -4884,6 +4868,7 @@ create_extension_stmt: // | [ NOT ] LEAKPROOF // | { CALLED ON NULL INPUT | RETURNS NULL ON NULL INPUT | STRICT } // | AS 'definition' +// | { [ EXTERNAL ] SECURITY DEFINER } // } ... // %SeeAlso: WEBDOCS/create-function.html create_func_stmt: @@ -5099,19 +5084,19 @@ common_routine_opt_item: } | EXTERNAL SECURITY DEFINER { - return unimplemented(sqllex, "create function...security") + $$.val = tree.RoutineDefiner } | EXTERNAL SECURITY INVOKER { - return unimplemented(sqllex, "create function...security") + $$.val = tree.RoutineInvoker } | SECURITY DEFINER { - return unimplemented(sqllex, "create function...security") + $$.val = tree.RoutineDefiner } | SECURITY INVOKER { - return unimplemented(sqllex, "create function...security") + $$.val = tree.RoutineInvoker } | LEAKPROOF { @@ -7567,7 +7552,7 @@ set_exprs_internal: // %Text: // SET [SESSION] { TO | = } // SET [SESSION] TIME ZONE -// SET [SESSION] CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL { READ COMMITTED | SERIALIZABLE } +// SET [SESSION] CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL { READ COMMITTED | REPEATABLE READ | SERIALIZABLE } // SET [SESSION] TRACING { TO | = } { on | off | cluster | kv | results } [,...] // // %SeeAlso: SHOW SESSION, RESET, DISCARD, SHOW, SET CLUSTER SETTING, SET TRANSACTION, SET LOCAL @@ -7624,7 +7609,7 @@ set_local_stmt: // SET [SESSION] TRANSACTION // // Transaction parameters: -// ISOLATION LEVEL { READ COMMITTED | SERIALIZABLE } +// ISOLATION LEVEL { READ COMMITTED | REPEATABLE READ | SERIALIZABLE } // PRIORITY { LOW | NORMAL | HIGH } // AS OF SYSTEM TIME // [NOT] DEFERRABLE @@ -9628,10 +9613,10 @@ show_locality_stmt: } show_fingerprints_stmt: - SHOW EXPERIMENTAL_FINGERPRINTS FROM TABLE table_name + SHOW EXPERIMENTAL_FINGERPRINTS FROM TABLE table_name opt_with_show_fingerprints_options { /* SKIP DOC */ - $$.val = &tree.ShowFingerprints{Table: $5.unresolvedObjectName()} + $$.val = &tree.ShowFingerprints{Table: $5.unresolvedObjectName(), Options: *$6.showFingerprintOptions()} } | SHOW EXPERIMENTAL_FINGERPRINTS FROM virtual_cluster virtual_cluster_spec opt_with_show_fingerprints_options { @@ -9672,6 +9657,11 @@ fingerprint_options: { $$.val = &tree.ShowFingerprintOptions{StartTimestamp: $4.expr()} } +| EXCLUDE COLUMNS '=' string_or_placeholder_opt_list + { + $$.val = &tree.ShowFingerprintOptions{ExcludedUserColumns: $4.stringOrPlaceholderOptList()} + } + show_full_scans_stmt: @@ -12530,7 +12520,7 @@ transaction_stmt: // START TRANSACTION [ [[,] ...] ] // // Transaction parameters: -// ISOLATION LEVEL { READ COMMITTED | SERIALIZABLE } +// ISOLATION LEVEL { READ COMMITTED | REPEATABLE READ | SERIALIZABLE } // PRIORITY { LOW | NORMAL | HIGH } // // %SeeAlso: COMMIT, ROLLBACK, WEBDOCS/begin-transaction.html @@ -17742,6 +17732,7 @@ unreserved_keyword: | NOWAIT | NULLS | IGNORE_FOREIGN_KEYS +| IGNORE_CDC_IGNORED_TTL_DELETES | INSENSITIVE | OF | OFF @@ -18169,6 +18160,7 @@ bare_label_keywords: | IFERROR | IFNULL | IGNORE_FOREIGN_KEYS +| IGNORE_CDC_IGNORED_TTL_DELETES | ILIKE | IMMEDIATE | IMMEDIATELY diff --git a/pkg/sql/parser/testdata/alter_function b/pkg/sql/parser/testdata/alter_function index 29b3596306e5..5070afd94b22 100644 --- a/pkg/sql/parser/testdata/alter_function +++ b/pkg/sql/parser/testdata/alter_function @@ -70,3 +70,11 @@ ALTER FUNCTION f(INT8) NO DEPENDS ON EXTENSION postgis -- normalized! 
ALTER FUNCTION f(INT8) NO DEPENDS ON EXTENSION postgis -- fully parenthesized ALTER FUNCTION f(INT8) NO DEPENDS ON EXTENSION postgis -- literals removed ALTER FUNCTION _(INT8) NO DEPENDS ON EXTENSION postgis -- identifiers removed + +parse +ALTER FUNCTION f(int) EXTERNAL SECURITY DEFINER +---- +ALTER FUNCTION f(INT8) SECURITY DEFINER -- normalized! +ALTER FUNCTION f(INT8) SECURITY DEFINER -- fully parenthesized +ALTER FUNCTION f(INT8) SECURITY DEFINER -- literals removed +ALTER FUNCTION _(INT8) SECURITY DEFINER -- identifiers removed diff --git a/pkg/sql/parser/testdata/begin_commit b/pkg/sql/parser/testdata/begin_commit index 39b1b6422021..dec3fbb058a8 100644 --- a/pkg/sql/parser/testdata/begin_commit +++ b/pkg/sql/parser/testdata/begin_commit @@ -46,6 +46,22 @@ BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE -- fully parenthesized BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE -- literals removed BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE -- identifiers removed +parse +BEGIN TRANSACTION ISOLATION LEVEL SNAPSHOT +---- +BEGIN TRANSACTION ISOLATION LEVEL SNAPSHOT +BEGIN TRANSACTION ISOLATION LEVEL SNAPSHOT -- fully parenthesized +BEGIN TRANSACTION ISOLATION LEVEL SNAPSHOT -- literals removed +BEGIN TRANSACTION ISOLATION LEVEL SNAPSHOT -- identifiers removed + +parse +BEGIN TRANSACTION ISOLATION LEVEL REPEATABLE READ +---- +BEGIN TRANSACTION ISOLATION LEVEL REPEATABLE READ +BEGIN TRANSACTION ISOLATION LEVEL REPEATABLE READ -- fully parenthesized +BEGIN TRANSACTION ISOLATION LEVEL REPEATABLE READ -- literals removed +BEGIN TRANSACTION ISOLATION LEVEL REPEATABLE READ -- identifiers removed + parse BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED ---- @@ -54,6 +70,14 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED -- fully parenthesized BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED -- literals removed BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED -- identifiers removed +parse +BEGIN TRANSACTION ISOLATION LEVEL READ UNCOMMITTED +---- +BEGIN TRANSACTION ISOLATION LEVEL READ UNCOMMITTED +BEGIN TRANSACTION ISOLATION LEVEL READ UNCOMMITTED -- fully parenthesized +BEGIN TRANSACTION ISOLATION LEVEL READ UNCOMMITTED -- literals removed +BEGIN TRANSACTION ISOLATION LEVEL READ UNCOMMITTED -- identifiers removed + parse BEGIN TRANSACTION PRIORITY LOW ---- diff --git a/pkg/sql/parser/testdata/create_function b/pkg/sql/parser/testdata/create_function index c7da59a7849e..f1a92edf37ad 100644 --- a/pkg/sql/parser/testdata/create_function +++ b/pkg/sql/parser/testdata/create_function @@ -326,90 +326,6 @@ We appreciate your feedback. ---- ---- -error -CREATE OR REPLACE FUNCTION f(a int = 7) RETURNS INT EXTERNAL SECURITY DEFINER AS 'SELECT 1' LANGUAGE SQL ----- ----- -at or near "definer": syntax error: unimplemented: this syntax -DETAIL: source SQL: -CREATE OR REPLACE FUNCTION f(a int = 7) RETURNS INT EXTERNAL SECURITY DEFINER AS 'SELECT 1' LANGUAGE SQL - ^ -HINT: You have attempted to use a feature that is not yet implemented. - -Please check the public issue tracker to check whether this problem is -already tracked. If you cannot find it there, please report the error -with details by creating a new issue. - -If you would rather not post publicly, please contact us directly -using the support form. - -We appreciate your feedback. 
----- ----- - -error -CREATE OR REPLACE FUNCTION f(a int = 7) RETURNS INT EXTERNAL SECURITY INVOKER AS 'SELECT 1' LANGUAGE SQL ----- ----- -at or near "invoker": syntax error: unimplemented: this syntax -DETAIL: source SQL: -CREATE OR REPLACE FUNCTION f(a int = 7) RETURNS INT EXTERNAL SECURITY INVOKER AS 'SELECT 1' LANGUAGE SQL - ^ -HINT: You have attempted to use a feature that is not yet implemented. - -Please check the public issue tracker to check whether this problem is -already tracked. If you cannot find it there, please report the error -with details by creating a new issue. - -If you would rather not post publicly, please contact us directly -using the support form. - -We appreciate your feedback. ----- ----- - -error -CREATE OR REPLACE FUNCTION f(a int = 7) RETURNS INT SECURITY DEFINER AS 'SELECT 1' LANGUAGE SQL ----- ----- -at or near "definer": syntax error: unimplemented: this syntax -DETAIL: source SQL: -CREATE OR REPLACE FUNCTION f(a int = 7) RETURNS INT SECURITY DEFINER AS 'SELECT 1' LANGUAGE SQL - ^ -HINT: You have attempted to use a feature that is not yet implemented. - -Please check the public issue tracker to check whether this problem is -already tracked. If you cannot find it there, please report the error -with details by creating a new issue. - -If you would rather not post publicly, please contact us directly -using the support form. - -We appreciate your feedback. ----- ----- - -error -CREATE OR REPLACE FUNCTION f(a int = 7) RETURNS INT SECURITY INVOKER AS 'SELECT 1' LANGUAGE SQL ----- ----- -at or near "invoker": syntax error: unimplemented: this syntax -DETAIL: source SQL: -CREATE OR REPLACE FUNCTION f(a int = 7) RETURNS INT SECURITY INVOKER AS 'SELECT 1' LANGUAGE SQL - ^ -HINT: You have attempted to use a feature that is not yet implemented. - -Please check the public issue tracker to check whether this problem is -already tracked. If you cannot find it there, please report the error -with details by creating a new issue. - -If you would rather not post publicly, please contact us directly -using the support form. - -We appreciate your feedback. ----- ----- - error CREATE OR REPLACE FUNCTION f(a int = 7) RETURNS INT ROWS 123 AS 'SELECT 1' LANGUAGE SQL ---- @@ -562,3 +478,99 @@ CREATE FUNCTION f() RETURNS TABLE 'SELECT 1' LANGUAGE SQL ^ HINT: You have attempted to use a feature that is not yet implemented. See: https://go.crdb.dev/issue-v/100226/ + +parse +CREATE OR REPLACE FUNCTION f(a int = 7) RETURNS INT EXTERNAL SECURITY DEFINER AS 'SELECT 1' LANGUAGE SQL +---- +CREATE OR REPLACE FUNCTION f(a INT8 DEFAULT 7) + RETURNS INT8 + SECURITY DEFINER + LANGUAGE SQL + AS $$SELECT 1$$ -- normalized! +CREATE OR REPLACE FUNCTION f(a INT8 DEFAULT (7)) + RETURNS INT8 + SECURITY DEFINER + LANGUAGE SQL + AS $$SELECT 1$$ -- fully parenthesized +CREATE OR REPLACE FUNCTION f(a INT8 DEFAULT _) + RETURNS INT8 + SECURITY DEFINER + LANGUAGE SQL + AS $$_$$ -- literals removed +CREATE OR REPLACE FUNCTION _(_ INT8 DEFAULT 7) + RETURNS INT8 + SECURITY DEFINER + LANGUAGE SQL + AS $$_$$ -- identifiers removed + +parse +CREATE OR REPLACE FUNCTION f(a int = 7) RETURNS INT EXTERNAL SECURITY INVOKER AS 'SELECT 1' LANGUAGE SQL +---- +CREATE OR REPLACE FUNCTION f(a INT8 DEFAULT 7) + RETURNS INT8 + SECURITY INVOKER + LANGUAGE SQL + AS $$SELECT 1$$ -- normalized! 
+CREATE OR REPLACE FUNCTION f(a INT8 DEFAULT (7)) + RETURNS INT8 + SECURITY INVOKER + LANGUAGE SQL + AS $$SELECT 1$$ -- fully parenthesized +CREATE OR REPLACE FUNCTION f(a INT8 DEFAULT _) + RETURNS INT8 + SECURITY INVOKER + LANGUAGE SQL + AS $$_$$ -- literals removed +CREATE OR REPLACE FUNCTION _(_ INT8 DEFAULT 7) + RETURNS INT8 + SECURITY INVOKER + LANGUAGE SQL + AS $$_$$ -- identifiers removed + +parse +CREATE OR REPLACE FUNCTION f(a int = 7) RETURNS INT SECURITY DEFINER AS 'SELECT 1' LANGUAGE SQL +---- +CREATE OR REPLACE FUNCTION f(a INT8 DEFAULT 7) + RETURNS INT8 + SECURITY DEFINER + LANGUAGE SQL + AS $$SELECT 1$$ -- normalized! +CREATE OR REPLACE FUNCTION f(a INT8 DEFAULT (7)) + RETURNS INT8 + SECURITY DEFINER + LANGUAGE SQL + AS $$SELECT 1$$ -- fully parenthesized +CREATE OR REPLACE FUNCTION f(a INT8 DEFAULT _) + RETURNS INT8 + SECURITY DEFINER + LANGUAGE SQL + AS $$_$$ -- literals removed +CREATE OR REPLACE FUNCTION _(_ INT8 DEFAULT 7) + RETURNS INT8 + SECURITY DEFINER + LANGUAGE SQL + AS $$_$$ -- identifiers removed + +parse +CREATE OR REPLACE FUNCTION f(a int = 7) RETURNS INT SECURITY INVOKER AS 'SELECT 1' LANGUAGE SQL +---- +CREATE OR REPLACE FUNCTION f(a INT8 DEFAULT 7) + RETURNS INT8 + SECURITY INVOKER + LANGUAGE SQL + AS $$SELECT 1$$ -- normalized! +CREATE OR REPLACE FUNCTION f(a INT8 DEFAULT (7)) + RETURNS INT8 + SECURITY INVOKER + LANGUAGE SQL + AS $$SELECT 1$$ -- fully parenthesized +CREATE OR REPLACE FUNCTION f(a INT8 DEFAULT _) + RETURNS INT8 + SECURITY INVOKER + LANGUAGE SQL + AS $$_$$ -- literals removed +CREATE OR REPLACE FUNCTION _(_ INT8 DEFAULT 7) + RETURNS INT8 + SECURITY INVOKER + LANGUAGE SQL + AS $$_$$ -- identifiers removed diff --git a/pkg/sql/parser/testdata/create_logical_replication b/pkg/sql/parser/testdata/create_logical_replication index 2b9bd3657632..8913f84840de 100644 --- a/pkg/sql/parser/testdata/create_logical_replication +++ b/pkg/sql/parser/testdata/create_logical_replication @@ -54,6 +54,14 @@ CREATE LOGICAL REPLICATION STREAM FROM TABLE (foo) ON ('uri') INTO TABLE (foo) W CREATE LOGICAL REPLICATION STREAM FROM TABLE foo ON '_' INTO TABLE foo WITH OPTIONS (CURSOR = '_', DEFAULT FUNCTION = '_', MODE = '_', FUNCTION a FOR TABLE b, FUNCTION c FOR TABLE d) -- literals removed CREATE LOGICAL REPLICATION STREAM FROM TABLE _ ON 'uri' INTO TABLE _ WITH OPTIONS (CURSOR = '1536242855577149065.0000000000', DEFAULT FUNCTION = 'lww', MODE = 'immediate', FUNCTION _ FOR TABLE _, FUNCTION _ FOR TABLE _) -- identifiers removed +parse +CREATE LOGICAL REPLICATION STREAM FROM TABLE foo.bar ON 'uri' INTO TABLE foo.bar WITH MODE = 'immediate', IGNORE_CDC_IGNORED_TTL_DELETES; +---- +CREATE LOGICAL REPLICATION STREAM FROM TABLE foo.bar ON 'uri' INTO TABLE foo.bar WITH OPTIONS (MODE = 'immediate', IGNORE_CDC_IGNORED_TTL_DELETES) -- normalized! 
+CREATE LOGICAL REPLICATION STREAM FROM TABLE (foo.bar) ON ('uri') INTO TABLE (foo.bar) WITH OPTIONS (MODE = ('immediate'), IGNORE_CDC_IGNORED_TTL_DELETES) -- fully parenthesized +CREATE LOGICAL REPLICATION STREAM FROM TABLE foo.bar ON '_' INTO TABLE foo.bar WITH OPTIONS (MODE = '_', IGNORE_CDC_IGNORED_TTL_DELETES) -- literals removed +CREATE LOGICAL REPLICATION STREAM FROM TABLE _._ ON 'uri' INTO TABLE _._ WITH OPTIONS (MODE = 'immediate', IGNORE_CDC_IGNORED_TTL_DELETES) -- identifiers removed + error CREATE LOGICAL REPLICATION STREAM FROM TABLE foo, bar ON 'uri' INTO TABLE foo, bar; ---- diff --git a/pkg/sql/parser/testdata/create_procedure b/pkg/sql/parser/testdata/create_procedure index 8708c8ab52a5..6f8f9b8c3672 100644 --- a/pkg/sql/parser/testdata/create_procedure +++ b/pkg/sql/parser/testdata/create_procedure @@ -113,27 +113,6 @@ We appreciate your feedback. ---- ---- -error -CREATE PROCEDURE f() EXTERNAL SECURITY DEFINER AS 'SELECT 1' LANGUAGE SQL ----- ----- -at or near "definer": syntax error: unimplemented: this syntax -DETAIL: source SQL: -CREATE PROCEDURE f() EXTERNAL SECURITY DEFINER AS 'SELECT 1' LANGUAGE SQL - ^ -HINT: You have attempted to use a feature that is not yet implemented. - -Please check the public issue tracker to check whether this problem is -already tracked. If you cannot find it there, please report the error -with details by creating a new issue. - -If you would rather not post publicly, please contact us directly -using the support form. - -We appreciate your feedback. ----- ----- - error CREATE PROCEDURE f() SET a = 123 AS 'SELECT 1' LANGUAGE SQL ---- @@ -212,3 +191,83 @@ CREATE PROCEDURE "family"() BEGIN ATOMIC START TRANSACTION; COMMIT TRANSACTION; END -- literals removed CREATE PROCEDURE _() BEGIN ATOMIC START TRANSACTION; COMMIT TRANSACTION; END -- identifiers removed + +parse +CREATE PROCEDURE f() EXTERNAL SECURITY DEFINER AS 'SELECT 1' LANGUAGE SQL +---- +CREATE PROCEDURE f() + SECURITY DEFINER + LANGUAGE SQL + AS $$SELECT 1$$ -- normalized! +CREATE PROCEDURE f() + SECURITY DEFINER + LANGUAGE SQL + AS $$SELECT 1$$ -- fully parenthesized +CREATE PROCEDURE f() + SECURITY DEFINER + LANGUAGE SQL + AS $$_$$ -- literals removed +CREATE PROCEDURE _() + SECURITY DEFINER + LANGUAGE SQL + AS $$_$$ -- identifiers removed + +parse +CREATE PROCEDURE f() EXTERNAL SECURITY INVOKER AS 'SELECT 1' LANGUAGE SQL +---- +CREATE PROCEDURE f() + SECURITY INVOKER + LANGUAGE SQL + AS $$SELECT 1$$ -- normalized! +CREATE PROCEDURE f() + SECURITY INVOKER + LANGUAGE SQL + AS $$SELECT 1$$ -- fully parenthesized +CREATE PROCEDURE f() + SECURITY INVOKER + LANGUAGE SQL + AS $$_$$ -- literals removed +CREATE PROCEDURE _() + SECURITY INVOKER + LANGUAGE SQL + AS $$_$$ -- identifiers removed + +parse +CREATE PROCEDURE f() SECURITY DEFINER AS 'SELECT 1' LANGUAGE SQL +---- +CREATE PROCEDURE f() + SECURITY DEFINER + LANGUAGE SQL + AS $$SELECT 1$$ -- normalized! +CREATE PROCEDURE f() + SECURITY DEFINER + LANGUAGE SQL + AS $$SELECT 1$$ -- fully parenthesized +CREATE PROCEDURE f() + SECURITY DEFINER + LANGUAGE SQL + AS $$_$$ -- literals removed +CREATE PROCEDURE _() + SECURITY DEFINER + LANGUAGE SQL + AS $$_$$ -- identifiers removed + +parse +CREATE PROCEDURE f() SECURITY INVOKER AS 'SELECT 1' LANGUAGE SQL +---- +CREATE PROCEDURE f() + SECURITY INVOKER + LANGUAGE SQL + AS $$SELECT 1$$ -- normalized! 
+CREATE PROCEDURE f() + SECURITY INVOKER + LANGUAGE SQL + AS $$SELECT 1$$ -- fully parenthesized +CREATE PROCEDURE f() + SECURITY INVOKER + LANGUAGE SQL + AS $$_$$ -- literals removed +CREATE PROCEDURE _() + SECURITY INVOKER + LANGUAGE SQL + AS $$_$$ -- identifiers removed diff --git a/pkg/sql/parser/testdata/create_virtual_cluster b/pkg/sql/parser/testdata/create_virtual_cluster index 0d149f113b26..bcff8f337060 100644 --- a/pkg/sql/parser/testdata/create_virtual_cluster +++ b/pkg/sql/parser/testdata/create_virtual_cluster @@ -30,22 +30,6 @@ CREATE VIRTUAL CLUSTER IF NOT EXISTS (bar) -- fully parenthesized CREATE VIRTUAL CLUSTER IF NOT EXISTS bar -- literals removed CREATE VIRTUAL CLUSTER IF NOT EXISTS _ -- identifiers removed -parse -CREATE VIRTUAL CLUSTER foo LIKE bar ----- -CREATE VIRTUAL CLUSTER foo LIKE bar -CREATE VIRTUAL CLUSTER (foo) LIKE (bar) -- fully parenthesized -CREATE VIRTUAL CLUSTER foo LIKE bar -- literals removed -CREATE VIRTUAL CLUSTER _ LIKE _ -- identifiers removed - -parse -CREATE VIRTUAL CLUSTER foo LIKE [123] ----- -CREATE VIRTUAL CLUSTER foo LIKE [123] -CREATE VIRTUAL CLUSTER (foo) LIKE [(123)] -- fully parenthesized -CREATE VIRTUAL CLUSTER foo LIKE [_] -- literals removed -CREATE VIRTUAL CLUSTER _ LIKE [123] -- identifiers removed - parse CREATE VIRTUAL CLUSTER destination FROM REPLICATION OF source ON 'pgurl' ---- @@ -62,22 +46,6 @@ CREATE VIRTUAL CLUSTER IF NOT EXISTS (destination) FROM REPLICATION OF (source) CREATE VIRTUAL CLUSTER IF NOT EXISTS destination FROM REPLICATION OF source ON '_' -- literals removed CREATE VIRTUAL CLUSTER IF NOT EXISTS _ FROM REPLICATION OF _ ON 'pgurl' -- identifiers removed -parse -CREATE VIRTUAL CLUSTER destination LIKE bar FROM REPLICATION OF source ON 'pgurl' ----- -CREATE VIRTUAL CLUSTER destination LIKE bar FROM REPLICATION OF source ON 'pgurl' -CREATE VIRTUAL CLUSTER (destination) LIKE (bar) FROM REPLICATION OF (source) ON ('pgurl') -- fully parenthesized -CREATE VIRTUAL CLUSTER destination LIKE bar FROM REPLICATION OF source ON '_' -- literals removed -CREATE VIRTUAL CLUSTER _ LIKE _ FROM REPLICATION OF _ ON 'pgurl' -- identifiers removed - -parse -CREATE VIRTUAL CLUSTER destination LIKE [123] FROM REPLICATION OF source ON 'pgurl' ----- -CREATE VIRTUAL CLUSTER destination LIKE [123] FROM REPLICATION OF source ON 'pgurl' -CREATE VIRTUAL CLUSTER (destination) LIKE [(123)] FROM REPLICATION OF (source) ON ('pgurl') -- fully parenthesized -CREATE VIRTUAL CLUSTER destination LIKE [_] FROM REPLICATION OF source ON '_' -- literals removed -CREATE VIRTUAL CLUSTER _ LIKE [123] FROM REPLICATION OF _ ON 'pgurl' -- identifiers removed - parse CREATE VIRTUAL CLUSTER "destination-hyphen" FROM REPLICATION OF "source-hyphen" ON 'pgurl' ---- diff --git a/pkg/sql/parser/testdata/set b/pkg/sql/parser/testdata/set index 2b6004a58522..a8cf3cab735f 100644 --- a/pkg/sql/parser/testdata/set +++ b/pkg/sql/parser/testdata/set @@ -103,6 +103,22 @@ SET TRANSACTION ISOLATION LEVEL SERIALIZABLE -- fully parenthesized SET TRANSACTION ISOLATION LEVEL SERIALIZABLE -- literals removed SET TRANSACTION ISOLATION LEVEL SERIALIZABLE -- identifiers removed +parse +SET TRANSACTION ISOLATION LEVEL SNAPSHOT +---- +SET TRANSACTION ISOLATION LEVEL SNAPSHOT +SET TRANSACTION ISOLATION LEVEL SNAPSHOT -- fully parenthesized +SET TRANSACTION ISOLATION LEVEL SNAPSHOT -- literals removed +SET TRANSACTION ISOLATION LEVEL SNAPSHOT -- identifiers removed + +parse +SET TRANSACTION ISOLATION LEVEL REPEATABLE READ +---- +SET TRANSACTION ISOLATION LEVEL REPEATABLE READ +SET 
TRANSACTION ISOLATION LEVEL REPEATABLE READ -- fully parenthesized +SET TRANSACTION ISOLATION LEVEL REPEATABLE READ -- literals removed +SET TRANSACTION ISOLATION LEVEL REPEATABLE READ -- identifiers removed + parse SET TRANSACTION ISOLATION LEVEL READ COMMITTED ---- @@ -111,6 +127,14 @@ SET TRANSACTION ISOLATION LEVEL READ COMMITTED -- fully parenthesized SET TRANSACTION ISOLATION LEVEL READ COMMITTED -- literals removed SET TRANSACTION ISOLATION LEVEL READ COMMITTED -- identifiers removed +parse +SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED +---- +SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED +SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED -- fully parenthesized +SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED -- literals removed +SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED -- identifiers removed + parse SET TRANSACTION PRIORITY LOW ---- diff --git a/pkg/sql/parser/testdata/show b/pkg/sql/parser/testdata/show index ea47c96d75ac..2a5d40e5369b 100644 --- a/pkg/sql/parser/testdata/show +++ b/pkg/sql/parser/testdata/show @@ -1323,6 +1323,14 @@ SHOW EXPERIMENTAL_FINGERPRINTS FROM TABLE d.t -- fully parenthesized SHOW EXPERIMENTAL_FINGERPRINTS FROM TABLE d.t -- literals removed SHOW EXPERIMENTAL_FINGERPRINTS FROM TABLE _._ -- identifiers removed +parse +SHOW EXPERIMENTAL_FINGERPRINTS FROM TABLE d.t WITH START TIMESTAMP = '132412341234.000000', EXCLUDE COLUMNS = ('crdb_original_replication_timestamp', 'other_column') +---- +SHOW EXPERIMENTAL_FINGERPRINTS FROM TABLE d.t WITH OPTIONS (START TIMESTAMP = '132412341234.000000', EXCLUDE COLUMNS = ('crdb_original_replication_timestamp', 'other_column')) -- normalized! +SHOW EXPERIMENTAL_FINGERPRINTS FROM TABLE d.t WITH OPTIONS (START TIMESTAMP = ('132412341234.000000'), EXCLUDE COLUMNS = (('crdb_original_replication_timestamp'), ('other_column'))) -- fully parenthesized +SHOW EXPERIMENTAL_FINGERPRINTS FROM TABLE d.t WITH OPTIONS (START TIMESTAMP = '_', EXCLUDE COLUMNS = ('_', '_')) -- literals removed +SHOW EXPERIMENTAL_FINGERPRINTS FROM TABLE _._ WITH OPTIONS (START TIMESTAMP = '132412341234.000000', EXCLUDE COLUMNS = ('crdb_original_replication_timestamp', 'other_column')) -- identifiers removed + parse SHOW EXPERIMENTAL_FINGERPRINTS FROM VIRTUAL CLUSTER t ---- diff --git a/pkg/sql/pg_catalog.go b/pkg/sql/pg_catalog.go index baa01c29bb08..8ac15b58cc8c 100644 --- a/pkg/sql/pg_catalog.go +++ b/pkg/sql/pg_catalog.go @@ -1177,12 +1177,11 @@ func makeAllRelationsVirtualTableWithDescriptorIDIndex( includesCompositeTypes := populateFromType != nil populateAll := func(ctx context.Context, p *planner, dbContext catalog.DatabaseDescriptor, addRow func(...tree.Datum) error) error { h := makeOidHasher() - if err := forEachTableDescWithTableLookup( - ctx, - p, - dbContext, - virtualOpts, - func(ctx context.Context, db catalog.DatabaseDescriptor, sc catalog.SchemaDescriptor, table catalog.TableDescriptor, lookup tableLookupFn) error { + + opts := forEachTableDescOptions{virtualOpts: virtualOpts} /* no constraints in virtual tables */ + if err := forEachTableDesc(ctx, p, dbContext, opts, + func(ctx context.Context, descCtx tableDescContext) error { + db, sc, table, lookup := descCtx.database, descCtx.schema, descCtx.table, descCtx.tableLookup return populateFromTable(ctx, p, h, db, sc, table, lookup, addRow) }, ); err != nil { @@ -1647,13 +1646,10 @@ https://www.postgresql.org/docs/9.5/catalog-pg-depend.html`, return errors.New("could not find pg_catalog.pg_rewrite") } h := makeOidHasher() - err = forEachTableDescWithTableLookup(ctx, p, dbContext, 
hideVirtual /*virtual tables have no constraints*/, func( - ctx context.Context, - db catalog.DatabaseDescriptor, - sc catalog.SchemaDescriptor, - table catalog.TableDescriptor, - tableLookup tableLookupFn, - ) error { + opts := forEachTableDescOptions{virtualOpts: hideVirtual} /*virtual tables have no constraints*/ + err = forEachTableDesc(ctx, p, dbContext, opts, func( + ctx context.Context, descCtx tableDescContext) error { + db, sc, table, tableLookup := descCtx.database, descCtx.schema, descCtx.table, descCtx.tableLookup pgConstraintTableOid := tableOid(pgConstraintsDesc.GetID()) pgClassTableOid := tableOid(pgClassDesc.GetID()) pgRewriteTableOid := tableOid(pgRewriteDesc.GetID()) @@ -1943,8 +1939,10 @@ https://www.postgresql.org/docs/9.5/catalog-pg-index.html`, schema: vtable.PGCatalogIndex, populate: func(ctx context.Context, p *planner, dbContext catalog.DatabaseDescriptor, addRow func(...tree.Datum) error) error { h := makeOidHasher() - return forEachTableDesc(ctx, p, dbContext, hideVirtual, /* virtual tables do not have indexes */ - func(ctx context.Context, db catalog.DatabaseDescriptor, sc catalog.SchemaDescriptor, table catalog.TableDescriptor) error { + opts := forEachTableDescOptions{virtualOpts: hideVirtual} /* virtual tables do not have indexes */ + return forEachTableDesc(ctx, p, dbContext, opts, + func(ctx context.Context, descCtx tableDescContext) error { + table := descCtx.table tableOid := tableOid(table.GetID()) return catalog.ForEachIndex(table, catalog.IndexOpts{}, func(index catalog.Index) error { @@ -2071,8 +2069,10 @@ https://www.postgresql.org/docs/9.5/view-pg-indexes.html`, schema: vtable.PGCatalogIndexes, populate: func(ctx context.Context, p *planner, dbContext catalog.DatabaseDescriptor, addRow func(...tree.Datum) error) error { h := makeOidHasher() - return forEachTableDescWithTableLookup(ctx, p, dbContext, hideVirtual, /* virtual tables do not have indexes */ - func(ctx context.Context, db catalog.DatabaseDescriptor, sc catalog.SchemaDescriptor, table catalog.TableDescriptor, _ tableLookupFn) error { + opts := forEachTableDescOptions{virtualOpts: hideVirtual} /* virtual tables do not have indexes */ + return forEachTableDesc(ctx, p, dbContext, opts, + func(ctx context.Context, descCtx tableDescContext) error { + db, sc, table := descCtx.database, descCtx.schema, descCtx.table scNameName := tree.NewDName(sc.GetName()) tblName := tree.NewDName(table.GetName()) return catalog.ForEachIndex(table, catalog.IndexOpts{}, func(index catalog.Index) error { @@ -2189,8 +2189,10 @@ var pgCatalogMatViewsTable = virtualSchemaTable{ https://www.postgresql.org/docs/9.6/view-pg-matviews.html`, schema: vtable.PGCatalogMatViews, populate: func(ctx context.Context, p *planner, dbContext catalog.DatabaseDescriptor, addRow func(...tree.Datum) error) error { - return forEachTableDesc(ctx, p, dbContext, hideVirtual, - func(ctx context.Context, db catalog.DatabaseDescriptor, sc catalog.SchemaDescriptor, desc catalog.TableDescriptor) error { + opts := forEachTableDescOptions{virtualOpts: hideVirtual} + return forEachTableDesc(ctx, p, dbContext, opts, + func(ctx context.Context, descCtx tableDescContext) error { + sc, desc := descCtx.schema, descCtx.table if !desc.MaterializedView() { return nil } @@ -2872,13 +2874,10 @@ https://www.postgresql.org/docs/9.5/catalog-pg-rewrite.html`, h := makeOidHasher() ruleName := tree.NewDString("_RETURN") evType := tree.NewDString(string(evTypeSelect)) - return forEachTableDescWithTableLookup(ctx, p, dbContext, hideVirtual /*virtual tables have no 
constraints*/, func( - ctx context.Context, - db catalog.DatabaseDescriptor, - sc catalog.SchemaDescriptor, - table catalog.TableDescriptor, - tableLookup tableLookupFn, - ) error { + opts := forEachTableDescOptions{virtualOpts: hideVirtual} /*virtual tables have no constraints*/ + return forEachTableDesc(ctx, p, dbContext, opts, func( + ctx context.Context, descCtx tableDescContext) error { + table := descCtx.table if !table.IsTable() && !table.IsView() { return nil } @@ -2974,8 +2973,10 @@ var pgCatalogSequenceTable = virtualSchemaTable{ https://www.postgresql.org/docs/9.5/catalog-pg-sequence.html`, schema: vtable.PGCatalogSequence, populate: func(ctx context.Context, p *planner, dbContext catalog.DatabaseDescriptor, addRow func(...tree.Datum) error) error { - return forEachTableDesc(ctx, p, dbContext, hideVirtual, /* virtual schemas do not have indexes */ - func(ctx context.Context, db catalog.DatabaseDescriptor, sc catalog.SchemaDescriptor, table catalog.TableDescriptor) error { + opts := forEachTableDescOptions{virtualOpts: hideVirtual} /* virtual schemas do not have indexes */ + return forEachTableDesc(ctx, p, dbContext, opts, + func(ctx context.Context, descCtx tableDescContext) error { + table := descCtx.table if !table.IsSequence() { return nil } @@ -3122,8 +3123,10 @@ https://www.postgresql.org/docs/9.6/catalog-pg-shdepend.html`, } // Populating table descriptor dependencies with roles - if err = forEachTableDesc(ctx, p, dbContext, virtualMany, - func(ctx context.Context, db catalog.DatabaseDescriptor, sc catalog.SchemaDescriptor, table catalog.TableDescriptor) error { + opts := forEachTableDescOptions{virtualOpts: virtualMany} + if err = forEachTableDesc(ctx, p, dbContext, opts, + func(ctx context.Context, descCtx tableDescContext) error { + db, table := descCtx.database, descCtx.table privDesc, err := p.getPrivilegeDescriptor(ctx, table) if err != nil { return err @@ -3204,8 +3207,10 @@ https://www.postgresql.org/docs/9.5/view-pg-tables.html`, // Note: pg_catalog.pg_tables is not well-defined if the dbContext is // empty -- listing tables across databases can yield duplicate // schema/table names. - return forEachTableDesc(ctx, p, dbContext, virtualMany, - func(ctx context.Context, db catalog.DatabaseDescriptor, sc catalog.SchemaDescriptor, table catalog.TableDescriptor) error { + opts := forEachTableDescOptions{virtualOpts: virtualMany} + return forEachTableDesc(ctx, p, dbContext, opts, + func(ctx context.Context, descCtx tableDescContext) error { + sc, table := descCtx.schema, descCtx.table if !table.IsTable() { return nil } @@ -3590,14 +3595,12 @@ https://www.postgresql.org/docs/9.5/catalog-pg-type.html`, } // Each table has a corresponding pg_type row. 
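The pg_catalog.go hunks above and below all apply one mechanical refactor: forEachTableDescWithTableLookup's positional visitor arguments become a forEachTableDescOptions struct plus a single tableDescContext handed to the visitor. A minimal, self-contained sketch of that parameter-object pattern; the struct and function names mirror the diff, while the field types are illustrative stand-ins for the catalog descriptors:

    package main

    import "fmt"

    // Stand-ins for catalog.DatabaseDescriptor etc.; only the shape matters here.
    type forEachTableDescOptions struct {
        virtualOpts int // e.g. hideVirtual, virtualMany, virtualCurrentDB
    }

    type tableDescContext struct {
        database, schema, table string
        tableLookup             func(id int) string
    }

    // forEachTableDesc hands every visitor one context struct instead of a
    // growing positional parameter list, so a new field (like tableLookup)
    // can be added without rewriting every call site.
    func forEachTableDesc(
        opts forEachTableDescOptions,
        visit func(descCtx tableDescContext) error,
    ) error {
        for _, c := range []tableDescContext{{database: "db", schema: "public", table: "t"}} {
            if err := visit(c); err != nil {
                return err
            }
        }
        return nil
    }

    func main() {
        opts := forEachTableDescOptions{virtualOpts: 1 /* hideVirtual */}
        _ = forEachTableDesc(opts, func(descCtx tableDescContext) error {
            sc, table := descCtx.schema, descCtx.table // unpack only what's needed
            fmt.Println(sc, table)
            return nil
        })
    }

Callers that only need one or two fields, like the pg_catalog populators above, unpack them on the first line of the visitor, which keeps each call site as terse as the old positional form.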
- if err := forEachTableDescWithTableLookup( - ctx, - p, - dbContext, - virtualCurrentDB, - func(ctx context.Context, db catalog.DatabaseDescriptor, sc catalog.SchemaDescriptor, table catalog.TableDescriptor, lookup tableLookupFn) error { - return addPGTypeRowForTable(ctx, p, h, db, sc, table, addRow) - }, + + opts := forEachTableDescOptions{virtualOpts: virtualCurrentDB} + if err := forEachTableDesc(ctx, p, dbContext, opts, func( + ctx context.Context, descCtx tableDescContext) error { + return addPGTypeRowForTable(ctx, p, h, descCtx.database, descCtx.schema, descCtx.table, addRow) + }, ); err != nil { return err } @@ -3924,7 +3927,9 @@ https://www.postgresql.org/docs/13/catalog-pg-statistic-ext.html`, } return err } - canSeeDescriptor, err := userCanSeeDescriptor(ctx, p, tbl, db, false /* allowAdding */) + canSeeDescriptor, err := userCanSeeDescriptor( + ctx, p, tbl, db, false /* allowAdding */, false /* includeDropped */) + if err != nil { return err } @@ -3964,8 +3969,11 @@ https://www.postgresql.org/docs/13/view-pg-sequences.html `, schema: vtable.PgCatalogSequences, populate: func(ctx context.Context, p *planner, dbContext catalog.DatabaseDescriptor, addRow func(...tree.Datum) error) error { - return forEachTableDesc(ctx, p, dbContext, hideVirtual, /* virtual schemas do not have indexes */ - func(ctx context.Context, db catalog.DatabaseDescriptor, sc catalog.SchemaDescriptor, table catalog.TableDescriptor) error { + + opts := forEachTableDescOptions{virtualOpts: hideVirtual} /* virtual schemas do not have indexes */ + return forEachTableDesc(ctx, p, dbContext, opts, + func(ctx context.Context, descCtx tableDescContext) error { + sc, table := descCtx.schema, descCtx.table if !table.IsSequence() { return nil } @@ -4807,8 +4815,11 @@ https://www.postgresql.org/docs/9.5/view-pg-views.html`, populate: func(ctx context.Context, p *planner, dbContext catalog.DatabaseDescriptor, addRow func(...tree.Datum) error) error { // Note: pg_views is not well defined if the dbContext is empty, // because it does not distinguish views in separate databases. - return forEachTableDesc(ctx, p, dbContext, hideVirtual, /*virtual schemas do not have views*/ - func(ctx context.Context, db catalog.DatabaseDescriptor, sc catalog.SchemaDescriptor, desc catalog.TableDescriptor) error { + + opts := forEachTableDescOptions{virtualOpts: hideVirtual} /*virtual schemas do not have views*/ + return forEachTableDesc(ctx, p, dbContext, opts, + func(ctx context.Context, descCtx tableDescContext) error { + sc, desc := descCtx.schema, descCtx.table if !desc.IsView() || desc.MaterializedView() { return nil } @@ -5227,7 +5238,8 @@ func populateVirtualIndexForTable( // Don't include tables that aren't in the current database unless // they're virtual, dropped tables, or ones that the user can't see. 
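The userCanSeeDescriptor call sites here and below gain a second positional boolean, annotated inline in the repo's usual style. A tiny sketch of why that convention matters, with a hypothetical signature standing in for the real one:

    package main

    // Hypothetical signature for illustration only; the real
    // userCanSeeDescriptor also takes ctx, planner, and descriptors.
    func userCanSee(allowAdding, includeDropped bool) bool {
        return allowAdding || includeDropped
    }

    func main() {
        // Annotating each positional boolean inline keeps call sites
        // self-describing, so readers need not look up the parameter order.
        _ = userCanSee(true /* allowAdding */, false /* includeDropped */)
    }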
- canSeeDescriptor, err := userCanSeeDescriptor(ctx, p, tableDesc, dbContext, true /*allowAdding*/) + canSeeDescriptor, err := userCanSeeDescriptor( + ctx, p, tableDesc, dbContext, true /*allowAdding*/, false /* includeDropped */) if err != nil { return false, err } diff --git a/pkg/sql/pg_extension.go b/pkg/sql/pg_extension.go index cb78f707a03e..1d6a8358b8d2 100644 --- a/pkg/sql/pg_extension.go +++ b/pkg/sql/pg_extension.go @@ -41,12 +41,10 @@ func postgisColumnsTablePopulator( matchingFamily types.Family, ) func(context.Context, *planner, catalog.DatabaseDescriptor, func(...tree.Datum) error) error { return func(ctx context.Context, p *planner, dbContext catalog.DatabaseDescriptor, addRow func(...tree.Datum) error) error { - return forEachTableDesc( - ctx, - p, - dbContext, - hideVirtual, - func(ctx context.Context, db catalog.DatabaseDescriptor, sc catalog.SchemaDescriptor, table catalog.TableDescriptor) error { + opts := forEachTableDescOptions{virtualOpts: hideVirtual} + return forEachTableDesc(ctx, p, dbContext, opts, + func(ctx context.Context, descCtx tableDescContext) error { + db, sc, table := descCtx.database, descCtx.schema, descCtx.table if !table.IsPhysicalTable() { return nil } diff --git a/pkg/sql/pgwire/auth.go b/pkg/sql/pgwire/auth.go index c13e3629ff10..8ff9756f8894 100644 --- a/pkg/sql/pgwire/auth.go +++ b/pkg/sql/pgwire/auth.go @@ -112,7 +112,7 @@ func (c *conn) handleAuthentication( // Populate the AuthMethod with per-connection information so that it // can compose the next layer of behaviors that we're going to apply // to the incoming connection. - behaviors, err := authMethod(ctx, ac, tlsState, execCfg, hbaEntry, authOpt.identMap) + behaviors, err := authMethod(ctx, ac, c.sessionArgs.User, tlsState, execCfg, hbaEntry, authOpt.identMap) connClose = behaviors.ConnClose if err != nil { ac.LogAuthFailed(ctx, eventpb.AuthFailReason_UNKNOWN, err) diff --git a/pkg/sql/pgwire/auth_methods.go b/pkg/sql/pgwire/auth_methods.go index 0cffcf224f16..122997ef2806 100644 --- a/pkg/sql/pgwire/auth_methods.go +++ b/pkg/sql/pgwire/auth_methods.go @@ -99,6 +99,7 @@ func loadDefaultMethods() { type AuthMethod = func( ctx context.Context, c AuthConn, + sessionUser username.SQLUsername, tlsState tls.ConnectionState, execCfg *sql.ExecutorConfig, entry *hba.Entry, @@ -122,6 +123,7 @@ var _ AuthMethod = authLDAP func authPassword( _ context.Context, c AuthConn, + _ username.SQLUsername, _ tls.ConnectionState, execCfg *sql.ExecutorConfig, _ *hba.Entry, @@ -246,6 +248,7 @@ func passwordString(pwdData []byte) (string, error) { func authScram( ctx context.Context, c AuthConn, + _ username.SQLUsername, _ tls.ConnectionState, execCfg *sql.ExecutorConfig, _ *hba.Entry, @@ -424,6 +427,7 @@ func scramAuthenticator( func authCert( _ context.Context, _ AuthConn, + _ username.SQLUsername, tlsState tls.ConnectionState, execCfg *sql.ExecutorConfig, hbaEntry *hba.Entry, @@ -487,6 +491,7 @@ func authCert( func authCertPassword( ctx context.Context, c AuthConn, + sessionUser username.SQLUsername, tlsState tls.ConnectionState, execCfg *sql.ExecutorConfig, entry *hba.Entry, @@ -507,7 +512,7 @@ func authCertPassword( c.LogAuthInfof(ctx, "client presented certificate, proceeding with certificate validation") fn = authCert } - return fn(ctx, c, tlsState, execCfg, entry, identMap) + return fn(ctx, c, sessionUser, tlsState, execCfg, entry, identMap) } // AutoSelectPasswordAuth determines whether CockroachDB automatically promotes the password @@ -532,6 +537,7 @@ var AutoSelectPasswordAuth = 
settings.RegisterBoolSetting( func authAutoSelectPasswordProtocol( _ context.Context, c AuthConn, + _ username.SQLUsername, _ tls.ConnectionState, execCfg *sql.ExecutorConfig, _ *hba.Entry, @@ -612,6 +618,7 @@ func authAutoSelectPasswordProtocol( func authCertScram( ctx context.Context, c AuthConn, + sessionUser username.SQLUsername, tlsState tls.ConnectionState, execCfg *sql.ExecutorConfig, entry *hba.Entry, @@ -625,7 +632,7 @@ func authCertScram( c.LogAuthInfof(ctx, "client presented certificate, proceeding with certificate validation") fn = authCert } - return fn(ctx, c, tlsState, execCfg, entry, identMap) + return fn(ctx, c, sessionUser, tlsState, execCfg, entry, identMap) } // authTrust is the AuthMethod constructor for HBA method "trust": @@ -633,6 +640,7 @@ func authCertScram( func authTrust( _ context.Context, _ AuthConn, + _ username.SQLUsername, _ tls.ConnectionState, _ *sql.ExecutorConfig, _ *hba.Entry, @@ -651,6 +659,7 @@ func authTrust( func authReject( _ context.Context, c AuthConn, + _ username.SQLUsername, _ tls.ConnectionState, _ *sql.ExecutorConfig, _ *hba.Entry, @@ -682,6 +691,7 @@ func authSessionRevivalToken(token []byte) AuthMethod { return func( _ context.Context, c AuthConn, + _ username.SQLUsername, _ tls.ConnectionState, execCfg *sql.ExecutorConfig, _ *hba.Entry, @@ -750,6 +760,7 @@ var ConfigureJWTAuth = func( func authJwtToken( sctx context.Context, c AuthConn, + _ username.SQLUsername, _ tls.ConnectionState, execCfg *sql.ExecutorConfig, _ *hba.Entry, @@ -803,34 +814,60 @@ func authJwtToken( return b, nil } -// LDAPVerifier is an interface for `ldapauthccl` pkg to add ldap login support. -type LDAPVerifier interface { +// LDAPManager is an interface for `ldapauthccl` pkg to add ldap login(authN) +// and groups sync(authZ) support. +type LDAPManager interface { + // FetchLDAPUserDN extracts the user distinguished name for the sql session + // user performing a lookup for the user on ldap server using options provided + // in the hba conf and supplied sql username in db connection string. + FetchLDAPUserDN(_ context.Context, _ *cluster.Settings, + _ username.SQLUsername, + _ *hba.Entry, + _ *identmap.Conf, + ) (userDN *ldap.DN, detailedErrorMsg redact.RedactableString, authError error) // ValidateLDAPLogin validates whether the password supplied could be used to - // bind to ldap server with a distinguished name obtained from performing a - // search operation using options provided in the hba conf and supplied sql - // username in db connection string. + // bind to ldap server with the ldap user DN(provided as systemIdentityDN + // being the "externally-defined" system identity). ValidateLDAPLogin(_ context.Context, _ *cluster.Settings, + _ *ldap.DN, _ username.SQLUsername, _ string, _ *hba.Entry, _ *identmap.Conf, ) (detailedErrorMsg redact.RedactableString, authError error) + // FetchLDAPGroups retrieves ldap groups for the supplied ldap user + // DN(provided as systemIdentityDN being the "externally-defined" system + // identity) performing a group search with the options provided in the hba + // conf and filtering for the groups which have the user DN as its member. 
+ FetchLDAPGroups(_ context.Context, _ *cluster.Settings, + _ *ldap.DN, + _ username.SQLUsername, + _ *hba.Entry, + _ *identmap.Conf, + ) (ldapGroups []*ldap.DN, detailedErrorMsg redact.RedactableString, authError error) } -// ldapVerifier is a singleton global pgwire object which gets initialized from +// ldapManager is a singleton global pgwire object which gets initialized from // authLDAP method whenever an LDAP auth attempt happens. It depends on ldapccl // module to be imported properly to override its default ConfigureLDAPAuth // constructor. -var ldapVerifier = struct { +var ldapManager = struct { sync.Once - v LDAPVerifier + m LDAPManager }{} type noLDAPConfigured struct{} +func (c *noLDAPConfigured) FetchLDAPUserDN( + _ context.Context, _ *cluster.Settings, _ username.SQLUsername, _ *hba.Entry, _ *identmap.Conf, +) (retrievedUserDN *ldap.DN, detailedErrorMsg redact.RedactableString, authError error) { + return nil, "", errors.New("LDAP based authentication requires CCL features") +} + func (c *noLDAPConfigured) ValidateLDAPLogin( _ context.Context, _ *cluster.Settings, + _ *ldap.DN, _ username.SQLUsername, _ string, _ *hba.Entry, @@ -839,14 +876,25 @@ func (c *noLDAPConfigured) ValidateLDAPLogin( return "", errors.New("LDAP based authentication requires CCL features") } +func (c *noLDAPConfigured) FetchLDAPGroups( + _ context.Context, + _ *cluster.Settings, + _ *ldap.DN, + _ username.SQLUsername, + _ *hba.Entry, + _ *identmap.Conf, +) (ldapGroups []*ldap.DN, detailedErrorMsg redact.RedactableString, authError error) { + return nil, "", errors.New("LDAP based authorization requires CCL features") +} + // ConfigureLDAPAuth is a hook for the `ldapauthccl` library to add LDAP login -// support. It's called to setup the LDAPVerifier just as it is needed. +// support. It's called to setup the LDAPManager just as it is needed. var ConfigureLDAPAuth = func( serverCtx context.Context, ambientCtx log.AmbientContext, st *cluster.Settings, clusterUUID uuid.UUID, -) LDAPVerifier { +) LDAPManager { return &noLDAPConfigured{} } @@ -855,19 +903,38 @@ var ConfigureLDAPAuth = func( func authLDAP( sCtx context.Context, c AuthConn, + sessionUser username.SQLUsername, _ tls.ConnectionState, execCfg *sql.ExecutorConfig, entry *hba.Entry, identMap *identmap.Conf, ) (*AuthBehaviors, error) { - ldapVerifier.Do(func() { - if ldapVerifier.v == nil { - ldapVerifier.v = ConfigureLDAPAuth(sCtx, execCfg.AmbientCtx, execCfg.Settings, execCfg.NodeInfo.LogicalClusterID()) + ldapManager.Do(func() { + if ldapManager.m == nil { + ldapManager.m = ConfigureLDAPAuth(sCtx, execCfg.AmbientCtx, execCfg.Settings, execCfg.NodeInfo.LogicalClusterID()) } }) - b := &AuthBehaviors{} b.SetRoleMapper(UseProvidedIdentity) + + ldapUserDN, detailedErrors, authError := ldapManager.m.FetchLDAPUserDN(sCtx, execCfg.Settings, sessionUser, entry, identMap) + if authError != nil { + errForLog := authError + if detailedErrors != "" { + errForLog = errors.Join(errForLog, errors.Newf("%s", detailedErrors)) + } + c.LogAuthFailed(sCtx, eventpb.AuthFailReason_USER_RETRIEVAL_ERROR, errForLog) + return b, authError + } else { + // The DN of user from LDAP server is set as the system identity DN which + // can then be used for authenticator & authorizer AuthBehaviors fn. 
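A skeletal view of how an AuthMethod composes its AuthBehaviors now that the session user is threaded in up front, which is what lets authLDAP resolve the user's DN before any password exchange. The types below are simplified stand-ins for the pgwire ones; only the shape is taken from the diff:

    package main

    import "fmt"

    // Stand-ins for username.SQLUsername and AuthBehaviors.
    type sqlUsername string
    type authBehaviors struct{ authenticate func() error }

    // authMethod mirrors the widened pgwire AuthMethod shape: the session
    // user from the connection string is available to every method before
    // the TLS state or HBA entry is consulted.
    type authMethod func(sessionUser sqlUsername) (*authBehaviors, error)

    func authExample(sessionUser sqlUsername) (*authBehaviors, error) {
        return &authBehaviors{
            authenticate: func() error {
                // e.g. consult an external directory keyed on sessionUser.
                fmt.Println("authenticating", sessionUser)
                return nil
            },
        }, nil
    }

    func main() {
        var m authMethod = authExample
        b, _ := m("alice")
        _ = b.authenticate()
    }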
+ externalUserDN, err := username.MakeSQLUsernameFromUserInput(ldapUserDN.String(), username.PurposeValidation) + if err != nil { + log.Warningf(sCtx, "cannot create sql user for retrieved DN from LDAP server: %+v", err) + } + c.SetSystemIdentity(externalUserDN) + } + b.SetAuthenticator(func(ctx context.Context, user username.SQLUsername, clientConnection bool, _ PasswordRetrievalFn, _ *ldap.DN) error { c.LogAuthInfof(ctx, "LDAP password provided; attempting to bind to domain") if !clientConnection { @@ -898,7 +965,7 @@ func authLDAP( if len(ldapPwd) == 0 { return security.NewErrPasswordUserAuthFailed(user) } - if detailedErrors, authError := ldapVerifier.v.ValidateLDAPLogin(ctx, execCfg.Settings, user, ldapPwd, entry, identMap); authError != nil { + if detailedErrors, authError := ldapManager.m.ValidateLDAPLogin(ctx, execCfg.Settings, ldapUserDN, user, ldapPwd, entry, identMap); authError != nil { errForLog := authError if detailedErrors != "" { errForLog = errors.Join(errForLog, errors.Newf("%s", detailedErrors)) @@ -908,5 +975,7 @@ func authLDAP( } return nil }) + // TODO(souravcrl): add authorizer auth behavior b.SetAuthorizer() for syncing LDAP groups + return b, nil } diff --git a/pkg/sql/pgwire/pgwirebase/encoding.go b/pkg/sql/pgwire/pgwirebase/encoding.go index 9138d56e998f..b38992b224f4 100644 --- a/pkg/sql/pgwire/pgwirebase/encoding.go +++ b/pkg/sql/pgwire/pgwirebase/encoding.go @@ -921,6 +921,12 @@ type PGNumeric struct { // for a timestamp. To create a timestamp from this value, it takes the microseconds // delta and adds it to PGEpochJDate. func pgBinaryToTime(i int64) time.Time { + if i == math.MaxInt64 { + return pgdate.TimeInfinity + } + if i == math.MinInt64 { + return pgdate.TimeNegativeInfinity + } return duration.AddMicros(PGEpochJDate, i) } diff --git a/pkg/sql/pgwire/testdata/encodings.json b/pkg/sql/pgwire/testdata/encodings.json index cf0993a26363..8cc4e2f99952 100644 --- a/pkg/sql/pgwire/testdata/encodings.json +++ b/pkg/sql/pgwire/testdata/encodings.json @@ -1980,6 +1980,20 @@ "TextAsBinary": [57, 48, 48, 52, 45, 49, 48, 45, 49, 57, 32, 49, 48, 58, 50, 51, 58, 53, 52], "Binary": [3, 17, 83, 233, 31, 54, 66, 128] }, + { + "SQL": "'infinity'::timestamp", + "Oid": 1114, + "Text": "infinity", + "TextAsBinary": [105, 110, 102, 105, 110, 105, 116, 121], + "Binary": [127, 255, 255, 255, 255, 255, 255, 255] + }, + { + "SQL": "'-infinity'::timestamp", + "Oid": 1114, + "Text": "-infinity", + "TextAsBinary": [45, 105, 110, 102, 105, 110, 105, 116, 121], + "Binary": [128, 0, 0, 0, 0, 0, 0, 0] + }, { "SQL": "'1999-01-08 04:05:06+00'::timestamptz", "Oid": 1184, diff --git a/pkg/sql/pgwire/types.go b/pkg/sql/pgwire/types.go index 7791eebcbfc3..de453fa3a5ec 100644 --- a/pkg/sql/pgwire/types.go +++ b/pkg/sql/pgwire/types.go @@ -905,6 +905,17 @@ func (b *writeBuffer) writeBinaryColumnarElement( // is represented as the number of microseconds between the given time and Jan 1, 2000 // (dubbed the PGEpochJDate), stored within an int64. func timeToPgBinary(t time.Time, offset *time.Location) int64 { + if t == pgdate.TimeInfinity { + // Postgres uses math.MaxInt64 microseconds as the infinity value. + // See: https://github.com/postgres/postgres/blob/42aa1f0ab321fd43cbfdd875dd9e13940b485900/src/include/datatype/timestamp.h#L107. + return math.MaxInt64 + } + if t == pgdate.TimeNegativeInfinity { + // Postgres uses math.MinInt64 microseconds as the negative infinity value. 
+ // See: https://github.com/postgres/postgres/blob/42aa1f0ab321fd43cbfdd875dd9e13940b485900/src/include/datatype/timestamp.h#L107. + return math.MinInt64 + } + if offset != nil { t = t.In(offset) } else { diff --git a/pkg/sql/physicalplan/replicaoracle/oracle.go b/pkg/sql/physicalplan/replicaoracle/oracle.go index 9cf7fd47d28b..efc984307799 100644 --- a/pkg/sql/physicalplan/replicaoracle/oracle.go +++ b/pkg/sql/physicalplan/replicaoracle/oracle.go @@ -211,7 +211,7 @@ func (o *closestOracle) ChoosePreferredReplica( if err != nil { return roachpb.ReplicaDescriptor{}, false, err } - replicas.OptimizeReplicaOrder(o.st, o.nodeID, o.healthFunc, o.latencyFunc, o.locality) + replicas.OptimizeReplicaOrder(ctx, o.st, o.nodeID, o.healthFunc, o.latencyFunc, o.locality) repl := replicas[0].ReplicaDescriptor // There are no "misplanned" ranges if we know the leaseholder, and we're // deliberately choosing non-leaseholder. @@ -279,7 +279,7 @@ func (o *binPackingOracle) ChoosePreferredReplica( if err != nil { return roachpb.ReplicaDescriptor{}, false, err } - replicas.OptimizeReplicaOrder(o.st, o.nodeID, o.healthFunc, o.latencyFunc, o.locality) + replicas.OptimizeReplicaOrder(ctx, o.st, o.nodeID, o.healthFunc, o.latencyFunc, o.locality) // Look for a replica that has been assigned some ranges, but it's not yet full. minLoad := int(math.MaxInt32) diff --git a/pkg/sql/plan_opt.go b/pkg/sql/plan_opt.go index 8fc5b09753dd..d18bd09e4f0c 100644 --- a/pkg/sql/plan_opt.go +++ b/pkg/sql/plan_opt.go @@ -1032,7 +1032,7 @@ func (opc *optPlanningCtx) makeQueryIndexRecommendation( // Save the normalized memo created by the optbuilder. savedMemo := opc.optimizer.DetachMemo(ctx) - // Use the optimizer to fully normalize the memo. We need to do this before + // Use the optimizer to fully optimize the memo. We need to do this before // finding index candidates because the *memo.SortExpr from the sort enforcer // is only added to the memo in this step. The sort expression is required to // determine certain index candidates. 
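The two wire-format changes above are symmetric: timeToPgBinary maps the pgdate infinity sentinels to the int64 extremes, and pgBinaryToTime maps them back. A self-contained sketch of that round trip; the epoch and sentinel encoding follow the diff, while the concrete sentinel timestamps are stand-ins for pgdate.TimeInfinity and pgdate.TimeNegativeInfinity:

    package main

    import (
        "fmt"
        "math"
        "time"
    )

    // Stand-ins for pgdate.TimeInfinity / pgdate.TimeNegativeInfinity.
    var (
        timeInfinity    = time.Date(294277, 1, 1, 0, 0, 0, 0, time.UTC)
        timeNegInfinity = time.Date(-4714, 11, 24, 0, 0, 0, 0, time.UTC)
        pgEpoch         = time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC) // PGEpochJDate
    )

    // encode returns microseconds since 2000-01-01, with the int64 extremes
    // reserved for +/-infinity, matching Postgres's binary TIMESTAMP format.
    func encode(t time.Time) int64 {
        switch t {
        case timeInfinity:
            return math.MaxInt64
        case timeNegInfinity:
            return math.MinInt64
        }
        return t.Sub(pgEpoch).Microseconds()
    }

    func decode(i int64) time.Time {
        switch i {
        case math.MaxInt64:
            return timeInfinity
        case math.MinInt64:
            return timeNegInfinity
        }
        return pgEpoch.Add(time.Duration(i) * time.Microsecond)
    }

    func main() {
        fmt.Println(decode(encode(timeInfinity)) == timeInfinity) // true
    }

This matches the new encodings.json vectors: the all-0x7F binary value decodes to the text "infinity", and the all-0x80 value to "-infinity".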
diff --git a/pkg/sql/rename_table.go b/pkg/sql/rename_table.go index 2492dcfdd0e2..60e3f2558880 100644 --- a/pkg/sql/rename_table.go +++ b/pkg/sql/rename_table.go @@ -338,7 +338,7 @@ func (n *renameTableNode) checkForCrossDbReferences( return nil } - return errors.WithHintf( + return errors.WithHint( pgerror.Newf(pgcode.ObjectNotInPrerequisiteState, "a foreign key constraint %q will exist between databases after rename "+ "(see the '%s' cluster setting)", @@ -370,7 +370,7 @@ func (n *renameTableNode) checkForCrossDbReferences( switch { case dependentObject.IsView(): if !allowCrossDatabaseViews.Get(&p.execCfg.Settings.SV) { - return errors.WithHintf( + return errors.WithHint( pgerror.Newf(pgcode.ObjectNotInPrerequisiteState, "a view %q reference to this table will refer to another databases after rename "+ "(see the '%s' cluster setting)", @@ -381,7 +381,7 @@ func (n *renameTableNode) checkForCrossDbReferences( } case dependentObject.IsSequence() && depType == owner: if !allowCrossDatabaseSeqOwner.Get(&p.execCfg.Settings.SV) { - return errors.WithHintf( + return errors.WithHint( pgerror.Newf(pgcode.ObjectNotInPrerequisiteState, "a sequence %q will be OWNED BY a table in a different database after rename "+ "(see the '%s' cluster setting)", @@ -392,7 +392,7 @@ func (n *renameTableNode) checkForCrossDbReferences( } case dependentObject.IsSequence() && depType == reference: if !allowCrossDatabaseSeqReferences.Get(&p.execCfg.Settings.SV) { - return errors.WithHintf( + return errors.WithHint( pgerror.Newf(pgcode.ObjectNotInPrerequisiteState, "a sequence %q will be referenced by a table in a different database after rename "+ "(see the '%s' cluster setting)", @@ -406,7 +406,7 @@ func (n *renameTableNode) checkForCrossDbReferences( if !allowCrossDatabaseViews.Get(&p.execCfg.Settings.SV) { // For view's dependent objects can only be // relations. - return errors.WithHintf( + return errors.WithHint( pgerror.Newf(pgcode.ObjectNotInPrerequisiteState, "this view will reference a table %q in another databases after rename "+ "(see the '%s' cluster setting)", @@ -419,7 +419,7 @@ func (n *renameTableNode) checkForCrossDbReferences( if !allowCrossDatabaseSeqReferences.Get(&p.execCfg.Settings.SV) { // For sequences dependent references can only be // a relations. - return errors.WithHintf( + return errors.WithHint( pgerror.Newf(pgcode.ObjectNotInPrerequisiteState, "this sequence will be referenced by a table %q in a different database after rename "+ "(see the '%s' cluster setting)", @@ -432,7 +432,7 @@ func (n *renameTableNode) checkForCrossDbReferences( if !allowCrossDatabaseSeqOwner.Get(&p.execCfg.Settings.SV) { // For sequences dependent owners can only be // a relations. 
- return errors.WithHintf( + return errors.WithHint( pgerror.Newf(pgcode.ObjectNotInPrerequisiteState, "this sequence will be OWNED BY a table %q in a different database after rename "+ "(see the '%s' cluster setting)", @@ -457,7 +457,7 @@ func (n *renameTableNode) checkForCrossDbReferences( if dependentObject.GetParentID() == targetDbDesc.GetID() { return nil } - return errors.WithHintf( + return errors.WithHint( pgerror.Newf(pgcode.ObjectNotInPrerequisiteState, "this view will reference a type %q in another databases after rename "+ "(see the '%s' cluster setting)", diff --git a/pkg/sql/run_control_test.go b/pkg/sql/run_control_test.go index 408256a19d23..693049f37235 100644 --- a/pkg/sql/run_control_test.go +++ b/pkg/sql/run_control_test.go @@ -1023,7 +1023,7 @@ func TestStatementTimeoutForSchemaChangeCommit(t *testing.T) { blockSchemaChange.Swap(true) if implicitTxn { _, err := conn.DB.ExecContext(ctx, "ALTER TABLE t1 ADD COLUMN j INT DEFAULT 32") - require.Errorf(t, err, sqlerrors.QueryTimeoutError.Error()) + require.ErrorContains(t, err, sqlerrors.QueryTimeoutError.Error()) require.Equal(t, 1, len(actualNotices)) require.Regexp(t, "The statement has timed out, but the following background jobs have been created and will continue running: \\d+", diff --git a/pkg/sql/schema_changer.go b/pkg/sql/schema_changer.go index 727b034c2fbb..ca7b23b95005 100644 --- a/pkg/sql/schema_changer.go +++ b/pkg/sql/schema_changer.go @@ -3317,7 +3317,7 @@ func (p *planner) CanPerformDropOwnedBy( // owner references are allowed. func (p *planner) CanCreateCrossDBSequenceOwnerRef() error { if !allowCrossDatabaseSeqOwner.Get(&p.execCfg.Settings.SV) { - return errors.WithHintf( + return errors.WithHint( pgerror.Newf(pgcode.FeatureNotSupported, "OWNED BY cannot refer to other databases; (see the '%s' cluster setting)", allowCrossDatabaseSeqOwnerSetting), @@ -3331,7 +3331,7 @@ func (p *planner) CanCreateCrossDBSequenceOwnerRef() error { // references are allowed. 
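These hunks swap errors.WithHintf for errors.WithHint wherever the hint string is already fully formatted; WithHintf is only warranted while format arguments remain. A short sketch against the cockroachdb/errors API (the error text and setting name are illustrative):

    package main

    import (
        "fmt"

        "github.com/cockroachdb/errors"
    )

    func main() {
        base := errors.New("cross-database sequence owner reference")

        // Hint has no remaining verbs: use WithHint, so a stray '%' in the
        // text can never be misread as a format directive.
        err := errors.WithHint(base,
            "set the cluster setting to enable cross-database references")

        // Format arguments still pending: WithHintf is appropriate.
        err2 := errors.WithHintf(base,
            "see the '%s' cluster setting", "sql.cross_db_sequence_owners.enabled")

        fmt.Println(err, err2)
    }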
func (p *planner) CanCreateCrossDBSequenceRef() error { if !allowCrossDatabaseSeqReferences.Get(&p.execCfg.Settings.SV) { - return errors.WithHintf( + return errors.WithHint( pgerror.Newf(pgcode.FeatureNotSupported, "sequence references cannot come from other databases; (see the '%s' cluster setting)", allowCrossDatabaseSeqReferencesSetting), diff --git a/pkg/sql/schemachanger/comparator_generated_test.go b/pkg/sql/schemachanger/comparator_generated_test.go index a89a2ea609a4..6b8b3ab66473 100644 --- a/pkg/sql/schemachanger/comparator_generated_test.go +++ b/pkg/sql/schemachanger/comparator_generated_test.go @@ -428,6 +428,11 @@ func TestSchemaChangeComparator_distsql_agg(t *testing.T) { var logicTestFile = "pkg/sql/logictest/testdata/logic_test/distsql_agg" runSchemaChangeComparatorTest(t, logicTestFile) } +func TestSchemaChangeComparator_distsql_automatic_partial_stats(t *testing.T) { + defer leaktest.AfterTest(t)() + var logicTestFile = "pkg/sql/logictest/testdata/logic_test/distsql_automatic_partial_stats" + runSchemaChangeComparatorTest(t, logicTestFile) +} func TestSchemaChangeComparator_distsql_automatic_stats(t *testing.T) { defer leaktest.AfterTest(t)() var logicTestFile = "pkg/sql/logictest/testdata/logic_test/distsql_automatic_stats" diff --git a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/BUILD.bazel b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/BUILD.bazel index 4f3bd0704a7d..95cd47b05fc4 100644 --- a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/BUILD.bazel +++ b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/BUILD.bazel @@ -20,6 +20,7 @@ go_library( "create_index.go", "create_schema.go", "create_sequence.go", + "database_zone_config.go", "dependencies.go", "drop_database.go", "drop_function.go", @@ -31,18 +32,23 @@ go_library( "drop_type.go", "drop_view.go", "helpers.go", + "index_zone_config.go", "process.go", "statement_control.go", + "table_zone_config.go", + "zone_config_helpers.go", ], importpath = "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scbuild/internal/scbuildstmt", visibility = ["//pkg/sql/schemachanger/scbuild:__subpackages__"], deps = [ + "//pkg/base", "//pkg/build", "//pkg/clusterversion", "//pkg/config/zonepb", "//pkg/docs", "//pkg/geo/geoindex", "//pkg/keys", + "//pkg/roachpb", "//pkg/security/username", "//pkg/server/serverpb", "//pkg/server/telemetry", @@ -53,6 +59,7 @@ go_library( "//pkg/sql/catalog/catpb", "//pkg/sql/catalog/colinfo", "//pkg/sql/catalog/descpb", + "//pkg/sql/catalog/fetchpb", "//pkg/sql/catalog/funcinfo", "//pkg/sql/catalog/schemadesc", "//pkg/sql/catalog/schemaexpr", @@ -60,6 +67,7 @@ go_library( "//pkg/sql/catalog/tabledesc", "//pkg/sql/catalog/typedesc", "//pkg/sql/catalog/zone", + "//pkg/sql/covering", "//pkg/sql/decodeusername", "//pkg/sql/paramparse", "//pkg/sql/parser", @@ -67,6 +75,8 @@ go_library( "//pkg/sql/pgwire/pgerror", "//pkg/sql/pgwire/pgnotice", "//pkg/sql/privilege", + "//pkg/sql/rowenc", + "//pkg/sql/rowenc/valueside", "//pkg/sql/schemachange", "//pkg/sql/schemachanger/scdecomp", "//pkg/sql/schemachanger/scerrors", @@ -87,6 +97,7 @@ go_library( "//pkg/sql/storageparam", "//pkg/sql/storageparam/indexstorageparam", "//pkg/sql/types", + "//pkg/util/encoding", "//pkg/util/errorutil/unimplemented", "//pkg/util/log/eventpb", "//pkg/util/log/logpb", diff --git a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/alter_table_add_column.go b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/alter_table_add_column.go index 830b5d56168c..4fa86bd1fda6 100644 --- 
a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/alter_table_add_column.go +++ b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/alter_table_add_column.go @@ -15,6 +15,8 @@ import ( "sort" "strings" + "github.com/cockroachdb/cockroach/pkg/docs" + "github.com/cockroachdb/cockroach/pkg/server/telemetry" "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catenumpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb" @@ -25,12 +27,14 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/typedesc" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" + "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgnotice" "github.com/cockroachdb/cockroach/pkg/sql/privilege" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scdecomp" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scerrors" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb" "github.com/cockroachdb/cockroach/pkg/sql/sem/catid" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" + "github.com/cockroachdb/cockroach/pkg/sql/sessiondatapb" "github.com/cockroachdb/cockroach/pkg/sql/sqlerrors" "github.com/cockroachdb/cockroach/pkg/sql/sqltelemetry" "github.com/cockroachdb/cockroach/pkg/sql/types" @@ -71,7 +75,11 @@ func alterTableAddColumn( } } if d.IsSerial { - panic(scerrors.NotImplementedErrorf(d, "contains serial data type")) + if b.SessionData().SerialNormalizationMode != sessiondatapb.SerialUsesRowID { + panic(scerrors.NotImplementedErrorf(d, "contains serial data type in unsupported mode")) + } + d = alterTableAddColumnSerial(b, d, tn) + } if d.GeneratedIdentity.IsGeneratedAsIdentity { panic(scerrors.NotImplementedErrorf(d, "contains generated identity type")) @@ -259,6 +267,43 @@ func alterTableAddColumn( } } +func alterTableAddColumnSerial( + b BuildCtx, d *tree.ColumnTableDef, tn *tree.TableName, +) *tree.ColumnTableDef { + if err := catalog.AssertValidSerialColumnDef(d, tn); err != nil { + panic(err) + } + + defType, err := tree.ResolveType(b, d.Type, b.SemaCtx().GetTypeResolver()) + if err != nil { + panic(err) + } + + telemetry.Inc(sqltelemetry.SerialColumnNormalizationCounter( + defType.Name(), b.SessionData().SerialNormalizationMode.String())) + + if defType.Width() < types.Int.Width() { + b.EvalCtx().ClientNoticeSender.BufferClientNotice( + b, + errors.WithHintf( + pgnotice.Newf( + "upgrading the column %s to %s to utilize the session serial_normalization setting", + d.Name.String(), + types.Int.SQLString(), + ), + "change the serial_normalization to sql_sequence or sql_sequence_cached if you wish "+ + "to use a smaller sized serial column at the cost of performance. See %s", + docs.URL("serial.html"), + ), + ) + } + + // Serial is an alias for a real column definition. False indicates a remapped alias. 
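For the one accepted mode (serial_normalization = rowid), the rewrite that alterTableAddColumnSerial delegates to catalog.UseRowID amounts to remapping SERIAL onto INT8 with a unique_rowid() default. A sketch of that remapping; the struct below is a simplified stand-in for tree.ColumnTableDef:

    package main

    import "fmt"

    // Stand-in for the relevant tree.ColumnTableDef fields.
    type columnDef struct {
        name        string
        typ         string
        isSerial    bool
        defaultExpr string
    }

    // useRowID mirrors what catalog.UseRowID does in rowid mode: SERIAL is
    // remapped to INT8 with a unique_rowid() default, and the serial flag
    // is cleared because the column is now an ordinary aliased definition.
    func useRowID(d columnDef) columnDef {
        d.typ = "INT8"
        d.defaultExpr = "unique_rowid()"
        d.isSerial = false // remapped alias, no longer serial
        return d
    }

    func main() {
        d := columnDef{name: "id", typ: "SERIAL8", isSerial: true}
        fmt.Printf("%+v\n", useRowID(d))
    }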
+ d.IsSerial = false + + return catalog.UseRowID(*d) +} + func columnNamesToIDs(b BuildCtx, tbl *scpb.Table) map[string]descpb.ColumnID { tableElts := b.QueryByID(tbl.TableID) namesToIDs := make(map[string]descpb.ColumnID) diff --git a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/alter_table_add_constraint.go b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/alter_table_add_constraint.go index 74760bf75f90..93907f075331 100644 --- a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/alter_table_add_constraint.go +++ b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/alter_table_add_constraint.go @@ -273,6 +273,8 @@ func alterTableAddForeignKey( panic(scerrors.NotImplementedErrorf(t, "cross DB FK reference is a deprecated feature "+ "and is no longer supported.")) } + // Disallow schema change if the FK references a table whose schema is locked. + panicIfSchemaIsLocked(b.QueryByID(referencedTableID)) // 6. Check that temporary tables can only reference temporary tables, or, // permanent tables can only reference permanent tables. diff --git a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/configure_zone.go b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/configure_zone.go index 3ee7c236f5a3..adb0d90e19af 100644 --- a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/configure_zone.go +++ b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/configure_zone.go @@ -11,45 +11,17 @@ package scbuildstmt import ( - "context" - "fmt" - - "github.com/cockroachdb/cockroach/pkg/config/zonepb" - "github.com/cockroachdb/cockroach/pkg/keys" - "github.com/cockroachdb/cockroach/pkg/server/serverpb" "github.com/cockroachdb/cockroach/pkg/server/telemetry" - "github.com/cockroachdb/cockroach/pkg/settings/cluster" - "github.com/cockroachdb/cockroach/pkg/sql/catalog" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/zone" - "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" - "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" - "github.com/cockroachdb/cockroach/pkg/sql/privilege" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scerrors" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb" "github.com/cockroachdb/cockroach/pkg/sql/sem/catid" - "github.com/cockroachdb/cockroach/pkg/sql/sem/eval" - "github.com/cockroachdb/cockroach/pkg/sql/sem/transform" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlclustersettings" - "github.com/cockroachdb/cockroach/pkg/sql/sqlerrors" "github.com/cockroachdb/cockroach/pkg/sql/sqltelemetry" "github.com/cockroachdb/cockroach/pkg/util/log/eventpb" "github.com/cockroachdb/errors" ) -// zoneConfigObjType is an enum to represent various types of "objects" that are -// supported by the CONFIGURE ZONE statement. This is used to determine the -// scpb that will be generated. -type zoneConfigObjType int - -const ( - // unspecifiedObj is used when the object type is not specified. - unspecifiedObj zoneConfigObjType = iota - databaseObj - tableObj -) - func SetZoneConfig(b BuildCtx, n *tree.SetZoneConfig) { // Block secondary tenants from ALTER CONFIGURE ZONE unless cluster setting is set. 
if err := sqlclustersettings.RequireSystemTenantOrClusterSetting( @@ -62,11 +34,11 @@ func SetZoneConfig(b BuildCtx, n *tree.SetZoneConfig) { // Supports: // - Database // - Table - // Left to support: // - Index + // Left to support: // - Partition/row // - System Ranges - objectType, err := fallBackIfNotSupportedZoneConfig(n) + zco, err := astToZoneConfigObject(b, n) if err != nil { panic(err) } @@ -78,16 +50,18 @@ func SetZoneConfig(b BuildCtx, n *tree.SetZoneConfig) { "YAML config is deprecated and not supported in the declarative schema changer")) } - if err := checkPrivilegeForSetZoneConfig(b, n, objectType); err != nil { + zs := n.ZoneSpecifier + if err := zco.checkPrivilegeForSetZoneConfig(b, zs); err != nil { panic(err) } - err = checkZoneConfigChangePermittedForMultiRegion(b, n.ZoneSpecifier, n.Options, objectType) - if err != nil { + if err := zco.checkZoneConfigChangePermittedForMultiRegion( + b, zs, n.Options, + ); err != nil { panic(err) } - options, err := getUpdatedZoneConfigOptions(b, n.Options, n.ZoneSpecifier.TelemetryName()) + options, err := getUpdatedZoneConfigOptions(b, n.Options, zs.TelemetryName()) if err != nil { panic(err) } @@ -97,23 +71,22 @@ func SetZoneConfig(b BuildCtx, n *tree.SetZoneConfig) { panic(err) } - telemetryName := n.ZoneSpecifier.TelemetryName() telemetry.Inc( - sqltelemetry.SchemaChangeAlterCounterWithExtra(telemetryName, "configure_zone"), + sqltelemetry.SchemaChangeAlterCounterWithExtra(zs.TelemetryName(), "configure_zone"), ) - zc, seqNum, err := applyZoneConfig(b, n, copyFromParentList, setters, objectType) - if err != nil { + if err = zco.applyZoneConfig(b, n, copyFromParentList, setters); err != nil { panic(err) } // For tables, we have to directly modify the AST to full resolve the table name. - if objectType == tableObj { + if n.TargetsTable() { resolvePhysicalTableName(b, n) } - elem := addZoneConfigToBuildCtx(b, n, zc, seqNum, objectType) - // Record that the change has occurred for auditing. + elem := zco.addZoneConfigToBuildCtx(b) + + // Log event for auditing eventDetails := eventpb.CommonZoneConfigDetails{ Target: tree.AsString(&n.ZoneSpecifier), Options: optionsStr, @@ -122,187 +95,32 @@ func SetZoneConfig(b BuildCtx, n *tree.SetZoneConfig) { b.LogEventForExistingPayload(elem, info) } -// resolvePhysicalTableName resolves the table name for a physical table -// in the SetZoneConfig AST by directly modifying its TableOrIndex.Table. -func resolvePhysicalTableName(b BuildCtx, n *tree.SetZoneConfig) { - uon := n.ZoneSpecifier.TableOrIndex.Table.ToUnresolvedObjectName() - tn := uon.ToTableName() - elts := b.ResolvePhysicalTable(uon, ResolveParams{}) - tbl := elts.Filter(func(_ scpb.Status, _ scpb.TargetStatus, e scpb.Element) bool { - switch e := e.(type) { - case *scpb.Table: - return true - case *scpb.View: - if e.IsMaterialized { - return true - } - case *scpb.Sequence: - return true - } - return false - }).MustGetOneElement() - tn.ObjectNamePrefix = b.NamePrefix(tbl) - n.ZoneSpecifier.TableOrIndex.Table = tn -} - -// checkPrivilegeForSetZoneConfig checks whether current user has the right -// privilege for configuring zone on the specified object(s). -func checkPrivilegeForSetZoneConfig( - b BuildCtx, n *tree.SetZoneConfig, objType zoneConfigObjType, -) error { - zs := n.ZoneSpecifier - - // For the system database, the user must be an admin. Otherwise, we - // require CREATE or ZONECONFIG privilege on the database in question. 
- reqNonAdminPrivs := []privilege.Kind{privilege.ZONECONFIG, privilege.CREATE} - if objType == databaseObj { - return checkPrivilegeForDBSetZoneConfig(b, zs, reqNonAdminPrivs) - } - - tblID, err := getTargetIDFromZoneSpecifier(b, zs, objType) - if err != nil { - return err - } - tblElem := mustRetrievePhysicalTableElem(b, tblID) - tblNamespaceElem := mustRetrieveNamespaceElem(b, tblID) - if tblNamespaceElem.DatabaseID == keys.SystemDatabaseID { - return b.CheckGlobalPrivilege(privilege.REPAIRCLUSTER) - } - // Can configure zone of a table (or its index) if user has either CREATE or - // ZONECONFIG privilege on the table. - tableCreatePrivilegeErr := b.CheckPrivilege(tblElem, privilege.CREATE) - tableZoneConfigPrivilegeErr := b.CheckPrivilege(tblElem, privilege.ZONECONFIG) - if tableCreatePrivilegeErr == nil || tableZoneConfigPrivilegeErr == nil { - return nil - } - - return sqlerrors.NewInsufficientPrivilegeOnDescriptorError(b.CurrentUser(), - reqNonAdminPrivs, string(catalog.Table), tblNamespaceElem.Name) -} - -func checkPrivilegeForDBSetZoneConfig( - b BuildCtx, zs tree.ZoneSpecifier, reqNonAdminPrivs []privilege.Kind, -) error { - if zs.Database == "system" { - return b.CheckGlobalPrivilege(privilege.REPAIRCLUSTER) - } - - // Can configure zone of a database if user has either CREATE or ZONECONFIG - // privilege on the database. - dbElem := b.ResolveDatabase(zs.Database, ResolveParams{}).FilterDatabase().MustGetOneElement() - dbCreatePrivilegeErr := b.CheckPrivilege(dbElem, privilege.CREATE) - dbZoneConfigPrivilegeErr := b.CheckPrivilege(dbElem, privilege.ZONECONFIG) - if dbZoneConfigPrivilegeErr == nil || dbCreatePrivilegeErr == nil { - return nil - } - - return sqlerrors.NewInsufficientPrivilegeOnDescriptorError(b.CurrentUser(), - reqNonAdminPrivs, string(catalog.Database), mustRetrieveNamespaceElem(b, dbElem.DatabaseID).Name) -} - -// checkZoneConfigChangePermittedForMultiRegion checks if a zone config -// change is permitted for a multi-region database or table. -// The change is permitted iff it is not modifying a protected multi-region -// field of the zone configs (as defined by zonepb.MultiRegionZoneConfigFields). -func checkZoneConfigChangePermittedForMultiRegion( - b BuildCtx, zs tree.ZoneSpecifier, options tree.KVOptions, objType zoneConfigObjType, -) error { - // If the user has specified that they're overriding, then the world is - // their oyster. - if b.SessionData().OverrideMultiRegionZoneConfigEnabled { - // Note that we increment the telemetry counter unconditionally here. - // It's possible that this will lead to over-counting as the user may - // have left the override on and is now updating a zone configuration - // that is not protected by the multi-region abstractions. To get finer - // grained counting however, would be more difficult to code, and may - // not even prove to be that valuable, so we have decided to live with - // the potential for over-counting. - telemetry.Inc(sqltelemetry.OverrideMultiRegionZoneConfigurationUser) - return nil - } - - isDB := objType == databaseObj - // Check if what we're altering is a multi-region entity. - if isDB { - dbRegionConfigElem := b.ResolveDatabase(zs.Database, - ResolveParams{}).FilterDatabaseRegionConfig().MustGetZeroOrOneElement() - if dbRegionConfigElem == nil { - // Not a multi-region database, we're done here. - return nil - } - } else { - // We're dealing with a table zone configuration change. Get the table descriptor so we can - // determine if this is a multi-region table. 
- tableID, err := getTargetIDFromZoneSpecifier(b, zs, objType) - if err != nil { - return err - } - if !isMultiRegionTable(b, tableID) { - // Not a multi-region table, we're done here. - return nil - } - } - - hint := "to override this error, SET override_multi_region_zone_config = true and reissue the command" - - // This is clearly an n^2 operation, but since there are only a single - // digit number of zone config keys, it's likely faster to do it this way - // than incur the memory allocation of creating a map. - for _, opt := range options { - for _, cfg := range zonepb.MultiRegionZoneConfigFields { - if opt.Key == cfg { - // User is trying to update a zone config value that's protected for - // multi-region databases. Return the constructed error. - err := errors.Newf("attempting to modify protected field %q of a multi-region zone "+ - "configuration", string(opt.Key), - ) - return errors.WithHint(err, hint) - } - } +func astToZoneConfigObject(b BuildCtx, n *tree.SetZoneConfig) (zoneConfigObject, error) { + if n.Discard { + return nil, scerrors.NotImplementedErrorf(n, "discarding zone configurations is not "+ + "supported in the DSC") } - - return nil -} - -// isMultiRegionTable returns True if this table is a multi-region table, -// meaning it has locality GLOBAL, or REGIONAL BY TABLE, or REGIONAL BY ROW. -func isMultiRegionTable(b BuildCtx, tableID catid.DescID) bool { - tableElems := b.QueryByID(tableID) - globalElem := tableElems.FilterTableLocalityGlobal().MustGetZeroOrOneElement() - primaryRegionElem := tableElems.FilterTableLocalityPrimaryRegion().MustGetZeroOrOneElement() - secondaryRegionElem := tableElems.FilterTableLocalitySecondaryRegion().MustGetZeroOrOneElement() - RBRElem := tableElems.FilterTableLocalityRegionalByRow().MustGetZeroOrOneElement() - return globalElem != nil || primaryRegionElem != nil || secondaryRegionElem != nil || - RBRElem != nil -} - -// getTargetIDFromZoneSpecifier attempts to find the ID of the target by the -// zone specifier. -// Recall that a zone specifier specifies either a database or a table. This -// function will return the ID of the database or the table. -func getTargetIDFromZoneSpecifier( - b BuildCtx, zs tree.ZoneSpecifier, objType zoneConfigObjType, -) (catid.DescID, error) { - if objType == databaseObj { + zs := n.ZoneSpecifier + // We are a database object. + if n.Database != "" { dbElem := b.ResolveDatabase(zs.Database, ResolveParams{}).FilterDatabase().MustGetOneElement() - return dbElem.DatabaseID, nil + return &databaseZoneConfigObj{databaseID: dbElem.DatabaseID}, nil } - return getTableIDFromZoneSpecifier(b, zs, objType) -} -// getTableIDFromZoneSpecifier attempts to find the table ID specified by the -// zone specifier. If the zone does not specify a table, a non-nil error is -// returned. Otherwise (for tables), the associated table ID is returned. -func getTableIDFromZoneSpecifier( - b BuildCtx, zs tree.ZoneSpecifier, objType zoneConfigObjType, -) (catid.DescID, error) { - if objType == databaseObj { - return 0, errors.AssertionFailedf("programming error: zone specifier is for a " + - "database; not a table") + // The rest of the cases are for table elements -- resolve the table ID now. + // Fallback to the legacy schema changer if the table name is not referenced. + // + // TODO(annie): remove this when we have something equivalent to + // expandMutableIndexName in the DSC. 
+ targetsIndex := n.TargetsIndex() && !n.TargetsPartition() + if targetsIndex && n.TableOrIndex.Table.Table() == "" { + return nil, scerrors.NotImplementedErrorf(n, "referencing an index without a table "+ + "prefix is not supported in the DSC") } - if objType != tableObj { - return 0, errors.AssertionFailedf("programming error: zs does not specify a proper " + - "target") + + if !n.TargetsTable() || n.TargetsPartition() { + return nil, scerrors.NotImplementedErrorf(n, "zone configurations on partitions "+ + "and system ranges are not supported in the DSC") } tblName := zs.TableOrIndex.Table.ToUnresolvedObjectName() elems := b.ResolvePhysicalTable(tblName, ResolveParams{}) @@ -320,766 +138,19 @@ func getTableIDFromZoneSpecifier( } }) if tableID == catid.InvalidDescID { - panic("uh oh") - } - return tableID, nil -} - -// getUpdatedZoneConfigOptions unpacks all kv options for a `CONFIGURE ZONE -// USING ...` stmt. It ensures all kv options are supported and the values are -// type-checked and normalized. -func getUpdatedZoneConfigOptions( - b BuildCtx, n tree.KVOptions, telemetryName string, -) (map[tree.Name]zone.OptionValue, error) { - - var options map[tree.Name]zone.OptionValue - // We have a CONFIGURE ZONE USING ... assignment. - if n != nil { - options = make(map[tree.Name]zone.OptionValue) - for _, opt := range n { - if _, alreadyExists := options[opt.Key]; alreadyExists { - return nil, pgerror.Newf(pgcode.InvalidParameterValue, - "duplicate zone config parameter: %q", tree.ErrString(&opt.Key)) - } - // Here we are constrained by the supported ZoneConfig fields, - // as described by zone.SupportedZoneConfigOptions. - req, ok := zone.SupportedZoneConfigOptions[opt.Key] - if !ok { - return nil, pgerror.Newf(pgcode.InvalidParameterValue, - "unsupported zone config parameter: %q", tree.ErrString(&opt.Key)) - } - telemetry.Inc( - sqltelemetry.SchemaSetZoneConfigCounter( - telemetryName, - string(opt.Key), - ), - ) - if opt.Value == nil { - options[opt.Key] = zone.OptionValue{InheritValue: true, ExplicitValue: nil} - continue - } - - // Type check and normalize value expr. - typedExpr, err := tree.TypeCheckAndRequire(b, opt.Value, b.SemaCtx(), req.RequiredType, - string(opt.Key)) - if err != nil { - return nil, err - } - etctx := transform.ExprTransformContext{} - valExpr, err := etctx.NormalizeExpr(b, b.EvalCtx(), typedExpr) - if err != nil { - return nil, err - } - - options[opt.Key] = zone.OptionValue{InheritValue: false, ExplicitValue: valExpr} - } - } - return options, nil -} - -func evaluateZoneOptions( - b BuildCtx, options map[tree.Name]zone.OptionValue, -) ( - optionsStr []string, - copyFromParentList []tree.Name, - setters []func(c *zonepb.ZoneConfig), - err error, -) { - if options != nil { - // Set from var = value attributes. - // - // We iterate over zoneOptionKeys instead of iterating over - // n.options directly so that the optionStr string constructed for - // the event log remains deterministic. - for i := range zone.ZoneOptionKeys { - name := (*tree.Name)(&zone.ZoneOptionKeys[i]) - val, ok := options[*name] - if !ok { - continue - } - // We don't add the setters for the fields that will copy values - // from the parents. These fields will be set by taking what - // value would apply to the zone and setting that value explicitly. - // Instead, we add the fields to a list that we use at a later time - // to copy values over. 
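getUpdatedZoneConfigOptions (removed above and re-added verbatim in zone_config_helpers.go later in the diff) rejects duplicate and unsupported keys and records a nil value as COPY FROM PARENT. A compact sketch of that unpacking, where the supported-key set and plain string values are stand-ins for the real typed, normalized expressions:

```go
package main

import "fmt"

type optionValue struct {
	inherit bool   // true models "COPY FROM PARENT" (nil value expression)
	value   string // stands in for the type-checked, normalized expression
}

// supported stands in for zone.SupportedZoneConfigOptions.
var supported = map[string]bool{"num_replicas": true, "gc.ttlseconds": true}

func unpackOptions(opts [][2]string) (map[string]optionValue, error) {
	out := make(map[string]optionValue)
	for _, kv := range opts {
		key, val := kv[0], kv[1]
		if _, dup := out[key]; dup {
			return nil, fmt.Errorf("duplicate zone config parameter: %q", key)
		}
		if !supported[key] {
			return nil, fmt.Errorf("unsupported zone config parameter: %q", key)
		}
		if val == "" { // empty models a nil expression: COPY FROM PARENT
			out[key] = optionValue{inherit: true}
			continue
		}
		out[key] = optionValue{value: val}
	}
	return out, nil
}

func main() {
	m, err := unpackOptions([][2]string{{"num_replicas", "5"}, {"gc.ttlseconds", ""}})
	fmt.Println(m, err)
}
```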
- inheritVal, expr := val.InheritValue, val.ExplicitValue - if inheritVal { - copyFromParentList = append(copyFromParentList, *name) - optionsStr = append(optionsStr, fmt.Sprintf("%s = COPY FROM PARENT", name)) - continue - } - datum, err := eval.Expr(b, b.EvalCtx(), expr) - if err != nil { - return nil, nil, nil, err - } - if datum == tree.DNull { - return nil, nil, nil, - pgerror.Newf(pgcode.InvalidParameterValue, "unsupported NULL value for %q", - tree.ErrString(name)) - } - opt := zone.SupportedZoneConfigOptions[*name] - if opt.CheckAllowed != nil { - if err := opt.CheckAllowed(b, b.ClusterSettings(), datum); err != nil { - return nil, nil, nil, err - } - } - setter := opt.Setter - setters = append(setters, func(c *zonepb.ZoneConfig) { setter(c, datum) }) - optionsStr = append(optionsStr, fmt.Sprintf("%s = %s", name, datum)) - } - } - return optionsStr, copyFromParentList, setters, nil -} - -func applyZoneConfig( - b BuildCtx, - n *tree.SetZoneConfig, - copyFromParentList []tree.Name, - setters []func(c *zonepb.ZoneConfig), - objType zoneConfigObjType, -) (*zonepb.ZoneConfig, uint32, error) { - // Determines the ID of the target object of the zone specifier. This is the - // ID of either a database or a table. - targetID, err := getTargetIDFromZoneSpecifier(b, n.ZoneSpecifier, objType) - if err != nil { - return nil, 0, err - } - - // TODO(annie): once we allow configuring zones for named zones/system ranges, - // we will need to guard against secondary tenants from configuring such - // ranges. - if objType == tableObj { - // Check that we are not trying to configure a system table. - if err = checkIfConfigurationAllowed(targetID); err != nil { - return nil, 0, err - } - } - - // Retrieve the partial zone configuration - partialZone, zc := retrievePartialZoneConfig(b, targetID, objType) - - // Retrieve the zone configuration. - // - // If the statement was USING DEFAULT, we want to ignore the zone - // config that exists on targetID and instead skip to the inherited - // default (default since the targetID is a database). For this, we - // use the last parameter getInheritedDefault to retrieveCompleteZoneConfig(). - // These zones are only used for validations. The merged zone will not - // be written. - _, completeZone, seqNum, err := retrieveCompleteZoneConfig(b, targetID, objType, n.SetDefault /* getInheritedDefault */) - if err != nil { - return nil, 0, err - } - - // We need to inherit zone configuration information from the correct zone, - // not completeZone. - { - // If we are operating on a zone, get all fields that the zone would - // inherit from its parent. We do this by using an empty zoneConfig - // and completing at the level of the current zone. - zoneInheritedFields := zonepb.ZoneConfig{} - if err := completeZoneConfig(b, targetID, objType, &zoneInheritedFields); err != nil { - return nil, 0, err - } - partialZone.CopyFromZone(zoneInheritedFields, copyFromParentList) - } - - newZoneForVerification := *completeZone - finalZone := *partialZone - - if n.SetDefault { - finalZone = *zonepb.NewZoneConfig() - } - - // Load settings from var = val assignments. If there were no such - // settings, (e.g. because the query specified CONFIGURE ZONE = or - // USING DEFAULT), the setter slice will be empty and this will be - // a no-op. This is innocuous. - for _, setter := range setters { - // A setter may fail with an error-via-panic. Catch those.
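The closure that follows converts setter panics back into ordinary errors and re-raises anything that is not an error. A self-contained sketch of that recover-to-error pattern (the setter here is a placeholder for the zone.SupportedZoneConfigOptions setters):

```go
package main

import (
	"errors"
	"fmt"
)

// applySetter runs a setter that may report failure by panicking with an
// error, and converts such panics back into ordinary errors. Panics that
// don't carry an error are re-raised untouched, as in the code above.
func applySetter(setter func()) (err error) {
	defer func() {
		if p := recover(); p != nil {
			if errP, ok := p.(error); ok {
				err = errP // catch and return the error
				return
			}
			panic(p) // nothing we know about; let it continue as a panic
		}
	}()
	setter()
	return nil
}

func main() {
	fmt.Println(applySetter(func() { panic(errors.New("invalid value for num_replicas")) }))
	fmt.Println(applySetter(func() {})) // <nil>
}
```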
- if err := func() (err error) { - defer func() { - if p := recover(); p != nil { - if errP, ok := p.(error); ok { - // Catch and return the error. - err = errP - } else { - // Nothing we know about, let it continue as a panic. - panic(p) - } - } - }() - - setter(&newZoneForVerification) - setter(&finalZone) - return nil - }(); err != nil { - return nil, 0, err - } - } - - // Validate that there are no conflicts in the zone setup. - if err := zonepb.ValidateNoRepeatKeysInZone(&newZoneForVerification); err != nil { - return nil, 0, err - } - - currentZone := zonepb.NewZoneConfig() - if zc != nil { - currentZone = zc - } - if err := validateZoneAttrsAndLocalities(b, currentZone, &newZoneForVerification); err != nil { - return nil, 0, err - } - - completeZone = &newZoneForVerification - partialZone = &finalZone - - // Since we are writing to a zone that is not a subzone, we need to - // make sure that the zone config is not considered a placeholder - // anymore. If the settings applied to this zone don't touch the - // NumReplicas field, set it to nil so that the zone isn't considered a - // placeholder anymore. - if zc != nil && partialZone.IsSubzonePlaceholder() { - partialZone.NumReplicas = nil - } - - // Finally, revalidate everything. Validate only the completeZone config. - if err := completeZone.Validate(); err != nil { - return nil, 0, pgerror.Wrap(err, pgcode.CheckViolation, "could not validate zone config") - } - - // Finally, check for the extra protection partial zone configs would - // require from changes made to parent zones. The extra protections are: - // - // RangeMinBytes and RangeMaxBytes must be set together - // LeasePreferences cannot be set unless Constraints/VoterConstraints are - // explicitly set - // Per-replica constraints cannot be set unless num_replicas is explicitly - // set - // Per-voter constraints cannot be set unless num_voters is explicitly set - if err := finalZone.ValidateTandemFields(); err != nil { - err = errors.Wrap(err, "could not validate zone config") - err = pgerror.WithCandidateCode(err, pgcode.InvalidParameterValue) - err = errors.WithHint(err, - "try ALTER ... CONFIGURE ZONE USING = COPY FROM PARENT [, ...] to "+ - "populate the field") - return nil, 0, err - } - return partialZone, seqNum, nil -} - -// checkIfConfigurationAllowed determines whether a zone config can be set. -// This consists of checking that we are not trying to configure a system table. -func checkIfConfigurationAllowed(targetID catid.DescID) error { - // Zones of "system config tables" (i.e. `system.descriptor` and - // `system.zones` table) and NamespaceTable cannot be configured. - if descpb.IsSystemConfigID(targetID) || targetID == keys.NamespaceTableID { - return pgerror.Newf(pgcode.InvalidParameterValue, - `cannot set zone configs for system config tables; `+ - `try setting your config on the entire "system" database instead`) - } - return nil -} - -// retrievePartialZoneConfig retrieves the partial zone configuration of the -// specified targetID. -func retrievePartialZoneConfig( - b BuildCtx, targetID catid.DescID, objectType zoneConfigObjType, -) (*zonepb.ZoneConfig, *zonepb.ZoneConfig) { - partialZone := zonepb.NewZoneConfig() - var zc *zonepb.ZoneConfig - - // Retrieve the partial zone configuration for specified objectType. Fall back - // to the default zone configuration if the zone configuration does not exist. 
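ValidateTandemFields, invoked above, enforces the set-together rules spelled out in the comment. A sketch of one such rule using an illustrative struct, where nil pointers model "not explicitly set" (the real checks operate on zonepb.ZoneConfig):

```go
package main

import (
	"errors"
	"fmt"
)

// zoneFields is an illustrative stand-in for the handful of zonepb fields
// involved in the tandem rules; pointers model "explicitly set".
type zoneFields struct {
	RangeMinBytes *int64
	RangeMaxBytes *int64
}

// validateTandem sketches one rule from the list above:
// range_min_bytes and range_max_bytes must be set together.
func validateTandem(z zoneFields) error {
	if (z.RangeMinBytes == nil) != (z.RangeMaxBytes == nil) {
		return errors.New("range_min_bytes and range_max_bytes must be set together")
	}
	return nil
}

func main() {
	min := int64(16 << 20)
	fmt.Println(validateTandem(zoneFields{RangeMinBytes: &min})) // error
	max := int64(512 << 20)
	fmt.Println(validateTandem(zoneFields{RangeMinBytes: &min, RangeMaxBytes: &max})) // <nil>
}
```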
- switch objectType { - case databaseObj: - var dbZoneConfigElem *scpb.DatabaseZoneConfig - dbZoneElems := b.QueryByID(targetID).FilterDatabaseZoneConfig() - dbZoneElems.ForEach(func(current scpb.Status, target scpb.TargetStatus, e *scpb.DatabaseZoneConfig) { - // We want to get the most recent change that has not applied yet. For transactions, this will - // be the most recent (last) zone config elem added. - if e.DatabaseID == targetID { - dbZoneConfigElem = e - } - }) - if dbZoneConfigElem != nil { - partialZone = dbZoneConfigElem.ZoneConfig - zc = dbZoneConfigElem.ZoneConfig - } - case tableObj: - var tblZoneConfigElem *scpb.TableZoneConfig - tblZoneElems := b.QueryByID(targetID).FilterTableZoneConfig() - tblZoneElems.ForEach(func(current scpb.Status, target scpb.TargetStatus, e *scpb.TableZoneConfig) { - // We want to get the most recent change that has not applied yet. For transactions, this will - // be the most recent (last) zone config elem added. - if e.TableID == targetID { - tblZoneConfigElem = e - } - }) - if tblZoneConfigElem != nil { - partialZone = tblZoneConfigElem.ZoneConfig - zc = tblZoneConfigElem.ZoneConfig - } - default: - panic(errors.AssertionFailedf("programming error: unsupported object type for " + - "CONFIGURE ZONE")) - } - return partialZone, zc -} - -// retrieveCompleteZoneConfig looks up the zone for the specified database. -// -// If `getInheritedDefault` is true, the direct zone configuration, if it exists, -// is ignored, and the default zone config that would apply if it did not exist -// is returned instead. This is because, if the stmt is `USING DEFAULT`, we want -// to ignore the zone config that exists on targetID and instead skip to the -// inherited default. -func retrieveCompleteZoneConfig( - b BuildCtx, targetID catid.DescID, objType zoneConfigObjType, getInheritedDefault bool, -) (zoneID descpb.ID, zone *zonepb.ZoneConfig, seqNum uint32, err error) { - zc := &zonepb.ZoneConfig{} - if getInheritedDefault { - zoneID, zc, seqNum, err = getInheritedDefaultZoneConfig(b, targetID) - } else { - zoneID, zc, _, _, seqNum, err = getZoneConfig(b, targetID) - } - if err != nil { - return 0, nil, 0, err - } - - completeZc := *zc - if err = completeZoneConfig(b, targetID, objType, &completeZc); err != nil { - return 0, nil, 0, err - } - - zone = &completeZc - return zoneID, zone, seqNum, nil -} - -// getInheritedDefaultZoneConfig returns the inherited default zone config of -// `targetID`. This means -// - if `targetID` is a table ID, returns the zone config of its parent database -// (if exists) or the DEFAULT RANGE. -// - otherwise, returns the zone config of the DEFAULT RANGE -func getInheritedDefaultZoneConfig( - b BuildCtx, targetID catid.DescID, -) (zoneID catid.DescID, zc *zonepb.ZoneConfig, seqNum uint32, err error) { - // Is `targetID` a table? - maybeTblElem := retrievePhysicalTableElem(b, targetID) - // Yes: get the zone config of its parent database. - if maybeTblElem != nil { - parentDBID := mustRetrieveNamespaceElem(b, targetID).DatabaseID - zoneID, zc, _, _, seqNum, err = getZoneConfig(b, parentDBID) - return zoneID, zc, seqNum, err - } - // No: get the zone config of the DEFAULT RANGE. - zoneID, zc, _, _, seqNum, err = getZoneConfig(b, keys.RootNamespaceID) - return zoneID, zc, seqNum, err -} - -// getZoneConfig attempts to find the zone config from `system.zones` with -// `targetID` (`targetID` is either a database ID or a table ID). 
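The removed getZoneConfig below (like its per-object replacements later in this diff) resolves a zone config by walking up the inheritance chain: the target itself, then a table's parent database, then the DEFAULT RANGE, with a guard against recursing at the root. A toy version of that walk, with maps standing in for system.zones and the namespace table:

```go
package main

import (
	"errors"
	"fmt"
)

// rootID plays the role of keys.RootNamespaceID (the DEFAULT RANGE).
const rootID = 0

var errNoZoneConfigApplies = errors.New("no zone config applies")

// lookupChain sketches the recursion in getZoneConfig: if an ID has no zone
// config, fall back to its parent (a table's database), and finally to the
// DEFAULT RANGE -- guarding against infinite recursion at the root.
func lookupChain(zones map[int]string, parents map[int]int, id int) (string, error) {
	if zc, ok := zones[id]; ok {
		return zc, nil
	}
	if parent, ok := parents[id]; ok {
		return lookupChain(zones, parents, parent)
	}
	if id != rootID {
		return lookupChain(zones, parents, rootID)
	}
	return "", errNoZoneConfigApplies
}

func main() {
	zones := map[int]string{rootID: "default"}
	parents := map[int]int{104: 100} // table 104 belongs to database 100
	fmt.Println(lookupChain(zones, parents, 104)) // "default": db 100 falls through to the root
}
```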
-func getZoneConfig( - b BuildCtx, targetID catid.DescID, -) ( - zoneID catid.DescID, - zc *zonepb.ZoneConfig, - subzoneID catid.DescID, - subzone *zonepb.ZoneConfig, - seqNum uint32, - err error, -) { - zc, seqNum, err = lookUpSystemZonesTable(b, targetID) - if err != nil { - return 0, nil, 0, nil, 0, err - } - - if zc != nil { - return zoneID, zc, 0, nil, seqNum, nil - } - - // No zone config for this ID. If `targetID` is a table, then recursively - // get zone config of its parent database. - tblElem := retrievePhysicalTableElem(b, targetID) - if tblElem != nil { - parentDBID := mustRetrieveNamespaceElem(b, targetID).DatabaseID - zoneID, zc, _, _, seqNum, err = getZoneConfig(b, parentDBID) - if err != nil { - return 0, nil, 0, nil, 0, err - } - return zoneID, zc, 0, nil, seqNum, nil - } - - // Otherwise, retrieve the default zone config, but only as long as that - // wasn't the ID we were trying to retrieve (avoid infinite recursion). - if targetID != keys.RootNamespaceID { - zoneID, zc, _, _, seqNum, err := getZoneConfig(b, keys.RootNamespaceID) - if err != nil { - return 0, nil, 0, nil, 0, err - } - return zoneID, zc, subzoneID, subzone, seqNum, nil - } - - // `targetID == keys.RootNamespaceID` but that zc config is not found - // in `system.zones` table. Return a special, recognizable error! - return 0, nil, 0, nil, 0, sqlerrors.ErrNoZoneConfigApplies -} - -// lookUpSystemZonesTable attempts to look up the zone config in `system.zones` -// table by `targetID`. -// If `targetID` is not found, a nil `zone` is returned. -func lookUpSystemZonesTable( - b BuildCtx, targetID catid.DescID, -) (zone *zonepb.ZoneConfig, seqNum uint32, err error) { - if keys.RootNamespaceID == uint32(targetID) { - zc, err := b.ZoneConfigGetter().GetZoneConfig(b, targetID) - if err != nil { - return nil, 0, err - } - zone = zc.ZoneConfigProto() - } else { - // It's a descriptor-backed target (i.e. a database ID or a table ID) - b.QueryByID(targetID).ForEach(func( - _ scpb.Status, _ scpb.TargetStatus, e scpb.Element, - ) { - switch e := e.(type) { - case *scpb.DatabaseZoneConfig: - if e.DatabaseID == targetID { - zone = e.ZoneConfig - seqNum = e.SeqNum - } - case *scpb.TableZoneConfig: - if e.TableID == targetID { - zone = e.ZoneConfig - seqNum = e.SeqNum - } - } - }) - } - return zone, seqNum, nil -} - -// completeZoneConfig takes a zone config pointer and fills in the -// missing fields by following the chain of inheritance. -// In the worst case, will have to inherit from the default zone config. -func completeZoneConfig( - b BuildCtx, targetID catid.DescID, objType zoneConfigObjType, zone *zonepb.ZoneConfig, -) error { - if zone.IsComplete() { - return nil - } - // Check to see if it's a table. If so, inherit from the database. - // For all other cases, inherit from the default. - if objType == tableObj { - dbID := mustRetrieveNamespaceElem(b, targetID).DatabaseID - _, dbZone, _, _, _, err := getZoneConfig(b, dbID) - if err != nil { - return err - } - zone.InheritFromParent(dbZone) - } - // Check if zone is complete. If not, inherit from the default zone config - if zone.IsComplete() { - return nil - } - _, defaultZone, _, _, _, err := getZoneConfig(b, keys.RootNamespaceID) - if err != nil { - return err - } - zone.InheritFromParent(defaultZone) - return nil -} - -// validateZoneAttrsAndLocalities ensures that all constraints/lease preferences -// specified in the new zone config snippet are actually valid, meaning that -// they match at least one node. 
This protects against user typos causing -// zone configs that silently don't work as intended. -// -// validateZoneAttrsAndLocalities is tenant aware in its validation. Secondary -// tenants don't have access to the NodeStatusServer, and as such, aren't -// allowed to set non-locality attributes in their constraints. Furthermore, -// their access is validated using the descs.RegionProvider. -func validateZoneAttrsAndLocalities(b BuildCtx, currentZone, newZone *zonepb.ZoneConfig) error { - // Avoid RPCs to the Node/Region server if we don't have anything to validate. - if len(newZone.Constraints) == 0 && len(newZone.VoterConstraints) == 0 && len(newZone.LeasePreferences) == 0 { - return nil - } - if b.Codec().ForSystemTenant() { - ss, err := b.NodesStatusServer().OptionalNodesStatusServer() - if err != nil { - return err - } - return validateZoneAttrsAndLocalitiesForSystemTenant(b, ss.ListNodesInternal, currentZone, newZone) - } - return validateZoneLocalitiesForSecondaryTenants( - b, b.GetRegions, currentZone, newZone, b.Codec(), b.ClusterSettings(), - ) -} - -type nodeGetter func(context.Context, *serverpb.NodesRequest) (*serverpb.NodesResponse, error) -type regionsGetter func(context.Context) (*serverpb.RegionsResponse, error) - -// validateZoneAttrsAndLocalitiesForSystemTenant performs constraint/ lease -// preferences validation for the system tenant. Only newly added constraints -// are validated. The system tenant is allowed to reference both locality and -// non-locality attributes as it has access to node information via the -// NodeStatusServer. -// -// For the system tenant, this only catches typos in required constraints. This -// is by design. We don't want to reject prohibited constraints whose -// attributes/localities don't match any of the current nodes because it's a -// reasonable use case to add prohibited constraints for a new set of nodes -// before adding the new nodes to the cluster. If you had to first add one of -// the nodes before creating the constraints, data could be replicated there -// that shouldn't be. -func validateZoneAttrsAndLocalitiesForSystemTenant( - b BuildCtx, getNodes nodeGetter, currentZone, newZone *zonepb.ZoneConfig, -) error { - nodes, err := getNodes(b, &serverpb.NodesRequest{}) - if err != nil { - return err - } - - toValidate := accumulateNewUniqueConstraints(currentZone, newZone) - - // Check that each constraint matches some store somewhere in the cluster. - for _, constraint := range toValidate { - // We skip validation for negative constraints. See the function-level comment. - if constraint.Type == zonepb.Constraint_PROHIBITED { - continue - } - var found bool - node: - for _, node := range nodes.Nodes { - for _, store := range node.StoreStatuses { - // We could alternatively use zonepb.StoreMatchesConstraint here to - // catch typos in prohibited constraints as well, but as noted in the - // function-level comment that could break very reasonable use cases for - // prohibited constraints. - if zonepb.StoreSatisfiesConstraint(store.Desc, constraint) { - found = true - break node - } - } - } - if !found { - return pgerror.Newf(pgcode.CheckViolation, - "constraint %q matches no existing nodes within the cluster - did you enter it correctly?", - constraint) - } - } - - return nil -} - -// validateZoneLocalitiesForSecondaryTenants performs constraint/lease -// preferences validation for secondary tenants. Only newly added constraints -// are validated. 
Unless SecondaryTenantsAllZoneConfigsEnabled is set to 'true', -// secondary tenants are only allowed to reference locality attributes as they -// only have access to region information via the serverpb.TenantStatusServer. -// In that case they're only allowed to reference the "region" and "zone" tiers. -// -// Unlike the system tenant, we also validate prohibited constraints. This is -// because secondary tenant must operate in the narrow view exposed via the -// serverpb.TenantStatusServer and are not allowed to configure arbitrary -// constraints (required or otherwise). -func validateZoneLocalitiesForSecondaryTenants( - ctx context.Context, - getRegions regionsGetter, - currentZone, newZone *zonepb.ZoneConfig, - codec keys.SQLCodec, - settings *cluster.Settings, -) error { - toValidate := accumulateNewUniqueConstraints(currentZone, newZone) - - // rs and zs will be lazily populated with regions and zones, respectively. - // These should not be accessed directly - use getRegionsAndZones helper - // instead. - var rs, zs map[string]struct{} - getRegionsAndZones := func() (regions, zones map[string]struct{}, _ error) { - if rs != nil { - return rs, zs, nil - } - resp, err := getRegions(ctx) - if err != nil { - return nil, nil, err - } - rs, zs = make(map[string]struct{}), make(map[string]struct{}) - for regionName, regionMeta := range resp.Regions { - rs[regionName] = struct{}{} - for _, zone := range regionMeta.Zones { - zs[zone] = struct{}{} - } - } - return rs, zs, nil - } - - for _, constraint := range toValidate { - switch constraint.Key { - case "zone": - _, zones, err := getRegionsAndZones() - if err != nil { - return err - } - _, found := zones[constraint.Value] - if !found { - return pgerror.Newf( - pgcode.CheckViolation, - "zone %q not found", - constraint.Value, - ) - } - case "region": - regions, _, err := getRegionsAndZones() - if err != nil { - return err - } - _, found := regions[constraint.Value] - if !found { - return pgerror.Newf( - pgcode.CheckViolation, - "region %q not found", - constraint.Value, - ) - } - default: - if err := sqlclustersettings.RequireSystemTenantOrClusterSetting( - codec, settings, sqlclustersettings.SecondaryTenantsAllZoneConfigsEnabled, - ); err != nil { - return err - } - } - } - return nil -} - -// accumulateNewUniqueConstraints returns a list of unique constraints in the -// given newZone config proto that are not in the currentZone -func accumulateNewUniqueConstraints(currentZone, newZone *zonepb.ZoneConfig) []zonepb.Constraint { - seenConstraints := make(map[zonepb.Constraint]struct{}) - retConstraints := make([]zonepb.Constraint, 0) - addToValidate := func(c zonepb.Constraint) { - if _, ok := seenConstraints[c]; ok { - // Already in the list or in the current zone config, nothing to do. - return - } - retConstraints = append(retConstraints, c) - seenConstraints[c] = struct{}{} - } - // First scan all the current zone config constraints. - for _, constraints := range currentZone.Constraints { - for _, constraint := range constraints.Constraints { - seenConstraints[constraint] = struct{}{} - } - } - for _, constraints := range currentZone.VoterConstraints { - for _, constraint := range constraints.Constraints { - seenConstraints[constraint] = struct{}{} - } - } - for _, leasePreferences := range currentZone.LeasePreferences { - for _, constraint := range leasePreferences.Constraints { - seenConstraints[constraint] = struct{}{} - } - } - - // Then scan all the new zone config constraints, adding the ones that - // were not seen already. 
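A self-contained sketch of that seen-set accumulation: seed the set with everything already in the current zone, then report each genuinely new constraint exactly once (the struct fields are illustrative stand-ins for zonepb.Constraint):

```go
package main

import "fmt"

type constraint struct {
	Type, Key, Value string
}

// newUniqueConstraints mirrors accumulateNewUniqueConstraints: seed a set
// with everything in the current zone, then keep only constraints from the
// new zone that were not already present, each reported once.
func newUniqueConstraints(current, next []constraint) []constraint {
	seen := make(map[constraint]struct{}, len(current))
	for _, c := range current {
		seen[c] = struct{}{}
	}
	var out []constraint
	for _, c := range next {
		if _, ok := seen[c]; ok {
			continue // already in the list or in the current zone config
		}
		seen[c] = struct{}{}
		out = append(out, c)
	}
	return out
}

func main() {
	cur := []constraint{{"required", "region", "us-east1"}}
	nxt := []constraint{{"required", "region", "us-east1"}, {"required", "zone", "us-east1-b"}}
	fmt.Println(newUniqueConstraints(cur, nxt)) // [{required zone us-east1-b}]
}
```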
- for _, constraints := range newZone.Constraints { - for _, constraint := range constraints.Constraints { - addToValidate(constraint) - } - } - for _, constraints := range newZone.VoterConstraints { - for _, constraint := range constraints.Constraints { - addToValidate(constraint) - } - } - for _, leasePreferences := range newZone.LeasePreferences { - for _, constraint := range leasePreferences.Constraints { - addToValidate(constraint) - } - } - return retConstraints -} - -// fallBackIfNotSupportedZoneConfig determines if the table has a database -// zone config. -func fallBackIfNotSupportedZoneConfig(n *tree.SetZoneConfig) (zoneConfigObjType, error) { - if n.Discard { - return unspecifiedObj, - scerrors.NotImplementedErrorf(n, "CONFIGURE ZONE DISCARD is not supported in DSC") - } - if n.Database != "" { - return databaseObj, nil + return nil, errors.AssertionFailedf("tableID not found for table %s", tblName) } + tzo := tableZoneConfigObj{tableID: tableID} + // We are a table object. if n.TargetsTable() && !n.TargetsIndex() && !n.TargetsPartition() { - return tableObj, nil + return &tzo, nil } - return unspecifiedObj, scerrors.NotImplementedErrorf(n, "unsupported CONFIGURE ZONE target") -} -// addZoneConfigToBuildCtx adds the zone config to the build context and returns -// the added element for logging. -func addZoneConfigToBuildCtx( - b BuildCtx, - n *tree.SetZoneConfig, - zc *zonepb.ZoneConfig, - seqNum uint32, - objType zoneConfigObjType, -) scpb.Element { - var elem scpb.Element - // Increment the value of seqNum to ensure a new zone config is being - // updated with a different seqNum. - seqNum += 1 - targetID, err := getTargetIDFromZoneSpecifier(b, n.ZoneSpecifier, objType) - if err != nil { - panic(err) - } - switch objType { - case databaseObj: - elem = &scpb.DatabaseZoneConfig{ - DatabaseID: targetID, - ZoneConfig: zc, - SeqNum: seqNum, - } - case tableObj: - elem = &scpb.TableZoneConfig{ - TableID: targetID, - ZoneConfig: zc, - SeqNum: seqNum, - } - default: - panic(errors.AssertionFailedf("programming error: unsupported object type for CONFIGURE ZONE")) + // We are an index object. 
+ if targetsIndex { + return &indexZoneConfigObj{tableZoneConfigObj: tzo}, nil } - b.Add(elem) - return elem -} -func retrievePhysicalTableElem(b BuildCtx, tableID catid.DescID) scpb.Element { - return b.QueryByID(tableID).Filter(func( - _ scpb.Status, _ scpb.TargetStatus, e scpb.Element, - ) bool { - switch e := e.(type) { - case *scpb.Table: - return e.TableID == tableID - case *scpb.View: - if e.IsMaterialized { - return e.ViewID == tableID - } - case *scpb.Sequence: - return e.SequenceID == tableID - } - return false - }).MustGetZeroOrOneElement() -} - -func mustRetrievePhysicalTableElem(b BuildCtx, tableID catid.DescID) scpb.Element { - physicalTableElem := retrievePhysicalTableElem(b, tableID) - if physicalTableElem == nil { - panic(errors.AssertionFailedf("programming error: cannot find a physical table "+ - "element for table %v", tableID)) - } - return physicalTableElem + return nil, errors.AssertionFailedf("unexpected zone config object") } diff --git a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/create_function.go b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/create_function.go index d8f7b70bcae1..b95ec0d072da 100644 --- a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/create_function.go +++ b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/create_function.go @@ -173,6 +173,15 @@ func CreateFunction(b BuildCtx, n *tree.CreateRoutine) { lang = v case tree.RoutineBodyStr: fnBodyStr = string(t) + case tree.RoutineSecurity: + s, err := funcinfo.SecurityToProto(t) + if err != nil { + panic(err) + } + b.Add(&scpb.FunctionSecurity{ + FunctionID: fnID, + Security: catpb.FunctionSecurity{Security: s}, + }) } } owner, ups := b.BuildUserPrivilegesFromDefaultPrivileges( diff --git a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/database_zone_config.go b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/database_zone_config.go new file mode 100644 index 000000000000..cfa4e2a5aa75 --- /dev/null +++ b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/database_zone_config.go @@ -0,0 +1,211 @@ +// Copyright 2024 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package scbuildstmt + +import ( + "github.com/cockroachdb/cockroach/pkg/config/zonepb" + "github.com/cockroachdb/cockroach/pkg/server/telemetry" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" + "github.com/cockroachdb/cockroach/pkg/sql/privilege" + "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb" + "github.com/cockroachdb/cockroach/pkg/sql/sem/catid" + "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" + "github.com/cockroachdb/cockroach/pkg/sql/sqlerrors" + "github.com/cockroachdb/cockroach/pkg/sql/sqltelemetry" +) + +// databaseZoneConfigObj is used to represent a database-specific zone configuration +// object.
+type databaseZoneConfigObj struct { + databaseID catid.DescID + zoneConfig *zonepb.ZoneConfig + seqNum uint32 +} + +var _ zoneConfigObject = &databaseZoneConfigObj{} + +func (dzo *databaseZoneConfigObj) addZoneConfigToBuildCtx(b BuildCtx) scpb.Element { + dzo.seqNum += 1 + elem := &scpb.DatabaseZoneConfig{ + DatabaseID: dzo.databaseID, + ZoneConfig: dzo.zoneConfig, + SeqNum: dzo.seqNum, + } + b.Add(elem) + return elem +} + +func (dzo *databaseZoneConfigObj) checkPrivilegeForSetZoneConfig( + b BuildCtx, zs tree.ZoneSpecifier, +) error { + // For the system database, the user must have the REPAIRCLUSTER privilege. + // Otherwise, we require CREATE or ZONECONFIG privilege on the database in question. + if zs.Database == "system" { + return b.CheckGlobalPrivilege(privilege.REPAIRCLUSTER) + } + + // Can configure zone of a database if user has either CREATE or ZONECONFIG + // privilege on the database. + dbElem := b.ResolveDatabase(zs.Database, ResolveParams{}).FilterDatabase().MustGetOneElement() + dbCreatePrivilegeErr := b.CheckPrivilege(dbElem, privilege.CREATE) + dbZoneConfigPrivilegeErr := b.CheckPrivilege(dbElem, privilege.ZONECONFIG) + if dbZoneConfigPrivilegeErr == nil || dbCreatePrivilegeErr == nil { + return nil + } + + reqNonAdminPrivs := []privilege.Kind{privilege.ZONECONFIG, privilege.CREATE} + return sqlerrors.NewInsufficientPrivilegeOnDescriptorError(b.CurrentUser(), + reqNonAdminPrivs, string(catalog.Database), + mustRetrieveNamespaceElem(b, dbElem.DatabaseID).Name) +} + +func (dzo *databaseZoneConfigObj) checkZoneConfigChangePermittedForMultiRegion( + b BuildCtx, zs tree.ZoneSpecifier, options tree.KVOptions, +) error { + // If the user has specified that they're overriding, then the world is + // their oyster. + if b.SessionData().OverrideMultiRegionZoneConfigEnabled { + // Note that we increment the telemetry counter unconditionally here. + // It's possible that this will lead to over-counting as the user may + // have left the override on and is now updating a zone configuration + // that is not protected by the multi-region abstractions. To get finer + // grained counting, however, would be more difficult to code, and may + // not even prove to be that valuable, so we have decided to live with + // the potential for over-counting. + telemetry.Inc(sqltelemetry.OverrideMultiRegionZoneConfigurationUser) + return nil + } + + // Check if what we're altering is a multi-region entity. + dbRegionConfigElem := b.ResolveDatabase(zs.Database, + ResolveParams{}).FilterDatabaseRegionConfig().MustGetZeroOrOneElement() + if dbRegionConfigElem == nil { + // Not a multi-region database, we're done here.
+ return nil + } + + return maybeMultiregionErrorWithHint(options) +} + +func (dzo *databaseZoneConfigObj) getTargetID() catid.DescID { + return dzo.databaseID +} + +func (dzo *databaseZoneConfigObj) retrievePartialZoneConfig(b BuildCtx) *zonepb.ZoneConfig { + sameDB := func(e *scpb.DatabaseZoneConfig) bool { + return e.DatabaseID == dzo.getTargetID() + } + mostRecentElem := findMostRecentZoneConfig(dzo, func(id catid.DescID) *scpb.ElementCollection[*scpb.DatabaseZoneConfig] { + return b.QueryByID(id).FilterDatabaseZoneConfig() + }, sameDB) + + if mostRecentElem != nil { + dzo.zoneConfig = mostRecentElem.ZoneConfig + dzo.seqNum = mostRecentElem.SeqNum + } + + return dzo.zoneConfig +} + +func (dzo *databaseZoneConfigObj) retrieveCompleteZoneConfig( + b BuildCtx, getInheritedDefault bool, +) (*zonepb.ZoneConfig, *zonepb.Subzone, error) { + var err error + zc := &zonepb.ZoneConfig{} + if getInheritedDefault { + zc, err = dzo.getInheritedDefaultZoneConfig(b) + } else { + zc, _, err = dzo.getZoneConfig(b, false /* inheritDefaultRange */) + } + if err != nil { + return nil, nil, err + } + + completeZc := *zc + if err = dzo.completeZoneConfig(b, &completeZc); err != nil { + return nil, nil, err + } + + return zc, nil, nil +} + +func (dzo *databaseZoneConfigObj) completeZoneConfig(b BuildCtx, zone *zonepb.ZoneConfig) error { + // Check if the zone is already complete. If not, inherit from the default + // zone config. + if zone.IsComplete() { + return nil + } + defaultZone, _, err := dzo.getZoneConfig(b, true /* inheritDefaultRange */) + if err != nil { + return err + } + zone.InheritFromParent(defaultZone) + return nil +} + +func (dzo *databaseZoneConfigObj) setZoneConfigToWrite(zone *zonepb.ZoneConfig) { + dzo.zoneConfig = zone +} + +func (dzo *databaseZoneConfigObj) getInheritedDefaultZoneConfig( + b BuildCtx, +) (*zonepb.ZoneConfig, error) { + // Get the zone config of the DEFAULT RANGE. + zc, _, err := dzo.getZoneConfig(b, true /* inheritDefaultRange */) + return zc, err +} + +func (dzo *databaseZoneConfigObj) getZoneConfig( + b BuildCtx, inheritDefaultRange bool, +) (*zonepb.ZoneConfig, *zonepb.ZoneConfig, error) { + var subzones []zonepb.Subzone + zc, subzones, err := lookUpSystemZonesTable(b, dzo, inheritDefaultRange) + if err != nil { + return nil, nil, err + } + + // If the zone config exists, we know that it is not a subzone placeholder. + if zc != nil { + return zc, nil, err + } + + zc = zonepb.NewZoneConfig() + zc.Subzones = subzones + subzone := zc + + // No zone config for this ID. Retrieve the default zone config, but only as + // long as that wasn't the ID we were trying to retrieve + // (to avoid infinite recursion). + if !inheritDefaultRange { + zc, _, err := dzo.getZoneConfig(b, true /* inheritDefaultRange */) + if err != nil { + return nil, nil, err + } + return zc, subzone, nil + } + + // `targetID == keys.RootNamespaceID` but that zc config is not found + // in `system.zones` table. Return a special, recognizable error!
+ return nil, nil, sqlerrors.ErrNoZoneConfigApplies +} + +func (dzo *databaseZoneConfigObj) applyZoneConfig( + b BuildCtx, + n *tree.SetZoneConfig, + copyFromParentList []tree.Name, + setters []func(c *zonepb.ZoneConfig), +) error { + partialZone, err := prepareZoneConfig(b, n, copyFromParentList, setters, dzo) + dzo.setZoneConfigToWrite(partialZone) + return err +} diff --git a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/drop_index.go b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/drop_index.go index c2b0a687ed71..d3807c2c2208 100644 --- a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/drop_index.go +++ b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/drop_index.go @@ -101,6 +101,7 @@ func maybeDropIndex( // We don't support handling zone config related properties for tables, so // throw an unsupported error. fallBackIfSubZoneConfigExists(b, nil, sie.TableID) + panicIfSchemaIsLocked(b.QueryByID(sie.TableID)) // Cannot drop the index if not CASCADE and a unique constraint depends on it. if dropBehavior != tree.DropCascade && sie.IsUnique && !sie.IsCreatedExplicitly { panic(errors.WithHint( @@ -109,7 +110,6 @@ func maybeDropIndex( "use CASCADE if you really want to drop it.", )) } - panicIfSchemaIsLocked(b.QueryByID(sie.TableID)) dropSecondaryIndex(b, indexName, dropBehavior, sie) return sie } diff --git a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/helpers.go b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/helpers.go index d9fa717c374c..63b211189cfe 100644 --- a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/helpers.go +++ b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/helpers.go @@ -1679,3 +1679,43 @@ func retrieveColumnComputeExpression( columnType := mustRetrieveColumnTypeElem(b, tableID, columnID) return columnType.ComputeExpr } + +// mustRetrieveIndexColumnElements retrieves the index column elements associated +// with the given indexID. +func mustRetrieveIndexColumnElements( + b BuildCtx, tableID catid.DescID, indexID catid.IndexID, +) []*scpb.IndexColumn { + // Get the index columns for indexID. + var idxCols []*scpb.IndexColumn + b.QueryByID(tableID).FilterIndexColumn(). + Filter(func(current scpb.Status, target scpb.TargetStatus, e *scpb.IndexColumn) bool { + return e.IndexID == indexID + }).ForEach(func(current scpb.Status, target scpb.TargetStatus, e *scpb.IndexColumn) { + idxCols = append(idxCols, e) + }) + if len(idxCols) == 0 { + panic(errors.AssertionFailedf("programming error: cannot find an IndexColumn "+ + "element for index ID %v", indexID)) + } + return idxCols +} + +// mustRetrievePhysicalTableElem will resolve a descriptor ID to a physical table +// element. A "physical" table element includes tables, views, and sequences. +func mustRetrievePhysicalTableElem(b BuildCtx, descID catid.DescID) scpb.Element { + return b.QueryByID(descID).Filter(func( + _ scpb.Status, _ scpb.TargetStatus, e scpb.Element, + ) bool { + switch e := e.(type) { + case *scpb.Table: + return e.TableID == descID + case *scpb.View: + if e.IsMaterialized { + return e.ViewID == descID + } + case *scpb.Sequence: + return e.SequenceID == descID + } + return false + }).MustGetOneElement() +} diff --git a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/index_zone_config.go b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/index_zone_config.go new file mode 100644 index 000000000000..9247b35c00b8 --- /dev/null +++ b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/index_zone_config.go @@ -0,0 +1,267 @@ +// Copyright 2024 The Cockroach Authors.
+// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package scbuildstmt + +import ( + "github.com/cockroachdb/cockroach/pkg/config/zonepb" + "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" + "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" + "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb" + "github.com/cockroachdb/cockroach/pkg/sql/sem/catid" + "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" + "github.com/cockroachdb/cockroach/pkg/util/protoutil" + "github.com/cockroachdb/errors" +) + +// indexZoneConfigObj is used to represent an index-specific zone configuration +// object. +type indexZoneConfigObj struct { + tableZoneConfigObj + indexID catid.IndexID + indexSubzone *zonepb.Subzone + seqNum uint32 +} + +var _ zoneConfigObject = &indexZoneConfigObj{} + +func (izo *indexZoneConfigObj) getTableZoneConfig() *zonepb.ZoneConfig { + return izo.tableZoneConfigObj.zoneConfig +} + +func (izo *indexZoneConfigObj) addZoneConfigToBuildCtx(b BuildCtx) scpb.Element { + izo.seqNum += 1 + subzones := []zonepb.Subzone{*izo.indexSubzone} + + // Merge the new subzones with the old subzones so that we can generate + // accurate subzone spans. + parentZoneConfig := izo.getTableZoneConfig() + if parentZoneConfig != nil { + parentZoneConfig.SetSubzone(*izo.indexSubzone) + subzones = parentZoneConfig.Subzones + } + + ss, err := generateSubzoneSpans(b, izo.tableID, subzones, izo.indexID, "") + if err != nil { + panic(err) + } + + elem := &scpb.IndexZoneConfig{ + TableID: izo.tableID, + IndexID: izo.indexID, + Subzone: *izo.indexSubzone, + SubzoneSpans: ss, + SeqNum: izo.seqNum, + } + b.Add(elem) + return elem +} + +func (izo *indexZoneConfigObj) retrievePartialZoneConfig(b BuildCtx) *zonepb.ZoneConfig { + sameIdx := func(e *scpb.IndexZoneConfig) bool { + return e.TableID == izo.getTargetID() && e.IndexID == izo.indexID + } + mostRecentElem := findMostRecentZoneConfig(izo, func(id catid.DescID) *scpb.ElementCollection[*scpb.IndexZoneConfig] { + return b.QueryByID(id).FilterIndexZoneConfig() + }, sameIdx) + + if mostRecentElem != nil { + idxZc := zonepb.NewZoneConfig() + idxZc.Subzones = []zonepb.Subzone{mostRecentElem.Subzone} + izo.zoneConfig = idxZc + izo.seqNum = mostRecentElem.SeqNum + } + + return izo.zoneConfig +} + +func (izo *indexZoneConfigObj) retrieveCompleteZoneConfig( + b BuildCtx, getInheritedDefault bool, +) (*zonepb.ZoneConfig, *zonepb.Subzone, error) { + var placeholder *zonepb.ZoneConfig + var err error + zc := &zonepb.ZoneConfig{} + if getInheritedDefault { + zc, err = izo.getInheritedDefaultZoneConfig(b) + } else { + zc, placeholder, err = izo.getZoneConfig(b, false /* inheritDefaultRange */) + } + if err != nil { + return nil, nil, err + } + + completeZc := *zc + if err = izo.completeZoneConfig(b, &completeZc); err != nil { + return nil, nil, err + } + + var subzone *zonepb.Subzone + indexID := izo.indexID + if placeholder != nil { + // TODO(annie): once we support partitions, we will need to pass in the + // actual partition name here.
+ if subzone = placeholder.GetSubzone(uint32(indexID), ""); subzone != nil { + if indexSubzone := placeholder.GetSubzone(uint32(indexID), ""); indexSubzone != nil { + subzone.Config.InheritFromParent(&indexSubzone.Config) + } + subzone.Config.InheritFromParent(zc) + return placeholder, subzone, nil + } + } else { + if subzone = zc.GetSubzone(uint32(indexID), ""); subzone != nil { + if indexSubzone := zc.GetSubzone(uint32(indexID), ""); indexSubzone != nil { + subzone.Config.InheritFromParent(&indexSubzone.Config) + } + subzone.Config.InheritFromParent(zc) + } + } + return zc, subzone, nil +} + +func (izo *indexZoneConfigObj) setZoneConfigToWrite(zone *zonepb.ZoneConfig) { + var subzoneToWrite *zonepb.Subzone + for _, subzone := range zone.Subzones { + if subzone.IndexID == uint32(izo.indexID) && len(subzone.PartitionName) == 0 { + subzoneToWrite = &subzone + } + } + izo.indexSubzone = subzoneToWrite +} + +// getInheritedFieldsForPartialSubzone returns the set of inherited fields for +// a partial subzone based off of its parent zone. +func (izo *indexZoneConfigObj) getInheritedFieldsForPartialSubzone( + b BuildCtx, partialZone *zonepb.ZoneConfig, +) (*zonepb.ZoneConfig, error) { + // We are operating on a subZone and need to inherit all remaining + // unset fields in its parent zone, which is partialZone. + zoneInheritedFields := *partialZone + if err := izo.completeZoneConfig(b, &zoneInheritedFields); err != nil { + return nil, err + } + // Since we have just an index, we should copy from the inherited + // zone's fields (whether that was the table or database). + return &zoneInheritedFields, nil +} + +func (izo *indexZoneConfigObj) applyZoneConfig( + b BuildCtx, + n *tree.SetZoneConfig, + copyFromParentList []tree.Name, + setters []func(c *zonepb.ZoneConfig), +) error { + // TODO(annie): once we allow configuring zones for named zones/system ranges, + // we will need to guard against secondary tenants from configuring such + // ranges. + + // We are configuring an index. Determine the index ID and fill this + // information out in our zoneConfigObject. + fillIndexAndPartitionFromZoneSpecifier(b, n.ZoneSpecifier, izo) + indexID := izo.indexID + tempIndexID := mustRetrieveIndexElement(b, izo.getTargetID(), indexID).TemporaryIndexID + + // Retrieve the partial zone configuration + partialZone := izo.retrievePartialZoneConfig(b) + + subzonePlaceholder := false + // No zone was found. Possibly a SubzonePlaceholder depending on the index. + if partialZone == nil { + partialZone = zonepb.NewZoneConfig() + subzonePlaceholder = true + } + currentZone := protoutil.Clone(partialZone).(*zonepb.ZoneConfig) + + var partialSubzone *zonepb.Subzone + partialSubzone = partialZone.GetSubzoneExact(uint32(indexID), "") + if partialSubzone == nil { + partialSubzone = &zonepb.Subzone{Config: *zonepb.NewZoneConfig()} + } + + // Retrieve the zone configuration. + // + // If the statement was USING DEFAULT, we want to ignore the zone + // config that exists on targetID and instead skip to the inherited + // default (whichever applies -- a database if targetID is a table, default + // if targetID is a database, etc.). For this, we use the last parameter + // getInheritedDefault to retrieveCompleteZoneConfig(). + // These zones are only used for validations. The merged zone will not + // be written. 
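The subzone resolution above leans on zonepb's InheritFromParent, which only copies fields that are still unset, so an index subzone keeps its own overrides while filling gaps from the table (and, transitively, the database and default range). A sketch of that contract with an illustrative two-field config:

```go
package main

import "fmt"

// subzoneConfig is an illustrative stand-in for zonepb.ZoneConfig fields;
// nil means "unset, inherit from parent".
type subzoneConfig struct {
	NumReplicas *int32
	GCTTL       *int32
}

// inheritFromParent copies only the fields that are still unset, which is
// the contract of zonepb's InheritFromParent used above.
func (c *subzoneConfig) inheritFromParent(parent *subzoneConfig) {
	if c.NumReplicas == nil {
		c.NumReplicas = parent.NumReplicas
	}
	if c.GCTTL == nil {
		c.GCTTL = parent.GCTTL
	}
}

func main() {
	three, ttl := int32(3), int32(90000)
	table := subzoneConfig{NumReplicas: &three, GCTTL: &ttl}
	five := int32(5)
	index := subzoneConfig{NumReplicas: &five} // index subzone overrides replicas only
	index.inheritFromParent(&table)
	fmt.Println(*index.NumReplicas, *index.GCTTL) // 5 90000
}
```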
+ completeZone, completeSubZone, err := izo.retrieveCompleteZoneConfig(b, + n.SetDefault /* getInheritedDefault */) + if err != nil { + return err + } + + // We need to inherit zone configuration information from the correct zone, + // not completeZone. + { + zoneInheritedFields, err := izo.getInheritedFieldsForPartialSubzone(b, partialZone) + if err != nil { + return err + } + partialSubzone.Config.CopyFromZone(*zoneInheritedFields, copyFromParentList) + } + + // Determine where to load the configuration. + newZone := *completeZone + if completeSubZone != nil { + newZone = completeSubZone.Config + } + + // If an existing subzone is being modified, finalZone is overridden. + finalZone := partialSubzone.Config + + if n.SetDefault { + finalZone = *zonepb.NewZoneConfig() + } + + // Fill in our zone configs with var = val assignments. + if err := loadSettingsToZoneConfigs(setters, &newZone, &finalZone); err != nil { + return err + } + + // Validate that there are no conflicts in the zone setup. + if err := zonepb.ValidateNoRepeatKeysInZone(&newZone); err != nil { + return err + } + + if err := validateZoneAttrsAndLocalities(b, currentZone, &newZone); err != nil { + return err + } + + // Fill in the final zone config with subzones. + fillZoneConfigsForSubzones(indexID, "", tempIndexID, subzonePlaceholder, completeZone, + partialZone, newZone, finalZone) + + // Finally, revalidate everything. Validate only the completeZone config. + if err := completeZone.Validate(); err != nil { + return pgerror.Wrap(err, pgcode.CheckViolation, "could not validate zone config") + } + + // Finally, check for the extra protection partial zone configs would + // require from changes made to parent zones. The extra protections are: + // + // RangeMinBytes and RangeMaxBytes must be set together + // LeasePreferences cannot be set unless Constraints/VoterConstraints are + // explicitly set + // Per-replica constraints cannot be set unless num_replicas is explicitly + // set + // Per-voter constraints cannot be set unless num_voters is explicitly set + if err := finalZone.ValidateTandemFields(); err != nil { + err = errors.Wrap(err, "could not validate zone config") + err = pgerror.WithCandidateCode(err, pgcode.InvalidParameterValue) + err = errors.WithHint(err, + "try ALTER ... CONFIGURE ZONE USING = COPY FROM PARENT [, ...] to "+ + "populate the field") + return err + } + izo.setZoneConfigToWrite(partialZone) + return err +} diff --git a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/table_zone_config.go b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/table_zone_config.go new file mode 100644 index 000000000000..b8d4eca6672c --- /dev/null +++ b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/table_zone_config.go @@ -0,0 +1,198 @@ +// Copyright 2024 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package scbuildstmt + +import ( + "github.com/cockroachdb/cockroach/pkg/config/zonepb" + "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/server/telemetry" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" + "github.com/cockroachdb/cockroach/pkg/sql/privilege" + "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb" + "github.com/cockroachdb/cockroach/pkg/sql/sem/catid" + "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" + "github.com/cockroachdb/cockroach/pkg/sql/sqlerrors" + "github.com/cockroachdb/cockroach/pkg/sql/sqltelemetry" +) + +// tableZoneConfigObj is used to represent a table-specific zone configuration +// object. +type tableZoneConfigObj struct { + databaseZoneConfigObj + tableID catid.DescID + zoneConfig *zonepb.ZoneConfig + seqNum uint32 +} + +var _ zoneConfigObject = &tableZoneConfigObj{} + +func (tzo *tableZoneConfigObj) addZoneConfigToBuildCtx(b BuildCtx) scpb.Element { + tzo.seqNum += 1 + elem := &scpb.TableZoneConfig{ + TableID: tzo.tableID, + ZoneConfig: tzo.zoneConfig, + SeqNum: tzo.seqNum, + } + b.Add(elem) + return elem +} + +func (tzo *tableZoneConfigObj) checkPrivilegeForSetZoneConfig( + b BuildCtx, zs tree.ZoneSpecifier, +) error { + // TODO(#125882): currently, we fall back to the legacy schema changer below + // if the zone config target is a system table. The only thing the legacy + // schema changer will do is populate an error -- since configuring system + // tables is not allowed. We should add this check + // (checkIfConfigurationAllowed) back in DSC-land when our builder doesn't + // panic on system tables. + tblElem := mustRetrievePhysicalTableElem(b, tzo.tableID) + tblNamespaceElem := mustRetrieveNamespaceElem(b, tzo.tableID) + if tblNamespaceElem.DatabaseID == keys.SystemDatabaseID { + return b.CheckGlobalPrivilege(privilege.REPAIRCLUSTER) + } + // Can configure zone of a table (or its index) if user has either CREATE or + // ZONECONFIG privilege on the table. + tableCreatePrivilegeErr := b.CheckPrivilege(tblElem, privilege.CREATE) + tableZoneConfigPrivilegeErr := b.CheckPrivilege(tblElem, privilege.ZONECONFIG) + if tableCreatePrivilegeErr == nil || tableZoneConfigPrivilegeErr == nil { + return nil + } + + reqNonAdminPrivs := []privilege.Kind{privilege.ZONECONFIG, privilege.CREATE} + return sqlerrors.NewInsufficientPrivilegeOnDescriptorError(b.CurrentUser(), + reqNonAdminPrivs, string(catalog.Table), tblNamespaceElem.Name) +} + +func (tzo *tableZoneConfigObj) checkZoneConfigChangePermittedForMultiRegion( + b BuildCtx, zs tree.ZoneSpecifier, options tree.KVOptions, +) error { + // If the user has specified that they're overriding, then the world is + // their oyster. + if b.SessionData().OverrideMultiRegionZoneConfigEnabled { + // Note that we increment the telemetry counter unconditionally here. + // It's possible that this will lead to over-counting as the user may + // have left the override on and is now updating a zone configuration + // that is not protected by the multi-region abstractions. To get finer + // grained counting however, would be more difficult to code, and may + // not even prove to be that valuable, so we have decided to live with + // the potential for over-counting. + telemetry.Inc(sqltelemetry.OverrideMultiRegionZoneConfigurationUser) + return nil + } + + // We're dealing with a table-based zone configuration change. Determine + // if this is a multi-region table. 
+ if !isMultiRegionTable(b, tzo.tableID) { + // Not a multi-region table, we're done here. + return nil + } + + return maybeMultiregionErrorWithHint(options) +} + +func (tzo *tableZoneConfigObj) getTargetID() catid.DescID { + return tzo.tableID +} + +func (tzo *tableZoneConfigObj) retrievePartialZoneConfig(b BuildCtx) *zonepb.ZoneConfig { + sameTbl := func(e *scpb.TableZoneConfig) bool { + return e.TableID == tzo.getTargetID() + } + mostRecentElem := findMostRecentZoneConfig(tzo, func(id catid.DescID) *scpb.ElementCollection[*scpb.TableZoneConfig] { + return b.QueryByID(id).FilterTableZoneConfig() + }, sameTbl) + + if mostRecentElem != nil { + tzo.zoneConfig = mostRecentElem.ZoneConfig + tzo.seqNum = mostRecentElem.SeqNum + } + + return tzo.zoneConfig +} + +func (tzo *tableZoneConfigObj) completeZoneConfig(b BuildCtx, zone *zonepb.ZoneConfig) error { + if zone.IsComplete() { + return nil + } + // For tables, inherit from the database. + dbID := mustRetrieveNamespaceElem(b, tzo.getTargetID()).DatabaseID + dzo := databaseZoneConfigObj{databaseID: dbID} + dbZone, _, err := dzo.getZoneConfig(b, false /* inheritDefaultRange */) + if err != nil { + return err + } + zone.InheritFromParent(dbZone) + // Check if zone is complete. If not, inherit from the default zone config + if zone.IsComplete() { + return nil + } + defaultZone, _, err := tzo.getZoneConfig(b, true /* inheritDefaultRange */) + if err != nil { + return err + } + zone.InheritFromParent(defaultZone) + return nil +} + +func (tzo *tableZoneConfigObj) setZoneConfigToWrite(zone *zonepb.ZoneConfig) { + tzo.zoneConfig = zone +} + +func (tzo *tableZoneConfigObj) getInheritedDefaultZoneConfig( + b BuildCtx, +) (zc *zonepb.ZoneConfig, err error) { + targetID := tzo.getTargetID() + parentDBID := mustRetrieveNamespaceElem(b, targetID).DatabaseID + dzo := databaseZoneConfigObj{databaseID: parentDBID} + zc, _, err = dzo.getZoneConfig(b, false /* inheritDefaultRange */) + return zc, err +} + +func (tzo *tableZoneConfigObj) getZoneConfig( + b BuildCtx, inheritDefaultRange bool, +) (*zonepb.ZoneConfig, *zonepb.ZoneConfig, error) { + var subzones []zonepb.Subzone + zc, subzones, err := lookUpSystemZonesTable(b, tzo, inheritDefaultRange) + if err != nil { + return nil, nil, err + } + + // If the zone config exists, we know that it is not a subzone placeholder. + if zc != nil { + return zc, nil, err + } + + zc = zonepb.NewZoneConfig() + zc.Subzones = subzones + subzone := zc + + // Since our target is a table, recursively get the zone config of its parent + // database. + parentDBID := mustRetrieveNamespaceElem(b, tzo.getTargetID()).DatabaseID + dzo := databaseZoneConfigObj{databaseID: parentDBID} + zc, _, err = dzo.getZoneConfig(b, inheritDefaultRange) + if err != nil { + return nil, nil, err + } + return zc, subzone, nil +} + +func (tzo *tableZoneConfigObj) applyZoneConfig( + b BuildCtx, + n *tree.SetZoneConfig, + copyFromParentList []tree.Name, + setters []func(c *zonepb.ZoneConfig), +) error { + partialZone, err := prepareZoneConfig(b, n, copyFromParentList, setters, tzo) + tzo.setZoneConfigToWrite(partialZone) + return err +} diff --git a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/zone_config_helpers.go b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/zone_config_helpers.go new file mode 100644 index 000000000000..193a319e9d4b --- /dev/null +++ b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/zone_config_helpers.go @@ -0,0 +1,1180 @@ +// Copyright 2024 The Cockroach Authors. 
+// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package scbuildstmt + +import ( + "bytes" + "context" + "fmt" + + "github.com/cockroachdb/cockroach/pkg/base" + "github.com/cockroachdb/cockroach/pkg/config/zonepb" + "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/server/serverpb" + "github.com/cockroachdb/cockroach/pkg/server/telemetry" + "github.com/cockroachdb/cockroach/pkg/settings/cluster" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/fetchpb" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/zone" + "github.com/cockroachdb/cockroach/pkg/sql/covering" + "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" + "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" + "github.com/cockroachdb/cockroach/pkg/sql/rowenc" + "github.com/cockroachdb/cockroach/pkg/sql/rowenc/valueside" + "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb" + "github.com/cockroachdb/cockroach/pkg/sql/sem/catid" + "github.com/cockroachdb/cockroach/pkg/sql/sem/eval" + "github.com/cockroachdb/cockroach/pkg/sql/sem/transform" + "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" + "github.com/cockroachdb/cockroach/pkg/sql/sqlclustersettings" + "github.com/cockroachdb/cockroach/pkg/sql/sqltelemetry" + "github.com/cockroachdb/cockroach/pkg/util/encoding" + "github.com/cockroachdb/cockroach/pkg/util/protoutil" + "github.com/cockroachdb/errors" +) + +type zoneConfigAuthorizer interface { + // checkPrivilegeForSetZoneConfig checks whether the current user has the + // right privilege for configuring the zone of the specified object. + checkPrivilegeForSetZoneConfig(b BuildCtx, zs tree.ZoneSpecifier) error + + // checkZoneConfigChangePermittedForMultiRegion checks if a zone config + // change is permitted for a multi-region database or table. + // The change is permitted iff it is not modifying a protected multi-region + // field of the zone configs (as defined by + // zonepb.MultiRegionZoneConfigFields). + checkZoneConfigChangePermittedForMultiRegion( + b BuildCtx, zs tree.ZoneSpecifier, options tree.KVOptions) error +} + +type zoneConfigObjBuilder interface { + // addZoneConfigToBuildCtx adds the zone config to the build context and + // returns the added element for logging. + addZoneConfigToBuildCtx(b BuildCtx) scpb.Element + + // getTargetID returns the target ID of the zone config object. This is either + // a database or a table ID. + getTargetID() catid.DescID + + // applyZoneConfig applies the changes requested by the given CONFIGURE ZONE + // statement to this object's zone config, leaving the result to be recorded + // via setZoneConfigToWrite. + applyZoneConfig( + b BuildCtx, + n *tree.SetZoneConfig, + copyFromParentList []tree.Name, + setters []func(c *zonepb.ZoneConfig), + ) error + + // setZoneConfigToWrite fills our object with the zone config/subzone config + // we will be writing to KV.
+ setZoneConfigToWrite(zone *zonepb.ZoneConfig) +} + +type zoneConfigRetriever interface { + // retrievePartialZoneConfig retrieves the partial zone configuration of the + // ID of our object. This will either be a database ID or a table ID. + retrievePartialZoneConfig(b BuildCtx) *zonepb.ZoneConfig + + // retrieveCompleteZoneConfig looks up the zone and subzone for the specified + // object ID, index, and partition. + // + // If `getInheritedDefault` is true, the direct zone configuration, if it exists, + // is ignored, and the default zone config that would apply if it did not exist + // is returned instead. This is because, if the stmt is `USING DEFAULT`, we want + // to ignore the zone config that exists on targetID and instead skip to the + // inherited default. + retrieveCompleteZoneConfig( + b BuildCtx, getInheritedDefault bool) (*zonepb.ZoneConfig, *zonepb.Subzone, error) + + // getInheritedDefaultZoneConfig returns the inherited default zone config of + // the target. This means + // - if the target is a table, it returns the zone config of its parent + // database (if exists) or the DEFAULT RANGE. + // - otherwise, it returns the zone config of the DEFAULT RANGE + getInheritedDefaultZoneConfig(b BuildCtx) ( + zc *zonepb.ZoneConfig, err error, + ) + + // getZoneConfig attempts to find the zone config from `system.zones` with + // the target: + // - If the zone config is found to be a subzone placeholder, then we + // further go ahead to find the parent database zone config + // (if our target is a table) or to find the DEFAULT RANGE zone config + // (if our target is a database). + // - Otherwise, we will just return the found zone config + // (so `subzoneId` and `subzone` will be nil). + getZoneConfig(b BuildCtx, inheritDefaultRange bool) ( + *zonepb.ZoneConfig, + *zonepb.ZoneConfig, + error, + ) +} + +type zoneConfigMutator interface { + // completeZoneConfig takes a zone config pointer and fills in the + // missing fields by following the chain of inheritance. + // In the worst case, will have to inherit from the default zone config. + // NOTE: This will not work for subzones. To complete subzones, find a + // complete parent zone (index or table) and apply InheritFromParent to it. + completeZoneConfig(b BuildCtx, zone *zonepb.ZoneConfig) error +} + +// zoneConfigObject is used to represent various types of "objects" that are +// supported by the CONFIGURE ZONE statement. This is used to determine the +// scpb that will be generated. +type zoneConfigObject interface { + zoneConfigAuthorizer + zoneConfigObjBuilder + zoneConfigRetriever + zoneConfigMutator +} + +// resolvePhysicalTableName resolves the table name for a physical table +// in the SetZoneConfig AST by directly modifying its TableOrIndex.Table. +func resolvePhysicalTableName(b BuildCtx, n *tree.SetZoneConfig) { + uon := n.ZoneSpecifier.TableOrIndex.Table.ToUnresolvedObjectName() + tn := uon.ToTableName() + elts := b.ResolvePhysicalTable(uon, ResolveParams{}) + tbl := elts.Filter(func(_ scpb.Status, _ scpb.TargetStatus, e scpb.Element) bool { + switch e := e.(type) { + case *scpb.Table: + return true + case *scpb.View: + if e.IsMaterialized { + return true + } + case *scpb.Sequence: + return true + } + return false + }).MustGetOneElement() + tn.ObjectNamePrefix = b.NamePrefix(tbl) + n.ZoneSpecifier.TableOrIndex.Table = tn +} + +// maybeMultiregionErrorWithHint returns an error if the user is trying to +// update a zone config value that's protected for multi-region databases. 
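// For example: on a multi-region database, `ALTER DATABASE mr CONFIGURE
// ZONE USING num_replicas = 5` is rejected here because num_replicas is in
// zonepb.MultiRegionZoneConfigFields, while `... USING gc.ttlseconds = 600`
// passes because GC settings are not multi-region managed fields.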
+func maybeMultiregionErrorWithHint(options tree.KVOptions) error {
+	hint := "to override this error, SET override_multi_region_zone_config = true and reissue the command"
+	// This is clearly an n^2 operation, but since there are only a single-digit
+	// number of zone config keys, it's likely faster to do it this way than
+	// incur the memory allocation of creating a map.
+	for _, opt := range options {
+		for _, cfg := range zonepb.MultiRegionZoneConfigFields {
+			if opt.Key == cfg {
+				// User is trying to update a zone config value that's protected for
+				// multi-region databases. Return the constructed error.
+				err := errors.Newf("attempting to modify protected field %q of a multi-region zone "+
+					"configuration", string(opt.Key),
+				)
+				return errors.WithHint(err, hint)
+			}
+		}
+	}
+	return nil
+}
+
+// isMultiRegionTable returns true if this table is a multi-region table,
+// meaning it has locality GLOBAL, or REGIONAL BY TABLE, or REGIONAL BY ROW.
+func isMultiRegionTable(b BuildCtx, tableID catid.DescID) bool {
+	tableElems := b.QueryByID(tableID)
+	globalElem := tableElems.FilterTableLocalityGlobal().MustGetZeroOrOneElement()
+	primaryRegionElem := tableElems.FilterTableLocalityPrimaryRegion().MustGetZeroOrOneElement()
+	secondaryRegionElem := tableElems.FilterTableLocalitySecondaryRegion().MustGetZeroOrOneElement()
+	RBRElem := tableElems.FilterTableLocalityRegionalByRow().MustGetZeroOrOneElement()
+	return globalElem != nil || primaryRegionElem != nil || secondaryRegionElem != nil ||
+		RBRElem != nil
+}
+
+// getUpdatedZoneConfigOptions unpacks all kv options for a `CONFIGURE ZONE
+// USING ...` stmt. It ensures all kv options are supported and the values are
+// type-checked and normalized.
+func getUpdatedZoneConfigOptions(
+	b BuildCtx, n tree.KVOptions, telemetryName string,
+) (map[tree.Name]zone.OptionValue, error) {
+
+	var options map[tree.Name]zone.OptionValue
+	// We have a CONFIGURE ZONE USING ... assignment.
+	if n != nil {
+		options = make(map[tree.Name]zone.OptionValue)
+		for _, opt := range n {
+			if _, alreadyExists := options[opt.Key]; alreadyExists {
+				return nil, pgerror.Newf(pgcode.InvalidParameterValue,
+					"duplicate zone config parameter: %q", tree.ErrString(&opt.Key))
+			}
+			// Here we are constrained by the supported ZoneConfig fields,
+			// as described by zone.SupportedZoneConfigOptions.
+			req, ok := zone.SupportedZoneConfigOptions[opt.Key]
+			if !ok {
+				return nil, pgerror.Newf(pgcode.InvalidParameterValue,
+					"unsupported zone config parameter: %q", tree.ErrString(&opt.Key))
+			}
+			telemetry.Inc(
+				sqltelemetry.SchemaSetZoneConfigCounter(
+					telemetryName,
+					string(opt.Key),
+				),
+			)
+			if opt.Value == nil {
+				options[opt.Key] = zone.OptionValue{InheritValue: true, ExplicitValue: nil}
+				continue
+			}
+
+			// Type check and normalize value expr.
+			typedExpr, err := tree.TypeCheckAndRequire(b, opt.Value, b.SemaCtx(), req.RequiredType,
+				string(opt.Key))
+			if err != nil {
+				return nil, err
+			}
+			etctx := transform.ExprTransformContext{}
+			valExpr, err := etctx.NormalizeExpr(b, b.EvalCtx(), typedExpr)
+			if err != nil {
+				return nil, err
+			}
+
+			options[opt.Key] = zone.OptionValue{InheritValue: false, ExplicitValue: valExpr}
+		}
+	}
+	return options, nil
+}
+
+// evaluateZoneOptions gets the input options map ready for use.
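// It returns three parallel results: a deterministic string form of each
// option for the event log (optionsStr), the fields requested as
// `COPY FROM PARENT` (copyFromParentList), and one setter closure per
// explicitly assigned field (setters).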
+func evaluateZoneOptions(
+	b BuildCtx, options map[tree.Name]zone.OptionValue,
+) (
+	optionsStr []string,
+	copyFromParentList []tree.Name,
+	setters []func(c *zonepb.ZoneConfig),
+	err error,
+) {
+	if options != nil {
+		// Set from var = value attributes.
+		//
+		// We iterate over zone.ZoneOptionKeys instead of iterating over the
+		// options map directly so that the optionsStr string constructed for
+		// the event log remains deterministic.
+		for i := range zone.ZoneOptionKeys {
+			name := (*tree.Name)(&zone.ZoneOptionKeys[i])
+			val, ok := options[*name]
+			if !ok {
+				continue
+			}
+			// We don't add the setters for the fields that will copy values
+			// from the parents. These fields will be set by taking what
+			// value would apply to the zone and setting that value explicitly.
+			// Instead, we add the fields to a list that we use at a later time
+			// to copy values over.
+			inheritVal, expr := val.InheritValue, val.ExplicitValue
+			if inheritVal {
+				copyFromParentList = append(copyFromParentList, *name)
+				optionsStr = append(optionsStr, fmt.Sprintf("%s = COPY FROM PARENT", name))
+				continue
+			}
+			datum, err := eval.Expr(b, b.EvalCtx(), expr)
+			if err != nil {
+				return nil, nil, nil, err
+			}
+			if datum == tree.DNull {
+				return nil, nil, nil,
+					pgerror.Newf(pgcode.InvalidParameterValue, "unsupported NULL value for %q",
+						tree.ErrString(name))
+			}
+			opt := zone.SupportedZoneConfigOptions[*name]
+			if opt.CheckAllowed != nil {
+				if err := opt.CheckAllowed(b, b.ClusterSettings(), datum); err != nil {
+					return nil, nil, nil, err
+				}
+			}
+			setter := opt.Setter
+			setters = append(setters, func(c *zonepb.ZoneConfig) { setter(c, datum) })
+			optionsStr = append(optionsStr, fmt.Sprintf("%s = %s", name, datum))
+		}
+	}
+	return optionsStr, copyFromParentList, setters, nil
+}
+
+// fillZoneConfigsForSubzones fills in the zone configs for subzones.
+func fillZoneConfigsForSubzones(
+	indexID catid.IndexID,
+	partition string,
+	tempIndexID catid.IndexID,
+	subzonePlaceholder bool,
+	completeZone *zonepb.ZoneConfig,
+	partialZone *zonepb.ZoneConfig,
+	newZone zonepb.ZoneConfig,
+	finalZone zonepb.ZoneConfig,
+) {
+	completeZone.SetSubzone(zonepb.Subzone{
+		IndexID:       uint32(indexID),
+		PartitionName: partition,
+		Config:        newZone,
+	})
+
+	// The partial zone might just be empty. If so,
+	// replace it with a SubzonePlaceholder.
+	if subzonePlaceholder {
+		partialZone.DeleteTableConfig()
+	}
+
+	partialZone.SetSubzone(zonepb.Subzone{
+		IndexID:       uint32(indexID),
+		PartitionName: partition,
+		Config:        finalZone,
+	})
+
+	// Also set the same zone configs for any corresponding temporary indexes.
+	if tempIndexID != 0 {
+		completeZone.SetSubzone(zonepb.Subzone{
+			IndexID:       uint32(tempIndexID),
+			PartitionName: partition,
+			Config:        newZone,
+		})
+
+		partialZone.SetSubzone(zonepb.Subzone{
+			IndexID:       uint32(tempIndexID),
+			PartitionName: partition,
+			Config:        finalZone,
+		})
+	}
+}
+
+// loadSettingsToZoneConfigs loads settings from var = val assignments. If
+// there were no such settings (e.g. because the query specified CONFIGURE
+// ZONE = or USING DEFAULT), the setter slice will be empty and this will be
+// a no-op. This is innocuous.
+func loadSettingsToZoneConfigs(
+	setters []func(c *zonepb.ZoneConfig), newZone *zonepb.ZoneConfig, finalZone *zonepb.ZoneConfig,
+) error {
+	for _, setter := range setters {
+		// A setter may fail with an error-via-panic. Catch those.
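		// This closure is the usual Go idiom for taming a panicking API: a
		// named error result plus a deferred recover() converts any panic
		// carrying an error back into a normal return value, while any other
		// panic payload is re-raised so genuine bugs still crash loudly.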
+ if err := func() (err error) { + defer func() { + if p := recover(); p != nil { + if errP, ok := p.(error); ok { + // Catch and return the error. + err = errP + } else { + // Nothing we know about, let it continue as a panic. + panic(p) + } + } + }() + + setter(newZone) + setter(finalZone) + return nil + }(); err != nil { + return err + } + } + return nil +} + +// fillIndexAndPartitionFromZoneSpecifier fills out the index id in the zone +// specifier for a indexZoneConfigObj. +func fillIndexAndPartitionFromZoneSpecifier( + b BuildCtx, zs tree.ZoneSpecifier, idxObj *indexZoneConfigObj, +) { + tableID := idxObj.getTargetID() + + indexName := string(zs.TableOrIndex.Index) + var indexID catid.IndexID + if indexName == "" { + // Use the primary index if index name is unspecified. + primaryIndexElem := mustRetrieveCurrentPrimaryIndexElement(b, tableID) + indexID = primaryIndexElem.IndexID + } else { + indexElems := b.ResolveIndex(tableID, tree.Name(indexName), ResolveParams{}) + indexID = indexElems.FilterIndexName().MustGetOneElement().IndexID + } + idxObj.indexID = indexID +} + +// lookUpSystemZonesTable attempts to look up the zone config in `system.zones` +// table by `targetID`. +// If `targetID` is not found, a nil `zone` is returned. +func lookUpSystemZonesTable( + b BuildCtx, objType zoneConfigObject, inheritDefaultRange bool, +) (zone *zonepb.ZoneConfig, subzones []zonepb.Subzone, err error) { + // Get the zone config of the DEFAULT RANGE + if inheritDefaultRange { + zc, err := b.ZoneConfigGetter().GetZoneConfig(b, keys.RootNamespaceID) + if err != nil { + return nil, nil, err + } + zone = zc.ZoneConfigProto() + } else { + // It's a descriptor-backed target (i.e. a database ID or a table ID) + zone = objType.retrievePartialZoneConfig(b) + // If we are dealing with Index subzones, clear out the zone config and + // just use the subzones. + if _, ok := objType.(*indexZoneConfigObj); ok { + if zone != nil && zone.Subzones != nil { + subzones = zone.Subzones + zone = nil + } + } + } + return zone, subzones, nil +} + +// validateZoneAttrsAndLocalities ensures that all constraints/lease preferences +// specified in the new zone config snippet are actually valid, meaning that +// they match at least one node. This protects against user typos causing +// zone configs that silently don't work as intended. +// +// validateZoneAttrsAndLocalities is tenant aware in its validation. Secondary +// tenants don't have access to the NodeStatusServer, and as such, aren't +// allowed to set non-locality attributes in their constraints. Furthermore, +// their access is validated using the descs.RegionProvider. +func validateZoneAttrsAndLocalities(b BuildCtx, currentZone, newZone *zonepb.ZoneConfig) error { + // Avoid RPCs to the Node/Region server if we don't have anything to validate. 
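	// Constraints, VoterConstraints, and LeasePreferences are the only fields
	// validated here that can name store attributes or localities, so the
	// emptiness check below suffices to skip validation. The branch that
	// follows picks the validation source by tenant: the system tenant can
	// enumerate nodes, while secondary tenants only see region/zone metadata.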
+ if len(newZone.Constraints) == 0 && len(newZone.VoterConstraints) == 0 && len(newZone.LeasePreferences) == 0 { + return nil + } + if b.Codec().ForSystemTenant() { + ss, err := b.NodesStatusServer().OptionalNodesStatusServer() + if err != nil { + return err + } + return validateZoneAttrsAndLocalitiesForSystemTenant(b, ss.ListNodesInternal, currentZone, newZone) + } + return validateZoneLocalitiesForSecondaryTenants( + b, b.GetRegions, currentZone, newZone, b.Codec(), b.ClusterSettings(), + ) +} + +type nodeGetter func(context.Context, *serverpb.NodesRequest) (*serverpb.NodesResponse, error) +type regionsGetter func(context.Context) (*serverpb.RegionsResponse, error) + +// validateZoneAttrsAndLocalitiesForSystemTenant performs constraint/ lease +// preferences validation for the system tenant. Only newly added constraints +// are validated. The system tenant is allowed to reference both locality and +// non-locality attributes as it has access to node information via the +// NodeStatusServer. +// +// For the system tenant, this only catches typos in required constraints. This +// is by design. We don't want to reject prohibited constraints whose +// attributes/localities don't match any of the current nodes because it's a +// reasonable use case to add prohibited constraints for a new set of nodes +// before adding the new nodes to the cluster. If you had to first add one of +// the nodes before creating the constraints, data could be replicated there +// that shouldn't be. +func validateZoneAttrsAndLocalitiesForSystemTenant( + b BuildCtx, getNodes nodeGetter, currentZone, newZone *zonepb.ZoneConfig, +) error { + nodes, err := getNodes(b, &serverpb.NodesRequest{}) + if err != nil { + return err + } + + toValidate := accumulateNewUniqueConstraints(currentZone, newZone) + + // Check that each constraint matches some store somewhere in the cluster. + for _, constraint := range toValidate { + // We skip validation for negative constraints. See the function-level comment. + if constraint.Type == zonepb.Constraint_PROHIBITED { + continue + } + var found bool + node: + for _, node := range nodes.Nodes { + for _, store := range node.StoreStatuses { + // We could alternatively use zonepb.StoreMatchesConstraint here to + // catch typos in prohibited constraints as well, but as noted in the + // function-level comment that could break very reasonable use cases for + // prohibited constraints. + if zonepb.StoreSatisfiesConstraint(store.Desc, constraint) { + found = true + break node + } + } + } + if !found { + return pgerror.Newf(pgcode.CheckViolation, + "constraint %q matches no existing nodes within the cluster - did you enter it correctly?", + constraint) + } + } + + return nil +} + +// validateZoneLocalitiesForSecondaryTenants performs constraint/lease +// preferences validation for secondary tenants. Only newly added constraints +// are validated. Unless SecondaryTenantsAllZoneConfigsEnabled is set to 'true', +// secondary tenants are only allowed to reference locality attributes as they +// only have access to region information via the serverpb.TenantStatusServer. +// In that case they're only allowed to reference the "region" and "zone" tiers. +// +// Unlike the system tenant, we also validate prohibited constraints. This is +// because secondary tenant must operate in the narrow view exposed via the +// serverpb.TenantStatusServer and are not allowed to configure arbitrary +// constraints (required or otherwise). 
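// For example: a secondary tenant's `+region=us-east1` is checked against
// the regions reported by the regions RPC, `+zone=us-east1-b` against the
// zones, and any other key, such as `+rack=12`, is rejected unless
// sqlclustersettings.SecondaryTenantsAllZoneConfigsEnabled permits it.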
+func validateZoneLocalitiesForSecondaryTenants( + ctx context.Context, + getRegions regionsGetter, + currentZone, newZone *zonepb.ZoneConfig, + codec keys.SQLCodec, + settings *cluster.Settings, +) error { + toValidate := accumulateNewUniqueConstraints(currentZone, newZone) + + // rs and zs will be lazily populated with regions and zones, respectively. + // These should not be accessed directly - use getRegionsAndZones helper + // instead. + var rs, zs map[string]struct{} + getRegionsAndZones := func() (regions, zones map[string]struct{}, _ error) { + if rs != nil { + return rs, zs, nil + } + resp, err := getRegions(ctx) + if err != nil { + return nil, nil, err + } + rs, zs = make(map[string]struct{}), make(map[string]struct{}) + for regionName, regionMeta := range resp.Regions { + rs[regionName] = struct{}{} + for _, zone := range regionMeta.Zones { + zs[zone] = struct{}{} + } + } + return rs, zs, nil + } + + for _, constraint := range toValidate { + switch constraint.Key { + case "zone": + _, zones, err := getRegionsAndZones() + if err != nil { + return err + } + _, found := zones[constraint.Value] + if !found { + return pgerror.Newf( + pgcode.CheckViolation, + "zone %q not found", + constraint.Value, + ) + } + case "region": + regions, _, err := getRegionsAndZones() + if err != nil { + return err + } + _, found := regions[constraint.Value] + if !found { + return pgerror.Newf( + pgcode.CheckViolation, + "region %q not found", + constraint.Value, + ) + } + default: + if err := sqlclustersettings.RequireSystemTenantOrClusterSetting( + codec, settings, sqlclustersettings.SecondaryTenantsAllZoneConfigsEnabled, + ); err != nil { + return err + } + } + } + return nil +} + +// accumulateNewUniqueConstraints returns a list of unique constraints in the +// given newZone config proto that are not in the currentZone +func accumulateNewUniqueConstraints(currentZone, newZone *zonepb.ZoneConfig) []zonepb.Constraint { + seenConstraints := make(map[zonepb.Constraint]struct{}) + retConstraints := make([]zonepb.Constraint, 0) + addToValidate := func(c zonepb.Constraint) { + if _, ok := seenConstraints[c]; ok { + // Already in the list or in the current zone config, nothing to do. + return + } + retConstraints = append(retConstraints, c) + seenConstraints[c] = struct{}{} + } + // First scan all the current zone config constraints. + for _, constraints := range currentZone.Constraints { + for _, constraint := range constraints.Constraints { + seenConstraints[constraint] = struct{}{} + } + } + for _, constraints := range currentZone.VoterConstraints { + for _, constraint := range constraints.Constraints { + seenConstraints[constraint] = struct{}{} + } + } + for _, leasePreferences := range currentZone.LeasePreferences { + for _, constraint := range leasePreferences.Constraints { + seenConstraints[constraint] = struct{}{} + } + } + + // Then scan all the new zone config constraints, adding the ones that + // were not seen already. 
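	// As a worked example: if currentZone already carries `+region=us-east1`
	// and newZone asks for `+region=us-east1,+region=us-west1`, only
	// `+region=us-west1` is returned for validation, since constraints that
	// are already in place were validated when they were first written.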
+ for _, constraints := range newZone.Constraints { + for _, constraint := range constraints.Constraints { + addToValidate(constraint) + } + } + for _, constraints := range newZone.VoterConstraints { + for _, constraint := range constraints.Constraints { + addToValidate(constraint) + } + } + for _, leasePreferences := range newZone.LeasePreferences { + for _, constraint := range leasePreferences.Constraints { + addToValidate(constraint) + } + } + return retConstraints +} + +// generateSubzoneSpans constructs from a TableID the entries mapping +// zone config spans to subzones for use in the SubzoneSpans field of +// zonepb.ZoneConfig. SubzoneSpans controls which splits are created, so only +// the spans corresponding to entries in subzones are returned. +// +// Zone configs target indexes and partitions via `subzones`, which are attached +// to a table-scoped row in `system.zones`. Each subzone represents one index +// (primary or secondary) or one partition (or subpartition) and contains the +// usual zone config constraints. They are saved to `system.zones` sparsely +// (only when set by a user) and are the most specific entry in the normal +// cluster-default/database/table/subzone config hierarchy. +// +// Each index and partition can be mapped to spans in the keyspace. Indexes and +// range partitions each map to one span, while each list partition maps to one +// or more spans. Each partition span is contained by some index span and each +// subpartition span is contained by one of its parent partition's spans. The +// spans for a given level of a range partitioning (corresponding to one +// `PARTITION BY` in sql or one `PartitionDescriptor`) are disjoint, but the +// spans for a given level of a list partitioning may overlap if DEFAULT is +// used. A list partitioning which includes both (1, DEFAULT) and (1, 2) will +// overlap with the latter getting precedence in the zone config hierarchy. NB: +// In a valid PartitionDescriptor, no partitions with the same number of +// DEFAULTs will overlap (this property is used by +// `indexCoveringsForPartitioning`). +// +// These subzone spans are kept denormalized to the relevant `system.zone` row +// for performance. Given a tableID, the spans for the specific +// index/partition/subpartition is created, filtered out if they don't have a +// config set for them, and precedence applied (via `OverlapCoveringMerge`) to +// produce a set of non-overlapping spans, which each map to a subzone. There +// may be "holes" (uncovered spans) in this set. +// +// The returned spans are returned in exactly the format required by +// `system.zones`. They must be sorted and non-overlapping. Each contains an +// IndexID, which maps to one of the input `subzones` by indexing into the +// slice. As space optimizations, all `Key`s and `EndKey`s of `SubzoneSpan` omit +// the common prefix (the encoded table ID) and if `EndKey` is equal to +// `Key.PrefixEnd()` it is omitted. +func generateSubzoneSpans( + b BuildCtx, + tableID catid.DescID, + subzones []zonepb.Subzone, + indexID catid.IndexID, + partitionName string, +) ([]zonepb.SubzoneSpan, error) { + if err := base.CheckEnterpriseEnabled(b.ClusterSettings(), + "replication zones on indexes or partitions"); err != nil { + return nil, err + } + // We already completely avoid creating subzone spans for dropped indexes. + // Whether this was intentional is a different story, but it turns out to be + // pretty sane. 
Dropped elements may refer to dropped types and we aren't + // necessarily in a position to deal with those dropped types. Add a special + // case to avoid generating any subzone spans in the face of being dropped. + isDroppedTable := false + b.QueryByID(tableID).FilterTable(). + ForEach(func(current scpb.Status, target scpb.TargetStatus, e *scpb.Table) { + if e.TableID == tableID { + if current == scpb.Status_DROPPED || target == scpb.ToAbsent { + isDroppedTable = true + } + } + }) + if isDroppedTable { + return nil, nil + } + + subzoneIndexByIndexID := make(map[descpb.IndexID]int32) + subzoneIndexByPartition := make(map[string]int32) + for i, subzone := range subzones { + if len(subzone.PartitionName) > 0 { + subzoneIndexByPartition[subzone.PartitionName] = int32(i) + } else { + subzoneIndexByIndexID[descpb.IndexID(subzone.IndexID)] = int32(i) + } + } + + var indexCovering covering.Covering + var partitionCoverings []covering.Covering + b.QueryByID(tableID).FilterIndexName().ForEach(func(_ scpb.Status, _ scpb.TargetStatus, e *scpb.IndexName) { + newIndexCovering, newPartitionCoverings := getCoverings(b, subzoneIndexByIndexID, + subzoneIndexByPartition, tableID, e.IndexID, "") + indexCovering = append(indexCovering, newIndexCovering...) + partitionCoverings = append(partitionCoverings, newPartitionCoverings...) + }) + + // OverlapCoveringMerge returns the payloads for any coverings that overlap + // in the same order they were input. So, we require that they be ordered + // with highest precedence first, so the first payload of each range is the + // one we need. + ranges := covering.OverlapCoveringMerge(append(partitionCoverings, indexCovering)) + + // NB: This assumes that none of the indexes are interleaved, which is + // checked in PartitionDescriptor validation. + sharedPrefix := b.Codec().TablePrefix(uint32(tableID)) + + var subzoneSpans []zonepb.SubzoneSpan + for _, r := range ranges { + payloads := r.Payload.([]interface{}) + if len(payloads) == 0 { + continue + } + subzoneSpan := zonepb.SubzoneSpan{ + Key: bytes.TrimPrefix(r.Start, sharedPrefix), + EndKey: bytes.TrimPrefix(r.End, sharedPrefix), + } + var ok bool + if subzone := payloads[0].(zonepb.Subzone); len(subzone.PartitionName) > 0 { + subzoneSpan.SubzoneIndex, ok = subzoneIndexByPartition[subzone.PartitionName] + } else { + subzoneSpan.SubzoneIndex, ok = subzoneIndexByIndexID[descpb.IndexID(subzone.IndexID)] + } + if !ok { + continue + } + if bytes.Equal(subzoneSpan.Key.PrefixEnd(), subzoneSpan.EndKey) { + subzoneSpan.EndKey = nil + } + subzoneSpans = append(subzoneSpans, subzoneSpan) + } + return subzoneSpans, nil +} + +func getCoverings( + b BuildCtx, + subzoneIndexByIndexID map[descpb.IndexID]int32, + subzoneIndexByPartition map[string]int32, + tableID catid.DescID, + indexID catid.IndexID, + partitionName string, +) (covering.Covering, []covering.Covering) { + var indexCovering covering.Covering + var partitionCoverings []covering.Covering + a := &tree.DatumAlloc{} + idxCols := mustRetrieveIndexColumnElements(b, tableID, indexID) + + for _, idxCol := range idxCols { + _, indexSubzoneExists := subzoneIndexByIndexID[idxCol.IndexID] + if indexSubzoneExists { + prefix := roachpb.Key(rowenc.MakeIndexKeyPrefix(b.Codec(), tableID, idxCol.IndexID)) + idxSpan := roachpb.Span{Key: prefix, EndKey: prefix.PrefixEnd()} + // Each index starts with a unique prefix, so (from a precedence + // perspective) it's safe to append them all together. 
+ indexCovering = append(indexCovering, covering.Range{ + Start: idxSpan.Key, End: idxSpan.EndKey, + Payload: zonepb.Subzone{IndexID: uint32(idxCol.IndexID)}, + }) + } + var emptyPrefix []tree.Datum + idxPart := b.QueryByID(tableID).FilterIndexPartitioning(). + Filter(func(_ scpb.Status, _ scpb.TargetStatus, e *scpb.IndexPartitioning) bool { + return e.TableID == tableID && e.IndexID == indexID + }).MustGetZeroOrOneElement() + partition := tabledesc.NewPartitioning(nil) + if idxPart != nil { + partition = tabledesc.NewPartitioning(&idxPart.PartitioningDescriptor) + partition = partition.FindPartitionByName(partitionName) + } + indexPartitionCoverings, err := indexCoveringsForPartitioning( + b, a, tableID, idxCols, partition, subzoneIndexByPartition, emptyPrefix) + if err != nil { + panic(err) + } + // The returned indexPartitionCoverings are sorted with highest + // precedence first. They all start with the index prefix, so cannot + // overlap with the partition coverings for any other index, so (from a + // precedence perspective) it's safe to append them all together. + partitionCoverings = append(partitionCoverings, indexPartitionCoverings...) + } + return indexCovering, partitionCoverings +} + +// indexCoveringsForPartitioning returns span coverings representing the +// partitions in partDesc (including subpartitions). They are sorted with +// highest precedence first and the interval.Range payloads are each a +// `zonepb.Subzone` with the PartitionName set. +func indexCoveringsForPartitioning( + b BuildCtx, + a *tree.DatumAlloc, + tableID catid.DescID, + idxs []*scpb.IndexColumn, + part catalog.Partitioning, + relevantPartitions map[string]int32, + prefixDatums []tree.Datum, +) ([]covering.Covering, error) { + if part == nil || part.NumColumns() == 0 { + return nil, nil + } + + var coverings []covering.Covering + var descendentCoverings []covering.Covering + + if part.NumLists() > 0 { + // The returned spans are required to be ordered with highest precedence + // first. The span for (1, DEFAULT) overlaps with (1, 2) and needs to be + // returned at a lower precedence. Luckily, because of the partitioning + // validation, we're guaranteed that all entries in a list partitioning + // with the same number of DEFAULTs are non-overlapping. So, bucket the + // `interval.Range`s by the number of non-DEFAULT columns and return + // them ordered from least # of DEFAULTs to most. + listCoverings := make([]covering.Covering, part.NumColumns()+1) + err := part.ForEachList(func(name string, values [][]byte, subPartitioning catalog.Partitioning) error { + for _, valueEncBuf := range values { + t, keyPrefix, err := decodePartitionTuple( + b, a, tableID, idxs, part, valueEncBuf, prefixDatums) + if err != nil { + return err + } + if _, ok := relevantPartitions[name]; ok { + listCoverings[len(t.Datums)] = append(listCoverings[len(t.Datums)], covering.Range{ + Start: keyPrefix, End: roachpb.Key(keyPrefix).PrefixEnd(), + Payload: zonepb.Subzone{PartitionName: name}, + }) + } + newPrefixDatums := append(prefixDatums, t.Datums...) + subpartitionCoverings, err := indexCoveringsForPartitioning( + b, a, tableID, idxs, subPartitioning, relevantPartitions, newPrefixDatums) + if err != nil { + return err + } + descendentCoverings = append(descendentCoverings, subpartitionCoverings...) 
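			// Subpartition coverings are accumulated separately because they
			// must outrank their parents: the final return prepends
			// descendentCoverings, and OverlapCoveringMerge treats earlier
			// coverings as higher precedence, so the most deeply nested
			// partition wins on overlapping spans.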
+ } + return nil + }) + if err != nil { + return nil, err + } + for i := range listCoverings { + if covering := listCoverings[len(listCoverings)-i-1]; len(covering) > 0 { + coverings = append(coverings, covering) + } + } + } + + if part.NumRanges() > 0 { + err := part.ForEachRange(func(name string, from, to []byte) error { + if _, ok := relevantPartitions[name]; !ok { + return nil + } + _, fromKey, err := decodePartitionTuple( + b, a, tableID, idxs, part, from, prefixDatums) + if err != nil { + return err + } + _, toKey, err := decodePartitionTuple( + b, a, tableID, idxs, part, to, prefixDatums) + if err != nil { + return err + } + if _, ok := relevantPartitions[name]; ok { + coverings = append(coverings, covering.Covering{{ + Start: fromKey, End: toKey, + Payload: zonepb.Subzone{PartitionName: name}, + }}) + } + return nil + }) + if err != nil { + return nil, err + } + } + + // descendentCoverings are from subpartitions and so get precedence; append + // them to the front. + return append(descendentCoverings, coverings...), nil +} + +// decodePartitionTuple parses columns (which are a prefix of the columns of +// `idxDesc`) encoded with the "value" encoding and returns the parsed datums. +// It also reencodes them into a key as they would be for `idxDesc` (accounting +// for index dirs, subpartitioning, etc). +// +// For a list partitioning, this returned key can be used as a prefix scan to +// select all rows that have the given columns as a prefix (this is true even if +// the list partitioning contains DEFAULT). +// +// Examples of the key returned for a list partitioning: +// - (1, 2) -> /table/index/1/2 +// - (1, DEFAULT) -> /table/index/1 +// - (DEFAULT, DEFAULT) -> /table/index +// +// For a range partitioning, this returned key can be used as a exclusive end +// key to select all rows strictly less than ones with the given columns as a +// prefix (this is true even if the range partitioning contains MINVALUE or +// MAXVALUE). +// +// Examples of the key returned for a range partitioning: +// - (1, 2) -> /table/index/1/3 +// - (1, MAXVALUE) -> /table/index/2 +// - (MAXVALUE, MAXVALUE) -> (/table/index).PrefixEnd() +// +// NB: It is checked here that if an entry for a list partitioning contains +// DEFAULT, everything in that entry "after" also has to be DEFAULT. So, (1, 2, +// DEFAULT) is valid but (1, DEFAULT, 2) is not. Similarly for range +// partitioning and MINVALUE/MAXVALUE. +func decodePartitionTuple( + b BuildCtx, + a *tree.DatumAlloc, + tableID catid.DescID, + index []*scpb.IndexColumn, + part catalog.Partitioning, + valueEncBuf []byte, + prefixDatums tree.Datums, +) (*rowenc.PartitionTuple, []byte, error) { + keyColumns := b.QueryByID(tableID).FilterIndexColumn(). 
+ Filter(func(_ scpb.Status, _ scpb.TargetStatus, e *scpb.IndexColumn) bool { + return e.Kind == scpb.IndexColumn_KEY + }) + if len(prefixDatums)+part.NumColumns() > keyColumns.Size() { + return nil, nil, fmt.Errorf("not enough columns in index for this partitioning") + } + + t := &rowenc.PartitionTuple{ + Datums: make(tree.Datums, 0, part.NumColumns()), + } + + for i := len(prefixDatums); i < keyColumns.Size() && i < len(prefixDatums)+part.NumColumns(); i++ { + _, _, keyCol := keyColumns.Get(i) + col := keyCol.(*scpb.IndexColumn) + colType := b.QueryByID(tableID).FilterColumnType().Filter( + func(_ scpb.Status, _ scpb.TargetStatus, e *scpb.ColumnType) bool { + return e.ColumnID == col.ColumnID + }).MustGetOneElement().Type + if _, dataOffset, _, typ, err := encoding.DecodeValueTag(valueEncBuf); err != nil { + return nil, nil, errors.Wrapf(err, "decoding") + } else if typ == encoding.NotNull { + // NOT NULL signals that a PartitionSpecialValCode follows + var valCode uint64 + valueEncBuf, _, valCode, err = encoding.DecodeNonsortingUvarint(valueEncBuf[dataOffset:]) + if err != nil { + return nil, nil, err + } + nextSpecial := rowenc.PartitionSpecialValCode(valCode) + if t.SpecialCount > 0 && t.Special != nextSpecial { + return nil, nil, errors.Newf("non-%[1]s value (%[2]s) not allowed after %[1]s", + t.Special, nextSpecial) + } + t.Special = nextSpecial + t.SpecialCount++ + } else { + var datum tree.Datum + datum, valueEncBuf, err = valueside.Decode(a, colType, valueEncBuf) + if err != nil { + return nil, nil, errors.Wrapf(err, "decoding") + } + if t.SpecialCount > 0 { + return nil, nil, errors.Newf("non-%[1]s value (%[2]s) not allowed after %[1]s", + t.Special, datum) + } + t.Datums = append(t.Datums, datum) + } + } + if len(valueEncBuf) > 0 { + return nil, nil, errors.New("superfluous data in encoded value") + } + + allDatums := append(prefixDatums, t.Datums...) + var colMap catalog.TableColMap + for i := range allDatums { + _, _, keyCol := keyColumns.Get(i) + col := keyCol.(*scpb.IndexColumn) + colMap.Set(col.ColumnID, i) + } + + indexKeyPrefix := rowenc.MakeIndexKeyPrefix(b.Codec(), tableID, index[0].IndexID) + var keyAndSuffixCols []fetchpb.IndexFetchSpec_KeyColumn + for _, i := range index { + indexCol := fetchpb.IndexFetchSpec_Column{ + ColumnID: i.ColumnID, + Name: "", + Type: nil, + IsNonNullable: false, + } + keyAndSuffixCols = append(keyAndSuffixCols, fetchpb.IndexFetchSpec_KeyColumn{ + IndexFetchSpec_Column: indexCol, + Direction: i.Direction, + IsComposite: false, + IsInverted: false, + }) + } + if len(allDatums) > len(keyAndSuffixCols) { + return nil, nil, errors.Errorf("encoding too many columns (%d)", len(allDatums)) + } + key, _, err := rowenc.EncodePartialIndexKey(keyAndSuffixCols[:len(allDatums)], colMap, allDatums, indexKeyPrefix) + if err != nil { + return nil, nil, err + } + + // Currently, key looks something like `/table/index/1`. Given a range + // partitioning of (1), we're done. This can be used as the exclusive end + // key of a scan to fetch all rows strictly less than (1). + // + // If `specialIdx` is not the sentinel, then we're actually in a case like + // `(1, MAXVALUE, ..., MAXVALUE)`. Since this index could have a descending + // nullable column, we can't rely on `/table/index/1/0xff` to be _strictly_ + // larger than everything it should match. Instead, we need `PrefixEnd()`. + // This also intuitively makes sense; we're essentially a key that is + // guaranteed to be less than `(2, MINVALUE, ..., MINVALUE)`. 
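	// As a worked example: for a range bound (1, MAXVALUE) over a two-column
	// index, the loop above decoded one datum plus one MAXVALUE special, so
	// EncodePartialIndexKey produced /table/index/1; PrefixEnd() below turns
	// that into the first key past everything prefixed by 1, i.e. the
	// exclusive end written as /table/index/2 in the function comment.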
+ if t.SpecialCount > 0 && t.Special == rowenc.PartitionMaxVal { + key = roachpb.Key(key).PrefixEnd() + } + + return t, key, nil +} + +// findMostRecentZoneConfig finds the most recent zone config for the targetID. +// Here, "most recent" is defined by highest seqNum. +func findMostRecentZoneConfig[T scpb.ZoneConfigElement]( + obj zoneConfigObject, + zoneConfigElemFilter func(catid.DescID) *scpb.ElementCollection[T], + isSpecificZoneConfigElem func(elem T) bool, +) T { + var mostRecentElem T + highestSeqNum := uint32(0) + targetID := obj.getTargetID() + elems := zoneConfigElemFilter(targetID) + + elems.ForEach(func(_ scpb.Status, _ scpb.TargetStatus, elem T) { + if isSpecificZoneConfigElem(elem) { + if highestSeqNum <= elem.GetSeqNum() { + mostRecentElem = elem + highestSeqNum = elem.GetSeqNum() + } + } + }) + + return mostRecentElem +} + +func prepareZoneConfig( + b BuildCtx, + n *tree.SetZoneConfig, + copyFromParentList []tree.Name, + setters []func(c *zonepb.ZoneConfig), + obj zoneConfigObject, +) (*zonepb.ZoneConfig, error) { + // TODO(annie): once we allow configuring zones for named zones/system ranges, + // we will need to guard against secondary tenants from configuring such + // ranges. + + // Retrieve the partial zone configuration + partialZone := obj.retrievePartialZoneConfig(b) + + // No zone was found. Use an empty zone config that inherits from its parent. + if partialZone == nil { + partialZone = zonepb.NewZoneConfig() + } + currentZone := protoutil.Clone(partialZone).(*zonepb.ZoneConfig) + + // Retrieve the zone configuration. + // + // If the statement was USING DEFAULT, we want to ignore the zone + // config that exists on targetID and instead skip to the inherited + // default (default since our target is a database). For this, we use the last + // parameter getInheritedDefault to retrieveCompleteZoneConfig(). + // These zones are only used for validations. The merged zone will not + // be written. + completeZone, _, err := obj.retrieveCompleteZoneConfig(b, + n.SetDefault /* getInheritedDefault */) + if err != nil { + return nil, err + } + + // We need to inherit zone configuration information from the correct zone, + // not completeZone. + { + // If we are operating on a zone, get all fields that the zone would + // inherit from its parent. We do this by using an empty zoneConfig + // and completing at the level of the current zone. + zoneInheritedFields := zonepb.ZoneConfig{} + if err := obj.completeZoneConfig(b, &zoneInheritedFields); err != nil { + return nil, err + } + partialZone.CopyFromZone(zoneInheritedFields, copyFromParentList) + } + + // Determine where to load the configuration. + newZone := *completeZone + + // Determine where to load the partial configuration. + // finalZone is where the new changes are unmarshalled onto. + finalZone := *partialZone + + if n.SetDefault { + finalZone = *zonepb.NewZoneConfig() + } + + // Fill in our zone configs with var = val assignments. + if err := loadSettingsToZoneConfigs(setters, &newZone, &finalZone); err != nil { + return nil, err + } + + // Validate that there are no conflicts in the zone setup. + if err := zonepb.ValidateNoRepeatKeysInZone(&newZone); err != nil { + return nil, err + } + + if err := validateZoneAttrsAndLocalities(b, currentZone, &newZone); err != nil { + return nil, err + } + + // The final zone config is the one we just processed. 
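	// Note that two configs are deliberately kept in play: completeZone (the
	// fully inherited view, used only for the validation below) and
	// partialZone (only the fields set explicitly here, which is what is
	// returned and ultimately written), so that later changes to parent zones
	// keep flowing into fields the user never set.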
+ completeZone = &newZone + partialZone = &finalZone + // Since we are writing to a zone that is not a subzone, we need to + // make sure that the zone config is not considered a placeholder + // anymore. If the settings applied to this zone don't touch the + // NumReplicas field, set it to nil so that the zone isn't considered a + // placeholder anymore. + if partialZone.IsSubzonePlaceholder() { + partialZone.NumReplicas = nil + } + + // Finally, revalidate everything. Validate only the completeZone config. + if err := completeZone.Validate(); err != nil { + return nil, pgerror.Wrap(err, pgcode.CheckViolation, "could not validate zone config") + } + + // Finally, check for the extra protection partial zone configs would + // require from changes made to parent zones. The extra protections are: + // + // RangeMinBytes and RangeMaxBytes must be set together + // LeasePreferences cannot be set unless Constraints/VoterConstraints are + // explicitly set + // Per-replica constraints cannot be set unless num_replicas is explicitly + // set + // Per-voter constraints cannot be set unless num_voters is explicitly set + if err := finalZone.ValidateTandemFields(); err != nil { + err = errors.Wrap(err, "could not validate zone config") + err = pgerror.WithCandidateCode(err, pgcode.InvalidParameterValue) + err = errors.WithHint(err, + "try ALTER ... CONFIGURE ZONE USING = COPY FROM PARENT [, ...] to "+ + "populate the field") + return nil, err + } + return partialZone, nil +} diff --git a/pkg/sql/schemachanger/scbuild/testdata/alter_table_add_column b/pkg/sql/schemachanger/scbuild/testdata/alter_table_add_column index ce1dc7dcb0d4..63b1756fa29a 100644 --- a/pkg/sql/schemachanger/scbuild/testdata/alter_table_add_column +++ b/pkg/sql/schemachanger/scbuild/testdata/alter_table_add_column @@ -153,6 +153,48 @@ ALTER TABLE defaultdb.foo ADD COLUMN a INT AS (i+1) STORED - [[IndexColumn:{DescID: 104, ColumnID: 2, IndexID: 3}, TRANSIENT_ABSENT], ABSENT] {columnId: 2, indexId: 3, kind: STORED, tableId: 104} +build +ALTER TABLE defaultdb.foo ADD COLUMN serial_id SERIAL +---- +- [[IndexColumn:{DescID: 104, ColumnID: 1, IndexID: 1}, ABSENT], PUBLIC] + {columnId: 1, indexId: 1, tableId: 104} +- [[PrimaryIndex:{DescID: 104, IndexID: 1, ConstraintID: 1}, ABSENT], PUBLIC] + {constraintId: 1, indexId: 1, isUnique: true, tableId: 104} +- [[IndexName:{DescID: 104, Name: foo_pkey, IndexID: 1}, ABSENT], PUBLIC] + {indexId: 1, name: foo_pkey, tableId: 104} +- [[IndexData:{DescID: 104, IndexID: 1}, ABSENT], PUBLIC] + {indexId: 1, tableId: 104} +- [[TableData:{DescID: 104, ReferencedDescID: 100}, PUBLIC], PUBLIC] + {databaseId: 100, tableId: 104} +- [[Column:{DescID: 104, ColumnID: 2}, PUBLIC], ABSENT] + {columnId: 2, tableId: 104} +- [[ColumnName:{DescID: 104, Name: serial_id, ColumnID: 2}, PUBLIC], ABSENT] + {columnId: 2, name: serial_id, tableId: 104} +- [[ColumnType:{DescID: 104, ColumnFamilyID: 0, ColumnID: 2, TypeName: INT8}, PUBLIC], ABSENT] + {columnId: 2, elementCreationMetadata: {in231OrLater: true, in243OrLater: true}, tableId: 104, type: {family: IntFamily, oid: 20, width: 64}, typeName: INT8} +- [[ColumnDefaultExpression:{DescID: 104, ColumnID: 2, Expr: unique_rowid()}, PUBLIC], ABSENT] + {columnId: 2, expr: unique_rowid(), tableId: 104} +- [[PrimaryIndex:{DescID: 104, IndexID: 2, ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1}, PUBLIC], ABSENT] + {constraintId: 2, indexId: 2, isUnique: true, sourceIndexId: 1, tableId: 104, temporaryIndexId: 3} +- [[IndexName:{DescID: 104, Name: foo_pkey, IndexID: 2}, 
PUBLIC], ABSENT] + {indexId: 2, name: foo_pkey, tableId: 104} +- [[IndexColumn:{DescID: 104, ColumnID: 1, IndexID: 2}, PUBLIC], ABSENT] + {columnId: 1, indexId: 2, tableId: 104} +- [[IndexData:{DescID: 104, IndexID: 2}, PUBLIC], ABSENT] + {indexId: 2, tableId: 104} +- [[TemporaryIndex:{DescID: 104, IndexID: 3, ConstraintID: 3, SourceIndexID: 1}, TRANSIENT_ABSENT], ABSENT] + {constraintId: 3, indexId: 3, isUnique: true, sourceIndexId: 1, tableId: 104} +- [[IndexColumn:{DescID: 104, ColumnID: 1, IndexID: 3}, TRANSIENT_ABSENT], ABSENT] + {columnId: 1, indexId: 3, tableId: 104} +- [[IndexData:{DescID: 104, IndexID: 3}, TRANSIENT_ABSENT], ABSENT] + {indexId: 3, tableId: 104} +- [[IndexColumn:{DescID: 104, ColumnID: 2, IndexID: 2}, PUBLIC], ABSENT] + {columnId: 2, indexId: 2, kind: STORED, tableId: 104} +- [[IndexColumn:{DescID: 104, ColumnID: 2, IndexID: 3}, TRANSIENT_ABSENT], ABSENT] + {columnId: 2, indexId: 3, kind: STORED, tableId: 104} +- [[ColumnNotNull:{DescID: 104, ColumnID: 2, IndexID: 2}, PUBLIC], ABSENT] + {columnId: 2, indexIdForValidation: 2, tableId: 104} + setup CREATE TABLE defaultdb.bar (j INT); ---- diff --git a/pkg/sql/schemachanger/scbuild/testdata/drop_function b/pkg/sql/schemachanger/scbuild/testdata/drop_function index a5d6636571c1..aee177439d71 100644 --- a/pkg/sql/schemachanger/scbuild/testdata/drop_function +++ b/pkg/sql/schemachanger/scbuild/testdata/drop_function @@ -41,5 +41,7 @@ DROP FUNCTION f; {functionId: 109} - [[FunctionNullInputBehavior:{DescID: 109}, ABSENT], PUBLIC] {functionId: 109, nullInputBehavior: {nullInputBehavior: CALLED_ON_NULL_INPUT}} +- [[FunctionSecurity:{DescID: 109}, ABSENT], PUBLIC] + {functionId: 109, security: {}} - [[FunctionBody:{DescID: 109}, ABSENT], PUBLIC] {body: "SELECT a FROM defaultdb.public.t;\nSELECT b FROM defaultdb.public.t@t_idx_b;\nSELECT c FROM defaultdb.public.t@t_idx_c;\nSELECT a FROM defaultdb.public.v;\nSELECT nextval(105:::REGCLASS);", functionId: 109, lang: {lang: SQL}, usesSequenceIds: [105], usesTables: [{columnIds: [1], tableId: 104}, {columnIds: [2], indexId: 2, tableId: 104}, {columnIds: [3], indexId: 3, tableId: 104}], usesTypeIds: [107, 108], usesViews: [{columnIds: [1], viewId: 106}]} diff --git a/pkg/sql/schemachanger/scbuild/testdata/unimplemented_alter_table b/pkg/sql/schemachanger/scbuild/testdata/unimplemented_alter_table index ecc469652668..31350b82d852 100644 --- a/pkg/sql/schemachanger/scbuild/testdata/unimplemented_alter_table +++ b/pkg/sql/schemachanger/scbuild/testdata/unimplemented_alter_table @@ -12,10 +12,6 @@ CREATE TABLE defaultdb.foo ( ); ---- -unimplemented -ALTER TABLE defaultdb.foo ADD COLUMN j SERIAL ----- - unimplemented ALTER TABLE defaultdb.foo ALTER COLUMN i DROP NOT NULL ---- diff --git a/pkg/sql/schemachanger/scdecomp/decomp.go b/pkg/sql/schemachanger/scdecomp/decomp.go index 78ea28ba5577..0c26eea0591c 100644 --- a/pkg/sql/schemachanger/scdecomp/decomp.go +++ b/pkg/sql/schemachanger/scdecomp/decomp.go @@ -441,7 +441,9 @@ func (w *walkCtx) walkRelation(tbl catalog.TableDescriptor) { &scpb.IndexZoneConfig{ TableID: tbl.GetID(), IndexID: catid.IndexID(subZoneCfg.IndexID), + Subzone: subZoneCfg, PartitionName: subZoneCfg.PartitionName, + SeqNum: 0, }) } } @@ -873,6 +875,10 @@ func (w *walkCtx) walkFunction(fnDesc catalog.FunctionDescriptor) { FunctionID: fnDesc.GetID(), NullInputBehavior: catpb.FunctionNullInputBehavior{NullInputBehavior: fnDesc.GetNullInputBehavior()}, }) + w.ev(scpb.Status_PUBLIC, &scpb.FunctionSecurity{ + FunctionID: fnDesc.GetID(), + Security: 
catpb.FunctionSecurity{Security: fnDesc.GetSecurity()}, + }) fnBody := &scpb.FunctionBody{ FunctionID: fnDesc.GetID(), diff --git a/pkg/sql/schemachanger/scdecomp/testdata/function b/pkg/sql/schemachanger/scdecomp/testdata/function index 3fe51f52d59b..d0fc168413c0 100644 --- a/pkg/sql/schemachanger/scdecomp/testdata/function +++ b/pkg/sql/schemachanger/scdecomp/testdata/function @@ -122,6 +122,11 @@ ElementState: nullInputBehavior: nullInputBehavior: CALLED_ON_NULL_INPUT Status: PUBLIC +- FunctionSecurity: + functionId: 110 + security: + security: INVOKER + Status: PUBLIC - FunctionVolatility: functionId: 110 volatility: diff --git a/pkg/sql/schemachanger/scdeps/exec_deps.go b/pkg/sql/schemachanger/scdeps/exec_deps.go index 3ddf1f4c9de1..f3d8013df298 100644 --- a/pkg/sql/schemachanger/scdeps/exec_deps.go +++ b/pkg/sql/schemachanger/scdeps/exec_deps.go @@ -225,7 +225,7 @@ func (d *txnDeps) DeleteDescriptor(ctx context.Context, id descpb.ID) error { } // UpdateZoneConfig implements the scexec.Catalog interface. -func (d *txnDeps) UpdateZoneConfig(ctx context.Context, id descpb.ID, zc zonepb.ZoneConfig) error { +func (d *txnDeps) UpdateZoneConfig(ctx context.Context, id descpb.ID, zc *zonepb.ZoneConfig) error { var newZc catalog.ZoneConfig oldZc, err := d.descsCollection.GetZoneConfig(ctx, d.txn.KV(), id) if err != nil { @@ -239,10 +239,47 @@ func (d *txnDeps) UpdateZoneConfig(ctx context.Context, id descpb.ID, zc zonepb. if oldZc != nil { rawBytes = oldZc.GetRawBytesInStorage() } - newZc = zone.NewZoneConfigWithRawBytes(&zc, rawBytes) + newZc = zone.NewZoneConfigWithRawBytes(zc, rawBytes) return d.descsCollection.WriteZoneConfigToBatch(ctx, d.kvTrace, d.getOrCreateBatch(), id, newZc) } +// UpdateSubzoneConfig implements the scexec.Catalog interface. +func (d *txnDeps) UpdateSubzoneConfig( + ctx context.Context, + tableID descpb.ID, + subzones []zonepb.Subzone, + subzoneSpans []zonepb.SubzoneSpan, +) error { + var newZc catalog.ZoneConfig + oldZc, err := d.descsCollection.GetZoneConfig(ctx, d.txn.KV(), tableID) + if err != nil { + return err + } + + var rawBytes []byte + var zc *zonepb.ZoneConfig + // If the zone config already exists, we need to preserve the raw bytes as the + // expected value that we will be updating. Otherwise, this will be a clean + // insert with no expected raw bytes. + if oldZc != nil { + rawBytes = oldZc.GetRawBytesInStorage() + zc = oldZc.ZoneConfigProto() + } else { + // If no zone config exists, create a new one. + zc = &zonepb.ZoneConfig{} + } + + // Update the subzones in the zone config. + for _, s := range subzones { + zc.SetSubzone(s) + } + zc.SubzoneSpans = subzoneSpans + + newZc = zone.NewZoneConfigWithRawBytes(zc, rawBytes) + return d.descsCollection.WriteZoneConfigToBatch(ctx, d.kvTrace, d.getOrCreateBatch(), + tableID, newZc) +} + // DeleteZoneConfig implements the scexec.Catalog interface. func (d *txnDeps) DeleteZoneConfig(ctx context.Context, id descpb.ID) error { return d.descsCollection.DeleteZoneConfigInBatch(ctx, d.kvTrace, d.getOrCreateBatch(), id) diff --git a/pkg/sql/schemachanger/scdeps/sctestdeps/test_deps.go b/pkg/sql/schemachanger/scdeps/sctestdeps/test_deps.go index 59f32446c103..297bfb318c05 100644 --- a/pkg/sql/schemachanger/scdeps/sctestdeps/test_deps.go +++ b/pkg/sql/schemachanger/scdeps/sctestdeps/test_deps.go @@ -814,12 +814,36 @@ func (s *TestState) DeleteDescriptor(ctx context.Context, id descpb.ID) error { // UpdateZoneConfig implements the scexec.Catalog interface. 
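// Note: this sctestdeps implementation only records the intended write in an
// in-memory map keyed by descriptor ID; the production implementation in
// scdeps/exec_deps.go is the one that persists the proto to system.zones via
// WriteZoneConfigToBatch.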
func (s *TestState) UpdateZoneConfig( - ctx context.Context, id descpb.ID, zc zonepb.ZoneConfig, + ctx context.Context, id descpb.ID, zc *zonepb.ZoneConfig, ) error { if s.catalogChanges.zoneConfigsToUpdate == nil { s.catalogChanges.zoneConfigsToUpdate = make(map[descpb.ID]*zonepb.ZoneConfig) } - s.catalogChanges.zoneConfigsToUpdate[id] = &zc + s.catalogChanges.zoneConfigsToUpdate[id] = zc + return nil +} + +// UpdateSubzoneConfig implements the scexec.Catalog interface. +func (s *TestState) UpdateSubzoneConfig( + ctx context.Context, + tableID descpb.ID, + subzones []zonepb.Subzone, + subzoneSpans []zonepb.SubzoneSpan, +) error { + if s.catalogChanges.zoneConfigsToUpdate == nil { + s.catalogChanges.zoneConfigsToUpdate = make(map[descpb.ID]*zonepb.ZoneConfig) + } + var zc *zonepb.ZoneConfig + if czc, ok := s.catalogChanges.zoneConfigsToUpdate[tableID]; ok { + czc.Subzones = subzones + czc.SubzoneSpans = subzoneSpans + zc = czc + } else { + zc = zonepb.NewZoneConfig() + zc.Subzones = subzones + zc.SubzoneSpans = subzoneSpans + } + s.catalogChanges.zoneConfigsToUpdate[tableID] = zc return nil } diff --git a/pkg/sql/schemachanger/scexec/dependencies.go b/pkg/sql/schemachanger/scexec/dependencies.go index 777d59497b7d..51bf91254706 100644 --- a/pkg/sql/schemachanger/scexec/dependencies.go +++ b/pkg/sql/schemachanger/scexec/dependencies.go @@ -73,20 +73,25 @@ type Catalog interface { DeleteDescriptor(ctx context.Context, id descpb.ID) error // UpdateZoneConfig upserts a zone config for a descriptor. - UpdateZoneConfig(ctx context.Context, id descpb.ID, zc zonepb.ZoneConfig) error + UpdateZoneConfig(ctx context.Context, id descpb.ID, zc *zonepb.ZoneConfig) error + + // UpdateSubzoneConfig upserts a subzone config into the zone config for a + // descriptor. + UpdateSubzoneConfig( + ctx context.Context, + tableID descpb.ID, + subzones []zonepb.Subzone, + subzoneSpans []zonepb.SubzoneSpan, + ) error // DeleteZoneConfig deletes the zone config for a descriptor. DeleteZoneConfig(ctx context.Context, id descpb.ID) error // UpdateComment upserts a comment for the (objID, subID, cmtType) key. - UpdateComment( - ctx context.Context, key catalogkeys.CommentKey, cmt string, - ) error + UpdateComment(ctx context.Context, key catalogkeys.CommentKey, cmt string) error // DeleteComment deletes a comment with (objID, subID, cmtType) key. - DeleteComment( - ctx context.Context, key catalogkeys.CommentKey, - ) error + DeleteComment(ctx context.Context, key catalogkeys.CommentKey) error // Validate validates all the uncommitted catalog changes performed // in this transaction so far. diff --git a/pkg/sql/schemachanger/scexec/exec_immediate_mutation.go b/pkg/sql/schemachanger/scexec/exec_immediate_mutation.go index 699dea8d75d4..e7bc2eb6e019 100644 --- a/pkg/sql/schemachanger/scexec/exec_immediate_mutation.go +++ b/pkg/sql/schemachanger/scexec/exec_immediate_mutation.go @@ -53,9 +53,14 @@ type sequenceToInit struct { startVal int64 } +// zoneConfigToUpdate is a struct that holds the information needed to update a +// zone config or subzone configs. If zc is subzone config, then we treat this +// as a subzone write and update the subzone configs (along with their subzone +// spans for the table). Otherwise, we write the whole zone config as an update. 
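// exec() dispatches on isSubzoneConfig: subzone writes go through
// Catalog.UpdateSubzoneConfig, which merges the given subzones into whatever
// zone config row the table already has, while full writes go through
// Catalog.UpdateZoneConfig and replace the entire proto.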
type zoneConfigToUpdate struct { - id descpb.ID - zc zonepb.ZoneConfig + id descpb.ID + zc *zonepb.ZoneConfig + isSubzoneConfig bool } var _ scmutationexec.ImmediateMutationStateUpdater = (*immediateState)(nil) @@ -136,7 +141,7 @@ func (s *immediateState) InitSequence(id descpb.ID, startVal int64) { }) } -func (s *immediateState) UpdateZoneConfig(id descpb.ID, zc zonepb.ZoneConfig) { +func (s *immediateState) UpdateZoneConfig(id descpb.ID, zc *zonepb.ZoneConfig) { s.zoneConfigsToUpdate = append(s.zoneConfigsToUpdate, zoneConfigToUpdate{ id: id, @@ -144,6 +149,18 @@ func (s *immediateState) UpdateZoneConfig(id descpb.ID, zc zonepb.ZoneConfig) { }) } +func (s *immediateState) UpdateSubzoneConfig( + tableid descpb.ID, subzone zonepb.Subzone, subzoneSpans []zonepb.SubzoneSpan, +) { + zc := &zonepb.ZoneConfig{Subzones: []zonepb.Subzone{subzone}, SubzoneSpans: subzoneSpans} + s.zoneConfigsToUpdate = append(s.zoneConfigsToUpdate, + zoneConfigToUpdate{ + id: tableid, + zc: zc, + isSubzoneConfig: true, + }) +} + func (s *immediateState) Reset() { s.withReset = true } @@ -211,8 +228,16 @@ func (s *immediateState) exec(ctx context.Context, c Catalog) error { } for _, zc := range s.zoneConfigsToUpdate { - if err := c.UpdateZoneConfig(ctx, zc.id, zc.zc); err != nil { - return err + if zc.isSubzoneConfig { + if err := c.UpdateSubzoneConfig(ctx, zc.id, zc.zc.Subzones, + zc.zc.SubzoneSpans); err != nil { + return err + } + } else { + + if err := c.UpdateZoneConfig(ctx, zc.id, zc.zc); err != nil { + return err + } } } diff --git a/pkg/sql/schemachanger/scexec/mocks_generated_test.go b/pkg/sql/schemachanger/scexec/mocks_generated_test.go index 9b318f73a126..c264c45a5e86 100644 --- a/pkg/sql/schemachanger/scexec/mocks_generated_test.go +++ b/pkg/sql/schemachanger/scexec/mocks_generated_test.go @@ -244,8 +244,22 @@ func (mr *MockCatalogMockRecorder) UpdateComment(arg0, arg1, arg2 interface{}) * return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateComment", reflect.TypeOf((*MockCatalog)(nil).UpdateComment), arg0, arg1, arg2) } +// UpdateSubzoneConfig mocks base method. +func (m *MockCatalog) UpdateSubzoneConfig(arg0 context.Context, arg1 catid.DescID, arg2 []zonepb.Subzone, arg3 []zonepb.SubzoneSpan) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateSubzoneConfig", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateSubzoneConfig indicates an expected call of UpdateSubzoneConfig. +func (mr *MockCatalogMockRecorder) UpdateSubzoneConfig(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateSubzoneConfig", reflect.TypeOf((*MockCatalog)(nil).UpdateSubzoneConfig), arg0, arg1, arg2, arg3) +} + // UpdateZoneConfig mocks base method. 
-func (m *MockCatalog) UpdateZoneConfig(arg0 context.Context, arg1 catid.DescID, arg2 zonepb.ZoneConfig) error { +func (m *MockCatalog) UpdateZoneConfig(arg0 context.Context, arg1 catid.DescID, arg2 *zonepb.ZoneConfig) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "UpdateZoneConfig", arg0, arg1, arg2) ret0, _ := ret[0].(error) diff --git a/pkg/sql/schemachanger/scexec/scmutationexec/database.go b/pkg/sql/schemachanger/scexec/scmutationexec/database.go index 51a6561de670..9f05b4067485 100644 --- a/pkg/sql/schemachanger/scexec/scmutationexec/database.go +++ b/pkg/sql/schemachanger/scexec/scmutationexec/database.go @@ -41,6 +41,6 @@ func (i *immediateVisitor) CreateDatabaseDescriptor( func (i *immediateVisitor) AddDatabaseZoneConfig( ctx context.Context, op scop.AddDatabaseZoneConfig, ) error { - i.ImmediateMutationStateUpdater.UpdateZoneConfig(op.DatabaseID, *op.ZoneConfig) + i.ImmediateMutationStateUpdater.UpdateZoneConfig(op.DatabaseID, op.ZoneConfig) return nil } diff --git a/pkg/sql/schemachanger/scexec/scmutationexec/dependencies.go b/pkg/sql/schemachanger/scexec/scmutationexec/dependencies.go index f6466e1b8105..ce700dd25607 100644 --- a/pkg/sql/schemachanger/scexec/scmutationexec/dependencies.go +++ b/pkg/sql/schemachanger/scexec/scmutationexec/dependencies.go @@ -84,7 +84,10 @@ type ImmediateMutationStateUpdater interface { InitSequence(id descpb.ID, startVal int64) // UpdateZoneConfig updates a zone config. - UpdateZoneConfig(id descpb.ID, zc zonepb.ZoneConfig) + UpdateZoneConfig(id descpb.ID, zc *zonepb.ZoneConfig) + + // UpdateSubzoneConfig updates subzone zone configs. + UpdateSubzoneConfig(tableid descpb.ID, subzone zonepb.Subzone, subzoneSpans []zonepb.SubzoneSpan) // Reset schedules a reset of the in-txn catalog state // to undo the modifications from earlier stages. 
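To make the updated contract concrete, the following is a minimal, hypothetical recording implementation of the two zone-config methods of ImmediateMutationStateUpdater, in the spirit of the sctestdeps double above. The recordingUpdater type and newRecordingUpdater helper are invented for illustration; the method signatures, zonepb.NewZoneConfig, and (*ZoneConfig).SetSubzone come from the change itself.

package example

import (
	"github.com/cockroachdb/cockroach/pkg/config/zonepb"
	"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
)

// recordingUpdater captures zone-config mutations so a test can inspect them.
type recordingUpdater struct {
	zones map[descpb.ID]*zonepb.ZoneConfig
}

func newRecordingUpdater() *recordingUpdater {
	return &recordingUpdater{zones: make(map[descpb.ID]*zonepb.ZoneConfig)}
}

// UpdateZoneConfig mirrors the full-write path: the stored proto for the
// descriptor is replaced wholesale.
func (r *recordingUpdater) UpdateZoneConfig(id descpb.ID, zc *zonepb.ZoneConfig) {
	r.zones[id] = zc
}

// UpdateSubzoneConfig mirrors the subzone path: the subzone is merged into
// any existing config (SetSubzone upserts by IndexID and PartitionName) and
// the denormalized subzone spans are replaced.
func (r *recordingUpdater) UpdateSubzoneConfig(
	tableID descpb.ID, subzone zonepb.Subzone, subzoneSpans []zonepb.SubzoneSpan,
) {
	zc, ok := r.zones[tableID]
	if !ok {
		zc = zonepb.NewZoneConfig()
		r.zones[tableID] = zc
	}
	zc.SetSubzone(subzone)
	zc.SubzoneSpans = subzoneSpans
}

A double like this keeps the merge semantics visible: repeated subzone updates for the same (index, partition) pair overwrite each other, matching what UpdateSubzoneConfig in exec_deps.go does against the stored zone config.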
diff --git a/pkg/sql/schemachanger/scexec/scmutationexec/function.go b/pkg/sql/schemachanger/scexec/scmutationexec/function.go index 1256eeec65f8..d258a9befeda 100644 --- a/pkg/sql/schemachanger/scexec/scmutationexec/function.go +++ b/pkg/sql/schemachanger/scexec/scmutationexec/function.go @@ -103,3 +103,14 @@ func (i *immediateVisitor) SetFunctionBody(ctx context.Context, op scop.SetFunct return nil } + +func (i *immediateVisitor) SetFunctionSecurity( + ctx context.Context, op scop.SetFunctionSecurity, +) error { + fn, err := i.checkOutFunction(ctx, op.FunctionID) + if err != nil { + return err + } + fn.SetSecurity(op.Security) + return nil +} diff --git a/pkg/sql/schemachanger/scexec/scmutationexec/index.go b/pkg/sql/schemachanger/scexec/scmutationexec/index.go index 0cb7a2ae6547..5b2017062587 100644 --- a/pkg/sql/schemachanger/scexec/scmutationexec/index.go +++ b/pkg/sql/schemachanger/scexec/scmutationexec/index.go @@ -497,3 +497,10 @@ func (m *deferredVisitor) MaybeAddSplitForIndex( m.AddIndexForMaybeSplitAndScatter(op.TableID, op.IndexID) return nil } + +func (i *immediateVisitor) AddIndexZoneConfig( + ctx context.Context, op scop.AddIndexZoneConfig, +) error { + i.ImmediateMutationStateUpdater.UpdateSubzoneConfig(op.TableID, op.Subzone, op.SubzoneSpans) + return nil +} diff --git a/pkg/sql/schemachanger/scexec/scmutationexec/table.go b/pkg/sql/schemachanger/scexec/scmutationexec/table.go index daa3c7555cb8..ad1201295cf4 100644 --- a/pkg/sql/schemachanger/scexec/scmutationexec/table.go +++ b/pkg/sql/schemachanger/scexec/scmutationexec/table.go @@ -19,6 +19,6 @@ import ( func (i *immediateVisitor) AddTableZoneConfig( ctx context.Context, op scop.AddTableZoneConfig, ) error { - i.ImmediateMutationStateUpdater.UpdateZoneConfig(op.TableID, *op.ZoneConfig) + i.ImmediateMutationStateUpdater.UpdateZoneConfig(op.TableID, op.ZoneConfig) return nil } diff --git a/pkg/sql/schemachanger/schemachanger_test.go b/pkg/sql/schemachanger/schemachanger_test.go index beeb687883b1..58485c337304 100644 --- a/pkg/sql/schemachanger/schemachanger_test.go +++ b/pkg/sql/schemachanger/schemachanger_test.go @@ -620,10 +620,10 @@ func TestConcurrentSchemaChanges(t *testing.T) { // Decrease the adopt loop interval so that retries happen quickly. 
JobsTestingKnobs: jobs.NewTestingKnobsWithShortIntervals(), } - s, sqlDB, _ := serverutils.StartServer(t, params) + s, setupConn, _ := serverutils.StartServer(t, params) defer s.Stopper().Stop(ctx) dbName, scName, tblName := "testdb", "testsc", "t" - useLegacyOrDeclarative := func() error { + useLegacyOrDeclarative := func(sqlDB *gosql.DB) error { decl := rand.Intn(2) == 0 if !decl { _, err := sqlDB.Exec("SET use_declarative_schema_changer='off';") @@ -633,41 +633,41 @@ func TestConcurrentSchemaChanges(t *testing.T) { return err } - createSchema := func() error { + createSchema := func(conn *gosql.DB) error { return testutils.SucceedsSoonError(func() error { - _, err := sqlDB.Exec(fmt.Sprintf("CREATE DATABASE IF NOT EXISTS %v;", dbName)) + _, err := conn.Exec(fmt.Sprintf("CREATE DATABASE IF NOT EXISTS %v;", dbName)) if err != nil { return err } - _, err = sqlDB.Exec(fmt.Sprintf("CREATE SCHEMA IF NOT EXISTS %v.%v;", dbName, scName)) + _, err = conn.Exec(fmt.Sprintf("CREATE SCHEMA IF NOT EXISTS %v.%v;", dbName, scName)) if err != nil { return err } - _, err = sqlDB.Exec(fmt.Sprintf("CREATE TABLE IF NOT EXISTS %v.%v.%v(col INT PRIMARY KEY);", dbName, scName, tblName)) + _, err = conn.Exec(fmt.Sprintf("CREATE TABLE IF NOT EXISTS %v.%v.%v(col INT PRIMARY KEY);", dbName, scName, tblName)) if err != nil { return err } - _, err = sqlDB.Exec(fmt.Sprintf("DELETE FROM %v.%v.%v;", dbName, scName, tblName)) + _, err = conn.Exec(fmt.Sprintf("DELETE FROM %v.%v.%v;", dbName, scName, tblName)) if err != nil { return err } - _, err = sqlDB.Exec(fmt.Sprintf("INSERT INTO %v.%v.%v SELECT generate_series(1,100);", dbName, scName, tblName)) + _, err = conn.Exec(fmt.Sprintf("INSERT INTO %v.%v.%v SELECT generate_series(1,100);", dbName, scName, tblName)) if err != nil { return err } return nil }) } - require.NoError(t, createSchema()) + require.NoError(t, createSchema(setupConn)) // repeatWorkWithInterval repeats `work` indefinitely every `workInterval` until // `ctx` is cancelled. repeatWorkWithInterval := func( - workerName string, workInterval time.Duration, work func(sqlDB *gosql.DB) error, + workerName string, workInterval time.Duration, work func(workConn *gosql.DB) error, ) func(context.Context) error { return func(workerCtx context.Context) error { - sqlDB := s.SQLConn(t) - sqlDB.SetMaxOpenConns(1) + workConn := s.SQLConn(t) + workConn.SetMaxOpenConns(1) for { jitteredInterval := workInterval * time.Duration(0.8+0.4*rand.Float32()) select { @@ -675,7 +675,7 @@ func TestConcurrentSchemaChanges(t *testing.T) { t.Logf("%v is signaled to finish work", workerName) return nil case <-time.After(jitteredInterval): - if err := work(sqlDB); err != nil { + if err := work(workConn); err != nil { t.Logf("%v encounters error %v; signal to main routine and finish working", workerName, err.Error()) return err } @@ -686,23 +686,23 @@ func TestConcurrentSchemaChanges(t *testing.T) { var nextObjectID atomic.Int64 // A goroutine that repeatedly renames database `testdb` randomly. 
- g.GoCtx(repeatWorkWithInterval("rename-db-worker", renameDBInterval, func(sqlDB *gosql.DB) error { - if err := useLegacyOrDeclarative(); err != nil { + g.GoCtx(repeatWorkWithInterval("rename-db-worker", renameDBInterval, func(workerConn *gosql.DB) error { + if err := useLegacyOrDeclarative(workerConn); err != nil { return err } drop := rand.Intn(2) == 0 if drop { - if _, err := sqlDB.Exec(fmt.Sprintf("DROP DATABASE %v CASCADE", dbName)); err != nil { + if _, err := workerConn.Exec(fmt.Sprintf("DROP DATABASE %v CASCADE", dbName)); err != nil { return err } t.Logf("DROP DATABASE %v", dbName) - return createSchema() + return createSchema(workerConn) } newDBName := fmt.Sprintf("testdb_%v", nextObjectID.Add(1)) if newDBName == dbName { return nil } - if _, err := sqlDB.Exec(fmt.Sprintf("ALTER DATABASE %v RENAME TO %v", dbName, newDBName)); err != nil { + if _, err := workerConn.Exec(fmt.Sprintf("ALTER DATABASE %v RENAME TO %v", dbName, newDBName)); err != nil { return err } dbName = newDBName @@ -711,8 +711,8 @@ func TestConcurrentSchemaChanges(t *testing.T) { })) // A goroutine that renames schema `testdb.testsc` randomly. - g.GoCtx(repeatWorkWithInterval("rename-schema-worker", renameSCInterval, func(sqlDB *gosql.DB) error { - if err := useLegacyOrDeclarative(); err != nil { + g.GoCtx(repeatWorkWithInterval("rename-schema-worker", renameSCInterval, func(workerConn *gosql.DB) error { + if err := useLegacyOrDeclarative(workerConn); err != nil { return err } drop := rand.Intn(2) == 0 @@ -722,9 +722,9 @@ func TestConcurrentSchemaChanges(t *testing.T) { } var err error if !drop { - _, err = sqlDB.Exec(fmt.Sprintf("ALTER SCHEMA %v.%v RENAME TO %v", dbName, scName, newSCName)) + _, err = workerConn.Exec(fmt.Sprintf("ALTER SCHEMA %v.%v RENAME TO %v", dbName, scName, newSCName)) } else { - _, err = sqlDB.Exec(fmt.Sprintf("DROP SCHEMA %v.%v CASCADE", dbName, scName)) + _, err = workerConn.Exec(fmt.Sprintf("DROP SCHEMA %v.%v CASCADE", dbName, scName)) } if err == nil { if !drop { @@ -732,7 +732,7 @@ func TestConcurrentSchemaChanges(t *testing.T) { t.Logf("RENAME SCHEMA TO %v", newSCName) } else { t.Logf("DROP SCHEMA TO %v", scName) - return createSchema() + return createSchema(workerConn) } } else if isPQErrWithCode(err, pgcode.UndefinedDatabase, pgcode.UndefinedSchema) { err = nil // mute those errors as they're expected @@ -742,17 +742,17 @@ func TestConcurrentSchemaChanges(t *testing.T) { })) // A goroutine that renames table `testdb.testsc.t` randomly. 
- g.GoCtx(repeatWorkWithInterval("rename-tbl-worker", renameTblInterval, func(sqlDB *gosql.DB) error { - if err := useLegacyOrDeclarative(); err != nil { + g.GoCtx(repeatWorkWithInterval("rename-tbl-worker", renameTblInterval, func(workerConn *gosql.DB) error { + if err := useLegacyOrDeclarative(workerConn); err != nil { return err } newTblName := fmt.Sprintf("t_%v", nextObjectID.Add(1)) drop := rand.Intn(2) == 0 var err error if !drop { - _, err = sqlDB.Exec(fmt.Sprintf(`ALTER TABLE %v.%v.%v RENAME TO %v`, dbName, scName, tblName, newTblName)) + _, err = workerConn.Exec(fmt.Sprintf(`ALTER TABLE %v.%v.%v RENAME TO %v`, dbName, scName, tblName, newTblName)) } else { - _, err = sqlDB.Exec(fmt.Sprintf(`DROP TABLE %v.%v.%v`, dbName, scName, tblName)) + _, err = workerConn.Exec(fmt.Sprintf(`DROP TABLE %v.%v.%v`, dbName, scName, tblName)) } if err == nil { if !drop { @@ -760,7 +760,7 @@ func TestConcurrentSchemaChanges(t *testing.T) { t.Logf("RENAME TABLE TO %v", newTblName) } else { t.Logf("DROP TABLE %v", newTblName) - return createSchema() + return createSchema(workerConn) } } else if isPQErrWithCode(err, pgcode.UndefinedDatabase, pgcode.UndefinedSchema, pgcode.InvalidSchemaName, pgcode.UndefinedObject, pgcode.UndefinedTable) { err = nil @@ -770,14 +770,14 @@ func TestConcurrentSchemaChanges(t *testing.T) { })) // A goroutine that adds columns to `testdb.testsc.t` randomly. - g.GoCtx(repeatWorkWithInterval("add-column-worker", addColInterval, func(sqlDB *gosql.DB) error { - if err := useLegacyOrDeclarative(); err != nil { + g.GoCtx(repeatWorkWithInterval("add-column-worker", addColInterval, func(workerConn *gosql.DB) error { + if err := useLegacyOrDeclarative(workerConn); err != nil { return err } dbName, scName, tblName := dbName, scName, tblName newColName := fmt.Sprintf("col_%v", nextObjectID.Add(1)) - _, err := sqlDB.Exec(fmt.Sprintf("ALTER TABLE %v.%v.%v ADD COLUMN %v INT DEFAULT %v", + _, err := workerConn.Exec(fmt.Sprintf("ALTER TABLE %v.%v.%v ADD COLUMN %v INT DEFAULT %v", dbName, scName, tblName, newColName, rand.Intn(100))) if err == nil { t.Logf("ADD COLUMN %v TO %v.%v.%v", newColName, dbName, scName, tblName) @@ -790,18 +790,18 @@ func TestConcurrentSchemaChanges(t *testing.T) { })) // A goroutine that drops columns from `testdb.testsc.t` randomly. - g.GoCtx(repeatWorkWithInterval("drop-column-worker", dropColInterval, func(sqlDB *gosql.DB) error { - if err := useLegacyOrDeclarative(); err != nil { + g.GoCtx(repeatWorkWithInterval("drop-column-worker", dropColInterval, func(workerConn *gosql.DB) error { + if err := useLegacyOrDeclarative(workerConn); err != nil { return err } // Randomly pick a non-PK column to drop. dbName, scName, tblName := dbName, scName, tblName - colName, err := getANonPrimaryKeyColumn(sqlDB, dbName, scName, tblName) + colName, err := getANonPrimaryKeyColumn(workerConn, dbName, scName, tblName) if err != nil || colName == "" { return err } - _, err = sqlDB.Exec(fmt.Sprintf("ALTER TABLE %v.%v.%v DROP COLUMN %v;", + _, err = workerConn.Exec(fmt.Sprintf("ALTER TABLE %v.%v.%v DROP COLUMN %v;", dbName, scName, tblName, colName)) if err == nil { t.Logf("DROP COLUMN %v FROM %v.%v.%v", colName, dbName, scName, tblName) @@ -814,17 +814,17 @@ func TestConcurrentSchemaChanges(t *testing.T) { })) // A goroutine that creates secondary index on a randomly selected column. 
- g.GoCtx(repeatWorkWithInterval("create-index-worker", createIdxInterval, func(sqlDB *gosql.DB) error { + g.GoCtx(repeatWorkWithInterval("create-index-worker", createIdxInterval, func(workerConn *gosql.DB) error { newIndexName := fmt.Sprintf("idx_%v", nextObjectID.Add(1)) // Randomly pick a non-PK column to create an index on. dbName, scName, tblName := dbName, scName, tblName - colName, err := getANonPrimaryKeyColumn(sqlDB, dbName, scName, tblName) + colName, err := getANonPrimaryKeyColumn(workerConn, dbName, scName, tblName) if err != nil || colName == "" { return err } - _, err = sqlDB.Exec(fmt.Sprintf("CREATE INDEX %v ON %v.%v.%v (%v);", + _, err = workerConn.Exec(fmt.Sprintf("CREATE INDEX %v ON %v.%v.%v (%v);", newIndexName, dbName, scName, tblName, colName)) if err == nil { t.Logf("CREATE INDEX %v ON %v.%v.%v(%v)", newIndexName, dbName, scName, tblName, colName) @@ -841,17 +841,17 @@ func TestConcurrentSchemaChanges(t *testing.T) { })) // A goroutine that drops a secondary index randomly. - g.GoCtx(repeatWorkWithInterval("drop-index-worker", dropIdxInterval, func(sqlDB *gosql.DB) error { - if err := useLegacyOrDeclarative(); err != nil { + g.GoCtx(repeatWorkWithInterval("drop-index-worker", dropIdxInterval, func(workerConn *gosql.DB) error { + if err := useLegacyOrDeclarative(workerConn); err != nil { return err } // Randomly pick a public, secondary index to drop. dbName, scName, tblName := dbName, scName, tblName - indexName, err := getASecondaryIndex(sqlDB, dbName, scName, tblName) + indexName, err := getASecondaryIndex(workerConn, dbName, scName, tblName) if err != nil || indexName == "" { return err } - _, err = sqlDB.Exec(fmt.Sprintf("DROP INDEX %v.%v.%v@%v;", dbName, scName, tblName, indexName)) + _, err = workerConn.Exec(fmt.Sprintf("DROP INDEX %v.%v.%v@%v;", dbName, scName, tblName, indexName)) if err == nil { t.Logf("DROP INDEX %v FROM %v.%v.%v", indexName, dbName, scName, tblName) } else if isPQErrWithCode(err, pgcode.UndefinedDatabase, pgcode.UndefinedSchema, @@ -870,8 +870,8 @@ func TestConcurrentSchemaChanges(t *testing.T) { } // getANonPrimaryKeyColumn returns a non-primary-key column from table `dbName.scName.tblName`. -func getANonPrimaryKeyColumn(sqlDB *gosql.DB, dbName, scName, tblName string) (string, error) { - colNameRow, err := sqlDB.Query(fmt.Sprintf(` +func getANonPrimaryKeyColumn(workerConn *gosql.DB, dbName, scName, tblName string) (string, error) { + colNameRow, err := workerConn.Query(fmt.Sprintf(` SELECT column_name FROM [show columns from %s.%s.%s] WHERE column_name != 'col' @@ -894,8 +894,8 @@ ORDER BY random(); -- shuffle column output } // getASecondaryIndex returns a secondary index from table `dbName.scName.tblName`. 
-func getASecondaryIndex(sqlDB *gosql.DB, dbName, scName, tblName string) (string, error) { - colNameRow, err := sqlDB.Query(fmt.Sprintf(` +func getASecondaryIndex(workerConn *gosql.DB, dbName, scName, tblName string) (string, error) { + colNameRow, err := workerConn.Query(fmt.Sprintf(` SELECT index_name FROM [show indexes from %s.%s.%s] WHERE index_name NOT LIKE '%%_pkey' diff --git a/pkg/sql/schemachanger/scjob/BUILD.bazel b/pkg/sql/schemachanger/scjob/BUILD.bazel index 8fccfd343f0c..9a0dffad1328 100644 --- a/pkg/sql/schemachanger/scjob/BUILD.bazel +++ b/pkg/sql/schemachanger/scjob/BUILD.bazel @@ -16,6 +16,7 @@ go_library( "//pkg/roachpb", "//pkg/settings/cluster", "//pkg/sql", + "//pkg/sql/catalog", "//pkg/sql/catalog/descs", "//pkg/sql/descmetadata", "//pkg/sql/isql", @@ -25,5 +26,6 @@ go_library( "//pkg/sql/schemachanger/scrun", "//pkg/util/log", "//pkg/util/timeutil", + "@com_github_cockroachdb_errors//:errors", ], ) diff --git a/pkg/sql/schemachanger/scjob/job.go b/pkg/sql/schemachanger/scjob/job.go index c35aadb1f9e9..f318e7520f17 100644 --- a/pkg/sql/schemachanger/scjob/job.go +++ b/pkg/sql/schemachanger/scjob/job.go @@ -18,6 +18,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/sql/descmetadata" "github.com/cockroachdb/cockroach/pkg/sql/isql" @@ -26,6 +27,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scrun" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/timeutil" + "github.com/cockroachdb/errors" ) func init() { @@ -147,6 +149,12 @@ func (n *newSchemaChangeResumer) run(ctx context.Context, execCtxI interface{}) ) // Return permanent errors back, otherwise we will try to retry if sql.IsPermanentSchemaChangeError(err) { + // If a descriptor can't be found, we additionally mark the error as a + // permanent job error, so that non-cancelable jobs don't get retried. If a + // descriptor has gone missing, it isn't likely to come back. + if errors.IsAny(err, catalog.ErrDescriptorNotFound, catalog.ErrDescriptorDropped, catalog.ErrReferencedDescriptorNotFound) { + err = jobs.MarkAsPermanentJobError(err) + } return err } if err != nil { diff --git a/pkg/sql/schemachanger/scop/immediate_mutation.go b/pkg/sql/schemachanger/scop/immediate_mutation.go index 9b3afa7e06f7..4824f4a38390 100644 --- a/pkg/sql/schemachanger/scop/immediate_mutation.go +++ b/pkg/sql/schemachanger/scop/immediate_mutation.go @@ -839,6 +839,12 @@ type SetFunctionBody struct { Body scpb.FunctionBody } +type SetFunctionSecurity struct { + immediateMutationOp + FunctionID descpb.ID + Security catpb.Function_Security +} + type UpdateFunctionTypeReferences struct { immediateMutationOp FunctionID descpb.ID @@ -912,3 +918,12 @@ type AddTableZoneConfig struct { TableID descpb.ID ZoneConfig *zonepb.ZoneConfig } + +// AddIndexZoneConfig adds a zone config to an index. 
+type AddIndexZoneConfig struct { + immediateMutationOp + TableID descpb.ID + IndexID descpb.IndexID + Subzone zonepb.Subzone + SubzoneSpans []zonepb.SubzoneSpan +} diff --git a/pkg/sql/schemachanger/scop/immediate_mutation_visitor_generated.go b/pkg/sql/schemachanger/scop/immediate_mutation_visitor_generated.go index ad316fcb6666..a064033aab24 100644 --- a/pkg/sql/schemachanger/scop/immediate_mutation_visitor_generated.go +++ b/pkg/sql/schemachanger/scop/immediate_mutation_visitor_generated.go @@ -126,6 +126,7 @@ type ImmediateMutationVisitor interface { SetFunctionLeakProof(context.Context, SetFunctionLeakProof) error SetFunctionNullInputBehavior(context.Context, SetFunctionNullInputBehavior) error SetFunctionBody(context.Context, SetFunctionBody) error + SetFunctionSecurity(context.Context, SetFunctionSecurity) error UpdateFunctionTypeReferences(context.Context, UpdateFunctionTypeReferences) error UpdateFunctionRelationReferences(context.Context, UpdateFunctionRelationReferences) error SetObjectParentID(context.Context, SetObjectParentID) error @@ -138,6 +139,7 @@ type ImmediateMutationVisitor interface { CreateDatabaseDescriptor(context.Context, CreateDatabaseDescriptor) error AddDatabaseZoneConfig(context.Context, AddDatabaseZoneConfig) error AddTableZoneConfig(context.Context, AddTableZoneConfig) error + AddIndexZoneConfig(context.Context, AddIndexZoneConfig) error } // Visit is part of the ImmediateMutationOp interface. @@ -660,6 +662,11 @@ func (op SetFunctionBody) Visit(ctx context.Context, v ImmediateMutationVisitor) return v.SetFunctionBody(ctx, op) } +// Visit is part of the ImmediateMutationOp interface. +func (op SetFunctionSecurity) Visit(ctx context.Context, v ImmediateMutationVisitor) error { + return v.SetFunctionSecurity(ctx, op) +} + // Visit is part of the ImmediateMutationOp interface. func (op UpdateFunctionTypeReferences) Visit(ctx context.Context, v ImmediateMutationVisitor) error { return v.UpdateFunctionTypeReferences(ctx, op) @@ -719,3 +726,8 @@ func (op AddDatabaseZoneConfig) Visit(ctx context.Context, v ImmediateMutationVi func (op AddTableZoneConfig) Visit(ctx context.Context, v ImmediateMutationVisitor) error { return v.AddTableZoneConfig(ctx, op) } + +// Visit is part of the ImmediateMutationOp interface. +func (op AddIndexZoneConfig) Visit(ctx context.Context, v ImmediateMutationVisitor) error { + return v.AddIndexZoneConfig(ctx, op) +} diff --git a/pkg/sql/schemachanger/scpb/elements.proto b/pkg/sql/schemachanger/scpb/elements.proto index 3723caa7c8f9..4f57fc5a6d60 100644 --- a/pkg/sql/schemachanger/scpb/elements.proto +++ b/pkg/sql/schemachanger/scpb/elements.proto @@ -148,6 +148,7 @@ message ElementProto { FunctionLeakProof function_leak_proof = 162 [(gogoproto.moretags) = "parent:\"Function\""]; FunctionNullInputBehavior function_null_input_behavior = 163 [(gogoproto.moretags) = "parent:\"Function\""]; FunctionBody function_body = 164 [(gogoproto.moretags) = "parent:\"Function\""]; + FunctionSecurity function_security = 165 [(gogoproto.moretags) = "parent:\"Function\""]; // Type elements. 
TypeComment type_comment = 180 [(gogoproto.moretags) = "parent:\"CompositeType,EnumType\""]; @@ -682,6 +683,15 @@ message IndexZoneConfig { uint32 table_id = 1 [(gogoproto.customname) = "TableID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.DescID"]; uint32 index_id = 2 [(gogoproto.customname) = "IndexID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.IndexID"]; string partition_name = 3; + cockroach.config.zonepb.Subzone subzone = 4 [(gogoproto.nullable) = false]; + // `subzone_spans` is used solely for zone config writes. These spans are + // recreated for the table zone config during each configuration change. + repeated cockroach.config.zonepb.SubzoneSpan subzone_spans = 5 [(gogoproto.nullable) = false]; + // `seq_num` differentiates subzone config elements tied to the same + // index/partition. For example, to update an index or partition's subzone + // config, we drop the existing element and add a new one with a different + // `seq_num`. + uint32 seq_num = 6; } // DatabaseData models what needs to be GCed when a database is dropped. @@ -769,6 +779,11 @@ message FunctionBody { repeated uint32 uses_function_ids = 8 [(gogoproto.customname) = "UsesFunctionIDs", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.DescID"]; } +message FunctionSecurity { + uint32 function_id = 1 [(gogoproto.customname) = "FunctionID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.DescID"]; + cockroach.sql.catalog.catpb.FunctionSecurity security = 2 [(gogoproto.nullable) = false]; +} + message ElementCreationMetadata { bool in_23_1_or_later = 1; bool in_24_3_or_later = 2; diff --git a/pkg/sql/schemachanger/scpb/elements_generated.go b/pkg/sql/schemachanger/scpb/elements_generated.go index c03c214f8b07..429cefe7c871 100644 --- a/pkg/sql/schemachanger/scpb/elements_generated.go +++ b/pkg/sql/schemachanger/scpb/elements_generated.go @@ -1199,6 +1199,43 @@ func (c *ElementCollection[E]) FilterFunctionNullInputBehavior() *ElementCollect return (*ElementCollection[*FunctionNullInputBehavior])(ret) } +func (e FunctionSecurity) element() {} + +// Element implements ElementGetter. +func (e * ElementProto_FunctionSecurity) Element() Element { + return e.FunctionSecurity +} + +// ForEachFunctionSecurity iterates over elements of type FunctionSecurity. +// Deprecated +func ForEachFunctionSecurity( + c *ElementCollection[Element], fn func(current Status, target TargetStatus, e *FunctionSecurity), +) { + c.FilterFunctionSecurity().ForEach(fn) +} + +// FindFunctionSecurity finds the first element of type FunctionSecurity. +// Deprecated +func FindFunctionSecurity( + c *ElementCollection[Element], +) (current Status, target TargetStatus, element *FunctionSecurity) { + if tc := c.FilterFunctionSecurity(); !tc.IsEmpty() { + var e Element + current, target, e = tc.Get(0) + element = e.(*FunctionSecurity) + } + return current, target, element +} + +// FunctionSecurityElements filters elements of type FunctionSecurity. +func (c *ElementCollection[E]) FilterFunctionSecurity() *ElementCollection[*FunctionSecurity] { + ret := c.genericFilter(func(_ Status, _ TargetStatus, e Element) bool { + _, ok := e.(*FunctionSecurity) + return ok + }) + return (*ElementCollection[*FunctionSecurity])(ret) +} + func (e FunctionVolatility) element() {} // Element implements ElementGetter.
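To make the `seq_num` comment on IndexZoneConfig above concrete: updating the subzone config of an index or partition is modeled as dropping the old element and adding a replacement whose distinguishing key is a bumped sequence number. A hypothetical sketch of that drop-and-re-add pattern (the helper is invented for illustration; the patch itself only defines the field):

package sketch

import (
	"github.com/cockroachdb/cockroach/pkg/config/zonepb"
	"github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb"
	"github.com/cockroachdb/cockroach/pkg/util/protoutil"
)

// bumpIndexZoneConfigSketch is a hypothetical helper showing the
// drop-and-re-add pattern: `drop` would be targeted to ABSENT while `add`,
// which differs only in payload and SeqNum, would be targeted to PUBLIC.
func bumpIndexZoneConfigSketch(
	old *scpb.IndexZoneConfig, updated zonepb.Subzone, updatedSpans []zonepb.SubzoneSpan,
) (drop, add *scpb.IndexZoneConfig) {
	drop = old
	// Clone so the two elements are independent protos.
	add = protoutil.Clone(old).(*scpb.IndexZoneConfig)
	add.Subzone = updated
	add.SubzoneSpans = updatedSpans
	// The bumped SeqNum is what keeps two elements for the same
	// index/partition distinguishable in the element model.
	add.SeqNum = old.SeqNum + 1
	return drop, add
}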
@@ -2600,6 +2637,8 @@ func (e* ElementProto) SetElement(element Element) { e.ElementOneOf = &ElementProto_FunctionName{ FunctionName: t} case *FunctionNullInputBehavior: e.ElementOneOf = &ElementProto_FunctionNullInputBehavior{ FunctionNullInputBehavior: t} + case *FunctionSecurity: + e.ElementOneOf = &ElementProto_FunctionSecurity{ FunctionSecurity: t} case *FunctionVolatility: e.ElementOneOf = &ElementProto_FunctionVolatility{ FunctionVolatility: t} case *IndexColumn: @@ -2710,6 +2749,7 @@ func GetElementOneOfProtos() []interface{} { ((*ElementProto_FunctionLeakProof)(nil)), ((*ElementProto_FunctionName)(nil)), ((*ElementProto_FunctionNullInputBehavior)(nil)), + ((*ElementProto_FunctionSecurity)(nil)), ((*ElementProto_FunctionVolatility)(nil)), ((*ElementProto_IndexColumn)(nil)), ((*ElementProto_IndexComment)(nil)), @@ -2785,6 +2825,7 @@ func GetElementTypes() []interface{} { ((*FunctionLeakProof)(nil)), ((*FunctionName)(nil)), ((*FunctionNullInputBehavior)(nil)), + ((*FunctionSecurity)(nil)), ((*FunctionVolatility)(nil)), ((*IndexColumn)(nil)), ((*IndexComment)(nil)), diff --git a/pkg/sql/schemachanger/scpb/state.go b/pkg/sql/schemachanger/scpb/state.go index 846793a6ba35..0aa6bffc4402 100644 --- a/pkg/sql/schemachanger/scpb/state.go +++ b/pkg/sql/schemachanger/scpb/state.go @@ -146,6 +146,42 @@ func (e *ElementProto) Element() Element { return e.GetElementOneOf().(ElementGetter).Element() } +// ZoneConfigElement is implemented by the zone config elements (database, +// table, and index) that expose a sequence number and a target descriptor ID. +type ZoneConfigElement interface { + Element + GetSeqNum() uint32 + GetTargetID() catid.DescID +} + +var _ ZoneConfigElement = &DatabaseZoneConfig{} +var _ ZoneConfigElement = &TableZoneConfig{} +var _ ZoneConfigElement = &IndexZoneConfig{} + +func (e *DatabaseZoneConfig) GetSeqNum() uint32 { + return e.SeqNum +} + +func (e *DatabaseZoneConfig) GetTargetID() catid.DescID { + return e.DatabaseID +} + +func (e *TableZoneConfig) GetSeqNum() uint32 { + return e.SeqNum +} + +func (e *TableZoneConfig) GetTargetID() catid.DescID { + return e.TableID +} + +func (e *IndexZoneConfig) GetSeqNum() uint32 { + return e.SeqNum +} + +func (e *IndexZoneConfig) GetTargetID() catid.DescID { + return e.TableID +} + // IsLinkedToSchemaChange return if a Target is linked to a schema change.
func (t *Target) IsLinkedToSchemaChange() bool { return t.Metadata.IsLinkedToSchemaChange() diff --git a/pkg/sql/schemachanger/scpb/uml/table.puml b/pkg/sql/schemachanger/scpb/uml/table.puml index 7ebf1f226d63..228178f087ba 100644 --- a/pkg/sql/schemachanger/scpb/uml/table.puml +++ b/pkg/sql/schemachanger/scpb/uml/table.puml @@ -210,6 +210,11 @@ object FunctionNullInputBehavior FunctionNullInputBehavior : FunctionID FunctionNullInputBehavior : NullInputBehavior +object FunctionSecurity + +FunctionSecurity : FunctionID +FunctionSecurity : Security + object FunctionVolatility FunctionVolatility : FunctionID @@ -254,6 +259,9 @@ object IndexZoneConfig IndexZoneConfig : TableID IndexZoneConfig : IndexID IndexZoneConfig : PartitionName +IndexZoneConfig : Subzone +IndexZoneConfig : []SubzoneSpans +IndexZoneConfig : SeqNum object Namespace @@ -454,6 +462,7 @@ Function <|-- FunctionBody Function <|-- FunctionLeakProof Function <|-- FunctionName Function <|-- FunctionNullInputBehavior +Function <|-- FunctionSecurity Function <|-- FunctionVolatility PrimaryIndex <|-- IndexColumn SecondaryIndex <|-- IndexColumn diff --git a/pkg/sql/schemachanger/scplan/internal/opgen/BUILD.bazel b/pkg/sql/schemachanger/scplan/internal/opgen/BUILD.bazel index 38de72dcb739..b2f9b3f89502 100644 --- a/pkg/sql/schemachanger/scplan/internal/opgen/BUILD.bazel +++ b/pkg/sql/schemachanger/scplan/internal/opgen/BUILD.bazel @@ -37,6 +37,7 @@ go_library( "opgen_function_leakproof.go", "opgen_function_name.go", "opgen_function_null_input.go", + "opgen_function_security.go", "opgen_function_volatility.go", "opgen_index_column.go", "opgen_index_comment.go", diff --git a/pkg/sql/schemachanger/scplan/internal/opgen/opgen_function_security.go b/pkg/sql/schemachanger/scplan/internal/opgen/opgen_function_security.go new file mode 100644 index 000000000000..af8ad48d8ba0 --- /dev/null +++ b/pkg/sql/schemachanger/scplan/internal/opgen/opgen_function_security.go @@ -0,0 +1,40 @@ +// Copyright 2024 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package opgen + +import ( + "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scop" + "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb" +) + +func init() { + opRegistry.register((*scpb.FunctionSecurity)(nil), + toPublic( + scpb.Status_ABSENT, + to(scpb.Status_PUBLIC, + emit(func(this *scpb.FunctionSecurity) *scop.SetFunctionSecurity { + return &scop.SetFunctionSecurity{ + FunctionID: this.FunctionID, + Security: this.Security.Security, + } + }), + ), + ), + toAbsent( + scpb.Status_PUBLIC, + to(scpb.Status_ABSENT, + emit(func(this *scpb.FunctionSecurity) *scop.NotImplementedForPublicObjects { + return notImplementedForPublicObjects(this) + }), + ), + ), + ) +} diff --git a/pkg/sql/schemachanger/scplan/internal/opgen/opgen_index_zone_config.go b/pkg/sql/schemachanger/scplan/internal/opgen/opgen_index_zone_config.go index 32869527778c..cdff9f554e9d 100644 --- a/pkg/sql/schemachanger/scplan/internal/opgen/opgen_index_zone_config.go +++ b/pkg/sql/schemachanger/scplan/internal/opgen/opgen_index_zone_config.go @@ -20,8 +20,13 @@ func init() { toPublic( scpb.Status_ABSENT, to(scpb.Status_PUBLIC, - emit(func(this *scpb.IndexZoneConfig) *scop.NotImplemented { - return &scop.NotImplemented{} + emit(func(this *scpb.IndexZoneConfig) *scop.AddIndexZoneConfig { + return &scop.AddIndexZoneConfig{ + TableID: this.TableID, + IndexID: this.IndexID, + Subzone: this.Subzone, + SubzoneSpans: this.SubzoneSpans, + } }), ), ), diff --git a/pkg/sql/schemachanger/scplan/internal/rules/current/testdata/deprules b/pkg/sql/schemachanger/scplan/internal/rules/current/testdata/deprules index 30f6d978074c..aded0a480298 100644 --- a/pkg/sql/schemachanger/scplan/internal/rules/current/testdata/deprules +++ b/pkg/sql/schemachanger/scplan/internal/rules/current/testdata/deprules @@ -2891,7 +2891,7 @@ deprules kind: Precedence to: relation-Node query: - - $dependent[Type] IN ['*scpb.CheckConstraint', '*scpb.CheckConstraintUnvalidated', '*scpb.Column', '*scpb.ColumnComment', '*scpb.ColumnComputeExpression', '*scpb.ColumnDefaultExpression', '*scpb.ColumnFamily', '*scpb.ColumnName', '*scpb.ColumnNotNull', '*scpb.ColumnOnUpdateExpression', '*scpb.ColumnType', '*scpb.CompositeTypeAttrName', '*scpb.CompositeTypeAttrType', '*scpb.ConstraintComment', '*scpb.ConstraintWithoutIndexName', '*scpb.DatabaseComment', '*scpb.DatabaseRegionConfig', '*scpb.DatabaseRoleSetting', '*scpb.DatabaseZoneConfig', '*scpb.EnumTypeValue', '*scpb.ForeignKeyConstraint', '*scpb.ForeignKeyConstraintUnvalidated', '*scpb.FunctionBody', '*scpb.FunctionLeakProof', '*scpb.FunctionName', '*scpb.FunctionNullInputBehavior', '*scpb.FunctionVolatility', '*scpb.IndexColumn', '*scpb.IndexComment', '*scpb.IndexName', '*scpb.IndexPartitioning', '*scpb.IndexZoneConfig', '*scpb.Namespace', '*scpb.Owner', '*scpb.PrimaryIndex', '*scpb.RowLevelTTL', '*scpb.SchemaChild', '*scpb.SchemaComment', '*scpb.SchemaParent', '*scpb.SecondaryIndex', '*scpb.SecondaryIndexPartial', '*scpb.SequenceOption', '*scpb.SequenceOwner', '*scpb.TableComment', '*scpb.TableLocalityGlobal', '*scpb.TableLocalityPrimaryRegion', '*scpb.TableLocalityRegionalByRow', '*scpb.TableLocalitySecondaryRegion', '*scpb.TablePartitioning', '*scpb.TableSchemaLocked', '*scpb.TableZoneConfig', '*scpb.TemporaryIndex', '*scpb.TypeComment', '*scpb.UniqueWithoutIndexConstraint', '*scpb.UniqueWithoutIndexConstraintUnvalidated', '*scpb.UserPrivileges'] + - $dependent[Type] IN ['*scpb.CheckConstraint', '*scpb.CheckConstraintUnvalidated', '*scpb.Column', '*scpb.ColumnComment',
'*scpb.ColumnComputeExpression', '*scpb.ColumnDefaultExpression', '*scpb.ColumnFamily', '*scpb.ColumnName', '*scpb.ColumnNotNull', '*scpb.ColumnOnUpdateExpression', '*scpb.ColumnType', '*scpb.CompositeTypeAttrName', '*scpb.CompositeTypeAttrType', '*scpb.ConstraintComment', '*scpb.ConstraintWithoutIndexName', '*scpb.DatabaseComment', '*scpb.DatabaseRegionConfig', '*scpb.DatabaseRoleSetting', '*scpb.DatabaseZoneConfig', '*scpb.EnumTypeValue', '*scpb.ForeignKeyConstraint', '*scpb.ForeignKeyConstraintUnvalidated', '*scpb.FunctionBody', '*scpb.FunctionLeakProof', '*scpb.FunctionName', '*scpb.FunctionNullInputBehavior', '*scpb.FunctionSecurity', '*scpb.FunctionVolatility', '*scpb.IndexColumn', '*scpb.IndexComment', '*scpb.IndexName', '*scpb.IndexPartitioning', '*scpb.IndexZoneConfig', '*scpb.Namespace', '*scpb.Owner', '*scpb.PrimaryIndex', '*scpb.RowLevelTTL', '*scpb.SchemaChild', '*scpb.SchemaComment', '*scpb.SchemaParent', '*scpb.SecondaryIndex', '*scpb.SecondaryIndexPartial', '*scpb.SequenceOption', '*scpb.SequenceOwner', '*scpb.TableComment', '*scpb.TableLocalityGlobal', '*scpb.TableLocalityPrimaryRegion', '*scpb.TableLocalityRegionalByRow', '*scpb.TableLocalitySecondaryRegion', '*scpb.TablePartitioning', '*scpb.TableSchemaLocked', '*scpb.TableZoneConfig', '*scpb.TemporaryIndex', '*scpb.TypeComment', '*scpb.UniqueWithoutIndexConstraint', '*scpb.UniqueWithoutIndexConstraintUnvalidated', '*scpb.UserPrivileges'] - $relation[Type] IN ['*scpb.AliasType', '*scpb.CompositeType', '*scpb.Database', '*scpb.EnumType', '*scpb.Function', '*scpb.Schema', '*scpb.Sequence', '*scpb.Table', '*scpb.View'] - joinOnDescID($dependent, $relation, $relation-id) - ToPublicOrTransient($dependent-Target, $relation-Target) @@ -3135,7 +3135,7 @@ deprules to: referencing-via-attr-Node query: - $referenced-descriptor[Type] IN ['*scpb.AliasType', '*scpb.CompositeType', '*scpb.Database', '*scpb.EnumType', '*scpb.Function', '*scpb.Schema', '*scpb.Sequence', '*scpb.Table', '*scpb.View'] - - $referencing-via-attr[Type] IN ['*scpb.CheckConstraintUnvalidated', '*scpb.ColumnComment', '*scpb.ColumnComputeExpression', '*scpb.ColumnDefaultExpression', '*scpb.ColumnFamily', '*scpb.ColumnName', '*scpb.ColumnOnUpdateExpression', '*scpb.ColumnType', '*scpb.CompositeTypeAttrName', '*scpb.CompositeTypeAttrType', '*scpb.ConstraintComment', '*scpb.ConstraintWithoutIndexName', '*scpb.DatabaseComment', '*scpb.DatabaseRegionConfig', '*scpb.DatabaseRoleSetting', '*scpb.DatabaseZoneConfig', '*scpb.EnumTypeValue', '*scpb.ForeignKeyConstraintUnvalidated', '*scpb.FunctionBody', '*scpb.FunctionLeakProof', '*scpb.FunctionName', '*scpb.FunctionNullInputBehavior', '*scpb.FunctionVolatility', '*scpb.IndexColumn', '*scpb.IndexComment', '*scpb.IndexName', '*scpb.IndexPartitioning', '*scpb.IndexZoneConfig', '*scpb.Namespace', '*scpb.Owner', '*scpb.RowLevelTTL', '*scpb.SchemaComment', '*scpb.SecondaryIndexPartial', '*scpb.SequenceOption', '*scpb.SequenceOwner', '*scpb.TableComment', '*scpb.TableLocalityGlobal', '*scpb.TableLocalityPrimaryRegion', '*scpb.TableLocalityRegionalByRow', '*scpb.TableLocalitySecondaryRegion', '*scpb.TablePartitioning', '*scpb.TableSchemaLocked', '*scpb.TableZoneConfig', '*scpb.TypeComment', '*scpb.UniqueWithoutIndexConstraintUnvalidated', '*scpb.UserPrivileges'] + - $referencing-via-attr[Type] IN ['*scpb.CheckConstraintUnvalidated', '*scpb.ColumnComment', '*scpb.ColumnComputeExpression', '*scpb.ColumnDefaultExpression', '*scpb.ColumnFamily', '*scpb.ColumnName', '*scpb.ColumnOnUpdateExpression', '*scpb.ColumnType', 
'*scpb.CompositeTypeAttrName', '*scpb.CompositeTypeAttrType', '*scpb.ConstraintComment', '*scpb.ConstraintWithoutIndexName', '*scpb.DatabaseComment', '*scpb.DatabaseRegionConfig', '*scpb.DatabaseRoleSetting', '*scpb.DatabaseZoneConfig', '*scpb.EnumTypeValue', '*scpb.ForeignKeyConstraintUnvalidated', '*scpb.FunctionBody', '*scpb.FunctionLeakProof', '*scpb.FunctionName', '*scpb.FunctionNullInputBehavior', '*scpb.FunctionSecurity', '*scpb.FunctionVolatility', '*scpb.IndexColumn', '*scpb.IndexComment', '*scpb.IndexName', '*scpb.IndexPartitioning', '*scpb.IndexZoneConfig', '*scpb.Namespace', '*scpb.Owner', '*scpb.RowLevelTTL', '*scpb.SchemaComment', '*scpb.SecondaryIndexPartial', '*scpb.SequenceOption', '*scpb.SequenceOwner', '*scpb.TableComment', '*scpb.TableLocalityGlobal', '*scpb.TableLocalityPrimaryRegion', '*scpb.TableLocalityRegionalByRow', '*scpb.TableLocalitySecondaryRegion', '*scpb.TablePartitioning', '*scpb.TableSchemaLocked', '*scpb.TableZoneConfig', '*scpb.TypeComment', '*scpb.UniqueWithoutIndexConstraintUnvalidated', '*scpb.UserPrivileges'] - joinReferencedDescID($referencing-via-attr, $referenced-descriptor, $desc-id) - toAbsent($referenced-descriptor-Target, $referencing-via-attr-Target) - $referenced-descriptor-Node[CurrentStatus] = DROPPED @@ -3191,7 +3191,7 @@ deprules to: dependent-Node query: - $descriptor[Type] IN ['*scpb.AliasType', '*scpb.CompositeType', '*scpb.Database', '*scpb.EnumType', '*scpb.Function', '*scpb.Schema', '*scpb.Sequence', '*scpb.Table', '*scpb.View'] - - $dependent[Type] IN ['*scpb.CheckConstraintUnvalidated', '*scpb.ColumnComment', '*scpb.ColumnComputeExpression', '*scpb.ColumnDefaultExpression', '*scpb.ColumnFamily', '*scpb.ColumnName', '*scpb.ColumnOnUpdateExpression', '*scpb.ColumnType', '*scpb.CompositeTypeAttrName', '*scpb.CompositeTypeAttrType', '*scpb.DatabaseComment', '*scpb.DatabaseRegionConfig', '*scpb.DatabaseRoleSetting', '*scpb.DatabaseZoneConfig', '*scpb.EnumTypeValue', '*scpb.ForeignKeyConstraintUnvalidated', '*scpb.FunctionBody', '*scpb.FunctionLeakProof', '*scpb.FunctionName', '*scpb.FunctionNullInputBehavior', '*scpb.FunctionVolatility', '*scpb.IndexColumn', '*scpb.IndexComment', '*scpb.IndexName', '*scpb.IndexPartitioning', '*scpb.IndexZoneConfig', '*scpb.Namespace', '*scpb.Owner', '*scpb.RowLevelTTL', '*scpb.SchemaChild', '*scpb.SchemaComment', '*scpb.SchemaParent', '*scpb.SecondaryIndexPartial', '*scpb.SequenceOption', '*scpb.SequenceOwner', '*scpb.TableComment', '*scpb.TableLocalityGlobal', '*scpb.TableLocalityPrimaryRegion', '*scpb.TableLocalityRegionalByRow', '*scpb.TableLocalitySecondaryRegion', '*scpb.TablePartitioning', '*scpb.TableSchemaLocked', '*scpb.TableZoneConfig', '*scpb.TypeComment', '*scpb.UniqueWithoutIndexConstraintUnvalidated', '*scpb.UserPrivileges'] + - $dependent[Type] IN ['*scpb.CheckConstraintUnvalidated', '*scpb.ColumnComment', '*scpb.ColumnComputeExpression', '*scpb.ColumnDefaultExpression', '*scpb.ColumnFamily', '*scpb.ColumnName', '*scpb.ColumnOnUpdateExpression', '*scpb.ColumnType', '*scpb.CompositeTypeAttrName', '*scpb.CompositeTypeAttrType', '*scpb.DatabaseComment', '*scpb.DatabaseRegionConfig', '*scpb.DatabaseRoleSetting', '*scpb.DatabaseZoneConfig', '*scpb.EnumTypeValue', '*scpb.ForeignKeyConstraintUnvalidated', '*scpb.FunctionBody', '*scpb.FunctionLeakProof', '*scpb.FunctionName', '*scpb.FunctionNullInputBehavior', '*scpb.FunctionSecurity', '*scpb.FunctionVolatility', '*scpb.IndexColumn', '*scpb.IndexComment', '*scpb.IndexName', '*scpb.IndexPartitioning', '*scpb.IndexZoneConfig', '*scpb.Namespace', 
'*scpb.Owner', '*scpb.RowLevelTTL', '*scpb.SchemaChild', '*scpb.SchemaComment', '*scpb.SchemaParent', '*scpb.SecondaryIndexPartial', '*scpb.SequenceOption', '*scpb.SequenceOwner', '*scpb.TableComment', '*scpb.TableLocalityGlobal', '*scpb.TableLocalityPrimaryRegion', '*scpb.TableLocalityRegionalByRow', '*scpb.TableLocalitySecondaryRegion', '*scpb.TablePartitioning', '*scpb.TableSchemaLocked', '*scpb.TableZoneConfig', '*scpb.TypeComment', '*scpb.UniqueWithoutIndexConstraintUnvalidated', '*scpb.UserPrivileges'] - joinOnDescID($descriptor, $dependent, $desc-id) - toAbsent($descriptor-Target, $dependent-Target) - $descriptor-Node[CurrentStatus] = DROPPED @@ -3230,7 +3230,7 @@ deprules to: dependent-Node query: - $relation[Type] IN ['*scpb.AliasType', '*scpb.CompositeType', '*scpb.Database', '*scpb.EnumType', '*scpb.Function', '*scpb.Schema', '*scpb.Sequence', '*scpb.Table', '*scpb.View'] - - $dependent[Type] IN ['*scpb.CheckConstraint', '*scpb.CheckConstraintUnvalidated', '*scpb.Column', '*scpb.ColumnComment', '*scpb.ColumnComputeExpression', '*scpb.ColumnDefaultExpression', '*scpb.ColumnFamily', '*scpb.ColumnName', '*scpb.ColumnNotNull', '*scpb.ColumnOnUpdateExpression', '*scpb.ColumnType', '*scpb.CompositeTypeAttrName', '*scpb.CompositeTypeAttrType', '*scpb.ConstraintComment', '*scpb.ConstraintWithoutIndexName', '*scpb.DatabaseComment', '*scpb.DatabaseData', '*scpb.DatabaseRegionConfig', '*scpb.DatabaseRoleSetting', '*scpb.DatabaseZoneConfig', '*scpb.EnumTypeValue', '*scpb.ForeignKeyConstraint', '*scpb.ForeignKeyConstraintUnvalidated', '*scpb.FunctionBody', '*scpb.FunctionLeakProof', '*scpb.FunctionName', '*scpb.FunctionNullInputBehavior', '*scpb.FunctionVolatility', '*scpb.IndexColumn', '*scpb.IndexComment', '*scpb.IndexData', '*scpb.IndexName', '*scpb.IndexPartitioning', '*scpb.IndexZoneConfig', '*scpb.Namespace', '*scpb.Owner', '*scpb.PrimaryIndex', '*scpb.RowLevelTTL', '*scpb.SchemaChild', '*scpb.SchemaComment', '*scpb.SchemaParent', '*scpb.SecondaryIndex', '*scpb.SecondaryIndexPartial', '*scpb.SequenceOption', '*scpb.SequenceOwner', '*scpb.TableComment', '*scpb.TableData', '*scpb.TableLocalityGlobal', '*scpb.TableLocalityPrimaryRegion', '*scpb.TableLocalityRegionalByRow', '*scpb.TableLocalitySecondaryRegion', '*scpb.TablePartitioning', '*scpb.TableSchemaLocked', '*scpb.TableZoneConfig', '*scpb.TemporaryIndex', '*scpb.TypeComment', '*scpb.UniqueWithoutIndexConstraint', '*scpb.UniqueWithoutIndexConstraintUnvalidated', '*scpb.UserPrivileges'] + - $dependent[Type] IN ['*scpb.CheckConstraint', '*scpb.CheckConstraintUnvalidated', '*scpb.Column', '*scpb.ColumnComment', '*scpb.ColumnComputeExpression', '*scpb.ColumnDefaultExpression', '*scpb.ColumnFamily', '*scpb.ColumnName', '*scpb.ColumnNotNull', '*scpb.ColumnOnUpdateExpression', '*scpb.ColumnType', '*scpb.CompositeTypeAttrName', '*scpb.CompositeTypeAttrType', '*scpb.ConstraintComment', '*scpb.ConstraintWithoutIndexName', '*scpb.DatabaseComment', '*scpb.DatabaseData', '*scpb.DatabaseRegionConfig', '*scpb.DatabaseRoleSetting', '*scpb.DatabaseZoneConfig', '*scpb.EnumTypeValue', '*scpb.ForeignKeyConstraint', '*scpb.ForeignKeyConstraintUnvalidated', '*scpb.FunctionBody', '*scpb.FunctionLeakProof', '*scpb.FunctionName', '*scpb.FunctionNullInputBehavior', '*scpb.FunctionSecurity', '*scpb.FunctionVolatility', '*scpb.IndexColumn', '*scpb.IndexComment', '*scpb.IndexData', '*scpb.IndexName', '*scpb.IndexPartitioning', '*scpb.IndexZoneConfig', '*scpb.Namespace', '*scpb.Owner', '*scpb.PrimaryIndex', '*scpb.RowLevelTTL', '*scpb.SchemaChild', 
'*scpb.SchemaComment', '*scpb.SchemaParent', '*scpb.SecondaryIndex', '*scpb.SecondaryIndexPartial', '*scpb.SequenceOption', '*scpb.SequenceOwner', '*scpb.TableComment', '*scpb.TableData', '*scpb.TableLocalityGlobal', '*scpb.TableLocalityPrimaryRegion', '*scpb.TableLocalityRegionalByRow', '*scpb.TableLocalitySecondaryRegion', '*scpb.TablePartitioning', '*scpb.TableSchemaLocked', '*scpb.TableZoneConfig', '*scpb.TemporaryIndex', '*scpb.TypeComment', '*scpb.UniqueWithoutIndexConstraint', '*scpb.UniqueWithoutIndexConstraintUnvalidated', '*scpb.UserPrivileges'] - joinOnDescID($relation, $dependent, $relation-id) - ToPublicOrTransient($relation-Target, $dependent-Target) - $relation-Node[CurrentStatus] = DESCRIPTOR_ADDED @@ -3671,7 +3671,7 @@ deprules kind: Precedence to: descriptor-Node query: - - $dependent[Type] IN ['*scpb.CheckConstraint', '*scpb.CheckConstraintUnvalidated', '*scpb.Column', '*scpb.ColumnComment', '*scpb.ColumnComputeExpression', '*scpb.ColumnDefaultExpression', '*scpb.ColumnFamily', '*scpb.ColumnName', '*scpb.ColumnNotNull', '*scpb.ColumnOnUpdateExpression', '*scpb.ColumnType', '*scpb.CompositeTypeAttrName', '*scpb.CompositeTypeAttrType', '*scpb.ConstraintComment', '*scpb.ConstraintWithoutIndexName', '*scpb.DatabaseComment', '*scpb.DatabaseRegionConfig', '*scpb.DatabaseRoleSetting', '*scpb.DatabaseZoneConfig', '*scpb.EnumTypeValue', '*scpb.ForeignKeyConstraint', '*scpb.ForeignKeyConstraintUnvalidated', '*scpb.FunctionBody', '*scpb.FunctionLeakProof', '*scpb.FunctionName', '*scpb.FunctionNullInputBehavior', '*scpb.FunctionVolatility', '*scpb.IndexColumn', '*scpb.IndexComment', '*scpb.IndexName', '*scpb.IndexPartitioning', '*scpb.IndexZoneConfig', '*scpb.Namespace', '*scpb.Owner', '*scpb.PrimaryIndex', '*scpb.RowLevelTTL', '*scpb.SchemaChild', '*scpb.SchemaComment', '*scpb.SchemaParent', '*scpb.SecondaryIndex', '*scpb.SecondaryIndexPartial', '*scpb.SequenceOption', '*scpb.SequenceOwner', '*scpb.TableComment', '*scpb.TableLocalityGlobal', '*scpb.TableLocalityPrimaryRegion', '*scpb.TableLocalityRegionalByRow', '*scpb.TableLocalitySecondaryRegion', '*scpb.TablePartitioning', '*scpb.TableSchemaLocked', '*scpb.TableZoneConfig', '*scpb.TemporaryIndex', '*scpb.TypeComment', '*scpb.UniqueWithoutIndexConstraint', '*scpb.UniqueWithoutIndexConstraintUnvalidated', '*scpb.UserPrivileges'] + - $dependent[Type] IN ['*scpb.CheckConstraint', '*scpb.CheckConstraintUnvalidated', '*scpb.Column', '*scpb.ColumnComment', '*scpb.ColumnComputeExpression', '*scpb.ColumnDefaultExpression', '*scpb.ColumnFamily', '*scpb.ColumnName', '*scpb.ColumnNotNull', '*scpb.ColumnOnUpdateExpression', '*scpb.ColumnType', '*scpb.CompositeTypeAttrName', '*scpb.CompositeTypeAttrType', '*scpb.ConstraintComment', '*scpb.ConstraintWithoutIndexName', '*scpb.DatabaseComment', '*scpb.DatabaseRegionConfig', '*scpb.DatabaseRoleSetting', '*scpb.DatabaseZoneConfig', '*scpb.EnumTypeValue', '*scpb.ForeignKeyConstraint', '*scpb.ForeignKeyConstraintUnvalidated', '*scpb.FunctionBody', '*scpb.FunctionLeakProof', '*scpb.FunctionName', '*scpb.FunctionNullInputBehavior', '*scpb.FunctionSecurity', '*scpb.FunctionVolatility', '*scpb.IndexColumn', '*scpb.IndexComment', '*scpb.IndexName', '*scpb.IndexPartitioning', '*scpb.IndexZoneConfig', '*scpb.Namespace', '*scpb.Owner', '*scpb.PrimaryIndex', '*scpb.RowLevelTTL', '*scpb.SchemaChild', '*scpb.SchemaComment', '*scpb.SchemaParent', '*scpb.SecondaryIndex', '*scpb.SecondaryIndexPartial', '*scpb.SequenceOption', '*scpb.SequenceOwner', '*scpb.TableComment', '*scpb.TableLocalityGlobal', 
'*scpb.TableLocalityPrimaryRegion', '*scpb.TableLocalityRegionalByRow', '*scpb.TableLocalitySecondaryRegion', '*scpb.TablePartitioning', '*scpb.TableSchemaLocked', '*scpb.TableZoneConfig', '*scpb.TemporaryIndex', '*scpb.TypeComment', '*scpb.UniqueWithoutIndexConstraint', '*scpb.UniqueWithoutIndexConstraintUnvalidated', '*scpb.UserPrivileges'] - $descriptor[Type] IN ['*scpb.AliasType', '*scpb.CompositeType', '*scpb.Database', '*scpb.EnumType', '*scpb.Function', '*scpb.Schema', '*scpb.Sequence', '*scpb.Table', '*scpb.View'] - joinOnDescID($dependent, $descriptor, $desc-id) - toAbsent($dependent-Target, $descriptor-Target) @@ -7267,7 +7267,7 @@ deprules kind: Precedence to: relation-Node query: - - $dependent[Type] IN ['*scpb.CheckConstraint', '*scpb.CheckConstraintUnvalidated', '*scpb.Column', '*scpb.ColumnComment', '*scpb.ColumnComputeExpression', '*scpb.ColumnDefaultExpression', '*scpb.ColumnFamily', '*scpb.ColumnName', '*scpb.ColumnNotNull', '*scpb.ColumnOnUpdateExpression', '*scpb.ColumnType', '*scpb.CompositeTypeAttrName', '*scpb.CompositeTypeAttrType', '*scpb.ConstraintComment', '*scpb.ConstraintWithoutIndexName', '*scpb.DatabaseComment', '*scpb.DatabaseRegionConfig', '*scpb.DatabaseRoleSetting', '*scpb.DatabaseZoneConfig', '*scpb.EnumTypeValue', '*scpb.ForeignKeyConstraint', '*scpb.ForeignKeyConstraintUnvalidated', '*scpb.FunctionBody', '*scpb.FunctionLeakProof', '*scpb.FunctionName', '*scpb.FunctionNullInputBehavior', '*scpb.FunctionVolatility', '*scpb.IndexColumn', '*scpb.IndexComment', '*scpb.IndexName', '*scpb.IndexPartitioning', '*scpb.IndexZoneConfig', '*scpb.Namespace', '*scpb.Owner', '*scpb.PrimaryIndex', '*scpb.RowLevelTTL', '*scpb.SchemaChild', '*scpb.SchemaComment', '*scpb.SchemaParent', '*scpb.SecondaryIndex', '*scpb.SecondaryIndexPartial', '*scpb.SequenceOption', '*scpb.SequenceOwner', '*scpb.TableComment', '*scpb.TableLocalityGlobal', '*scpb.TableLocalityPrimaryRegion', '*scpb.TableLocalityRegionalByRow', '*scpb.TableLocalitySecondaryRegion', '*scpb.TablePartitioning', '*scpb.TableSchemaLocked', '*scpb.TableZoneConfig', '*scpb.TemporaryIndex', '*scpb.TypeComment', '*scpb.UniqueWithoutIndexConstraint', '*scpb.UniqueWithoutIndexConstraintUnvalidated', '*scpb.UserPrivileges'] + - $dependent[Type] IN ['*scpb.CheckConstraint', '*scpb.CheckConstraintUnvalidated', '*scpb.Column', '*scpb.ColumnComment', '*scpb.ColumnComputeExpression', '*scpb.ColumnDefaultExpression', '*scpb.ColumnFamily', '*scpb.ColumnName', '*scpb.ColumnNotNull', '*scpb.ColumnOnUpdateExpression', '*scpb.ColumnType', '*scpb.CompositeTypeAttrName', '*scpb.CompositeTypeAttrType', '*scpb.ConstraintComment', '*scpb.ConstraintWithoutIndexName', '*scpb.DatabaseComment', '*scpb.DatabaseRegionConfig', '*scpb.DatabaseRoleSetting', '*scpb.DatabaseZoneConfig', '*scpb.EnumTypeValue', '*scpb.ForeignKeyConstraint', '*scpb.ForeignKeyConstraintUnvalidated', '*scpb.FunctionBody', '*scpb.FunctionLeakProof', '*scpb.FunctionName', '*scpb.FunctionNullInputBehavior', '*scpb.FunctionSecurity', '*scpb.FunctionVolatility', '*scpb.IndexColumn', '*scpb.IndexComment', '*scpb.IndexName', '*scpb.IndexPartitioning', '*scpb.IndexZoneConfig', '*scpb.Namespace', '*scpb.Owner', '*scpb.PrimaryIndex', '*scpb.RowLevelTTL', '*scpb.SchemaChild', '*scpb.SchemaComment', '*scpb.SchemaParent', '*scpb.SecondaryIndex', '*scpb.SecondaryIndexPartial', '*scpb.SequenceOption', '*scpb.SequenceOwner', '*scpb.TableComment', '*scpb.TableLocalityGlobal', '*scpb.TableLocalityPrimaryRegion', '*scpb.TableLocalityRegionalByRow', '*scpb.TableLocalitySecondaryRegion', 
'*scpb.TablePartitioning', '*scpb.TableSchemaLocked', '*scpb.TableZoneConfig', '*scpb.TemporaryIndex', '*scpb.TypeComment', '*scpb.UniqueWithoutIndexConstraint', '*scpb.UniqueWithoutIndexConstraintUnvalidated', '*scpb.UserPrivileges'] - $relation[Type] IN ['*scpb.AliasType', '*scpb.CompositeType', '*scpb.Database', '*scpb.EnumType', '*scpb.Function', '*scpb.Schema', '*scpb.Sequence', '*scpb.Table', '*scpb.View'] - joinOnDescID($dependent, $relation, $relation-id) - ToPublicOrTransient($dependent-Target, $relation-Target) @@ -7511,7 +7511,7 @@ deprules to: referencing-via-attr-Node query: - $referenced-descriptor[Type] IN ['*scpb.AliasType', '*scpb.CompositeType', '*scpb.Database', '*scpb.EnumType', '*scpb.Function', '*scpb.Schema', '*scpb.Sequence', '*scpb.Table', '*scpb.View'] - - $referencing-via-attr[Type] IN ['*scpb.CheckConstraintUnvalidated', '*scpb.ColumnComment', '*scpb.ColumnComputeExpression', '*scpb.ColumnDefaultExpression', '*scpb.ColumnFamily', '*scpb.ColumnName', '*scpb.ColumnOnUpdateExpression', '*scpb.ColumnType', '*scpb.CompositeTypeAttrName', '*scpb.CompositeTypeAttrType', '*scpb.ConstraintComment', '*scpb.ConstraintWithoutIndexName', '*scpb.DatabaseComment', '*scpb.DatabaseRegionConfig', '*scpb.DatabaseRoleSetting', '*scpb.DatabaseZoneConfig', '*scpb.EnumTypeValue', '*scpb.ForeignKeyConstraintUnvalidated', '*scpb.FunctionBody', '*scpb.FunctionLeakProof', '*scpb.FunctionName', '*scpb.FunctionNullInputBehavior', '*scpb.FunctionVolatility', '*scpb.IndexColumn', '*scpb.IndexComment', '*scpb.IndexName', '*scpb.IndexPartitioning', '*scpb.IndexZoneConfig', '*scpb.Namespace', '*scpb.Owner', '*scpb.RowLevelTTL', '*scpb.SchemaComment', '*scpb.SecondaryIndexPartial', '*scpb.SequenceOption', '*scpb.SequenceOwner', '*scpb.TableComment', '*scpb.TableLocalityGlobal', '*scpb.TableLocalityPrimaryRegion', '*scpb.TableLocalityRegionalByRow', '*scpb.TableLocalitySecondaryRegion', '*scpb.TablePartitioning', '*scpb.TableSchemaLocked', '*scpb.TableZoneConfig', '*scpb.TypeComment', '*scpb.UniqueWithoutIndexConstraintUnvalidated', '*scpb.UserPrivileges'] + - $referencing-via-attr[Type] IN ['*scpb.CheckConstraintUnvalidated', '*scpb.ColumnComment', '*scpb.ColumnComputeExpression', '*scpb.ColumnDefaultExpression', '*scpb.ColumnFamily', '*scpb.ColumnName', '*scpb.ColumnOnUpdateExpression', '*scpb.ColumnType', '*scpb.CompositeTypeAttrName', '*scpb.CompositeTypeAttrType', '*scpb.ConstraintComment', '*scpb.ConstraintWithoutIndexName', '*scpb.DatabaseComment', '*scpb.DatabaseRegionConfig', '*scpb.DatabaseRoleSetting', '*scpb.DatabaseZoneConfig', '*scpb.EnumTypeValue', '*scpb.ForeignKeyConstraintUnvalidated', '*scpb.FunctionBody', '*scpb.FunctionLeakProof', '*scpb.FunctionName', '*scpb.FunctionNullInputBehavior', '*scpb.FunctionSecurity', '*scpb.FunctionVolatility', '*scpb.IndexColumn', '*scpb.IndexComment', '*scpb.IndexName', '*scpb.IndexPartitioning', '*scpb.IndexZoneConfig', '*scpb.Namespace', '*scpb.Owner', '*scpb.RowLevelTTL', '*scpb.SchemaComment', '*scpb.SecondaryIndexPartial', '*scpb.SequenceOption', '*scpb.SequenceOwner', '*scpb.TableComment', '*scpb.TableLocalityGlobal', '*scpb.TableLocalityPrimaryRegion', '*scpb.TableLocalityRegionalByRow', '*scpb.TableLocalitySecondaryRegion', '*scpb.TablePartitioning', '*scpb.TableSchemaLocked', '*scpb.TableZoneConfig', '*scpb.TypeComment', '*scpb.UniqueWithoutIndexConstraintUnvalidated', '*scpb.UserPrivileges'] - joinReferencedDescID($referencing-via-attr, $referenced-descriptor, $desc-id) - toAbsent($referenced-descriptor-Target, 
$referencing-via-attr-Target) - $referenced-descriptor-Node[CurrentStatus] = DROPPED @@ -7567,7 +7567,7 @@ deprules to: dependent-Node query: - $descriptor[Type] IN ['*scpb.AliasType', '*scpb.CompositeType', '*scpb.Database', '*scpb.EnumType', '*scpb.Function', '*scpb.Schema', '*scpb.Sequence', '*scpb.Table', '*scpb.View'] - - $dependent[Type] IN ['*scpb.CheckConstraintUnvalidated', '*scpb.ColumnComment', '*scpb.ColumnComputeExpression', '*scpb.ColumnDefaultExpression', '*scpb.ColumnFamily', '*scpb.ColumnName', '*scpb.ColumnOnUpdateExpression', '*scpb.ColumnType', '*scpb.CompositeTypeAttrName', '*scpb.CompositeTypeAttrType', '*scpb.DatabaseComment', '*scpb.DatabaseRegionConfig', '*scpb.DatabaseRoleSetting', '*scpb.DatabaseZoneConfig', '*scpb.EnumTypeValue', '*scpb.ForeignKeyConstraintUnvalidated', '*scpb.FunctionBody', '*scpb.FunctionLeakProof', '*scpb.FunctionName', '*scpb.FunctionNullInputBehavior', '*scpb.FunctionVolatility', '*scpb.IndexColumn', '*scpb.IndexComment', '*scpb.IndexName', '*scpb.IndexPartitioning', '*scpb.IndexZoneConfig', '*scpb.Namespace', '*scpb.Owner', '*scpb.RowLevelTTL', '*scpb.SchemaChild', '*scpb.SchemaComment', '*scpb.SchemaParent', '*scpb.SecondaryIndexPartial', '*scpb.SequenceOption', '*scpb.SequenceOwner', '*scpb.TableComment', '*scpb.TableLocalityGlobal', '*scpb.TableLocalityPrimaryRegion', '*scpb.TableLocalityRegionalByRow', '*scpb.TableLocalitySecondaryRegion', '*scpb.TablePartitioning', '*scpb.TableSchemaLocked', '*scpb.TableZoneConfig', '*scpb.TypeComment', '*scpb.UniqueWithoutIndexConstraintUnvalidated', '*scpb.UserPrivileges'] + - $dependent[Type] IN ['*scpb.CheckConstraintUnvalidated', '*scpb.ColumnComment', '*scpb.ColumnComputeExpression', '*scpb.ColumnDefaultExpression', '*scpb.ColumnFamily', '*scpb.ColumnName', '*scpb.ColumnOnUpdateExpression', '*scpb.ColumnType', '*scpb.CompositeTypeAttrName', '*scpb.CompositeTypeAttrType', '*scpb.DatabaseComment', '*scpb.DatabaseRegionConfig', '*scpb.DatabaseRoleSetting', '*scpb.DatabaseZoneConfig', '*scpb.EnumTypeValue', '*scpb.ForeignKeyConstraintUnvalidated', '*scpb.FunctionBody', '*scpb.FunctionLeakProof', '*scpb.FunctionName', '*scpb.FunctionNullInputBehavior', '*scpb.FunctionSecurity', '*scpb.FunctionVolatility', '*scpb.IndexColumn', '*scpb.IndexComment', '*scpb.IndexName', '*scpb.IndexPartitioning', '*scpb.IndexZoneConfig', '*scpb.Namespace', '*scpb.Owner', '*scpb.RowLevelTTL', '*scpb.SchemaChild', '*scpb.SchemaComment', '*scpb.SchemaParent', '*scpb.SecondaryIndexPartial', '*scpb.SequenceOption', '*scpb.SequenceOwner', '*scpb.TableComment', '*scpb.TableLocalityGlobal', '*scpb.TableLocalityPrimaryRegion', '*scpb.TableLocalityRegionalByRow', '*scpb.TableLocalitySecondaryRegion', '*scpb.TablePartitioning', '*scpb.TableSchemaLocked', '*scpb.TableZoneConfig', '*scpb.TypeComment', '*scpb.UniqueWithoutIndexConstraintUnvalidated', '*scpb.UserPrivileges'] - joinOnDescID($descriptor, $dependent, $desc-id) - toAbsent($descriptor-Target, $dependent-Target) - $descriptor-Node[CurrentStatus] = DROPPED @@ -7606,7 +7606,7 @@ deprules to: dependent-Node query: - $relation[Type] IN ['*scpb.AliasType', '*scpb.CompositeType', '*scpb.Database', '*scpb.EnumType', '*scpb.Function', '*scpb.Schema', '*scpb.Sequence', '*scpb.Table', '*scpb.View'] - - $dependent[Type] IN ['*scpb.CheckConstraint', '*scpb.CheckConstraintUnvalidated', '*scpb.Column', '*scpb.ColumnComment', '*scpb.ColumnComputeExpression', '*scpb.ColumnDefaultExpression', '*scpb.ColumnFamily', '*scpb.ColumnName', '*scpb.ColumnNotNull', '*scpb.ColumnOnUpdateExpression', 
'*scpb.ColumnType', '*scpb.CompositeTypeAttrName', '*scpb.CompositeTypeAttrType', '*scpb.ConstraintComment', '*scpb.ConstraintWithoutIndexName', '*scpb.DatabaseComment', '*scpb.DatabaseData', '*scpb.DatabaseRegionConfig', '*scpb.DatabaseRoleSetting', '*scpb.DatabaseZoneConfig', '*scpb.EnumTypeValue', '*scpb.ForeignKeyConstraint', '*scpb.ForeignKeyConstraintUnvalidated', '*scpb.FunctionBody', '*scpb.FunctionLeakProof', '*scpb.FunctionName', '*scpb.FunctionNullInputBehavior', '*scpb.FunctionVolatility', '*scpb.IndexColumn', '*scpb.IndexComment', '*scpb.IndexData', '*scpb.IndexName', '*scpb.IndexPartitioning', '*scpb.IndexZoneConfig', '*scpb.Namespace', '*scpb.Owner', '*scpb.PrimaryIndex', '*scpb.RowLevelTTL', '*scpb.SchemaChild', '*scpb.SchemaComment', '*scpb.SchemaParent', '*scpb.SecondaryIndex', '*scpb.SecondaryIndexPartial', '*scpb.SequenceOption', '*scpb.SequenceOwner', '*scpb.TableComment', '*scpb.TableData', '*scpb.TableLocalityGlobal', '*scpb.TableLocalityPrimaryRegion', '*scpb.TableLocalityRegionalByRow', '*scpb.TableLocalitySecondaryRegion', '*scpb.TablePartitioning', '*scpb.TableSchemaLocked', '*scpb.TableZoneConfig', '*scpb.TemporaryIndex', '*scpb.TypeComment', '*scpb.UniqueWithoutIndexConstraint', '*scpb.UniqueWithoutIndexConstraintUnvalidated', '*scpb.UserPrivileges'] + - $dependent[Type] IN ['*scpb.CheckConstraint', '*scpb.CheckConstraintUnvalidated', '*scpb.Column', '*scpb.ColumnComment', '*scpb.ColumnComputeExpression', '*scpb.ColumnDefaultExpression', '*scpb.ColumnFamily', '*scpb.ColumnName', '*scpb.ColumnNotNull', '*scpb.ColumnOnUpdateExpression', '*scpb.ColumnType', '*scpb.CompositeTypeAttrName', '*scpb.CompositeTypeAttrType', '*scpb.ConstraintComment', '*scpb.ConstraintWithoutIndexName', '*scpb.DatabaseComment', '*scpb.DatabaseData', '*scpb.DatabaseRegionConfig', '*scpb.DatabaseRoleSetting', '*scpb.DatabaseZoneConfig', '*scpb.EnumTypeValue', '*scpb.ForeignKeyConstraint', '*scpb.ForeignKeyConstraintUnvalidated', '*scpb.FunctionBody', '*scpb.FunctionLeakProof', '*scpb.FunctionName', '*scpb.FunctionNullInputBehavior', '*scpb.FunctionSecurity', '*scpb.FunctionVolatility', '*scpb.IndexColumn', '*scpb.IndexComment', '*scpb.IndexData', '*scpb.IndexName', '*scpb.IndexPartitioning', '*scpb.IndexZoneConfig', '*scpb.Namespace', '*scpb.Owner', '*scpb.PrimaryIndex', '*scpb.RowLevelTTL', '*scpb.SchemaChild', '*scpb.SchemaComment', '*scpb.SchemaParent', '*scpb.SecondaryIndex', '*scpb.SecondaryIndexPartial', '*scpb.SequenceOption', '*scpb.SequenceOwner', '*scpb.TableComment', '*scpb.TableData', '*scpb.TableLocalityGlobal', '*scpb.TableLocalityPrimaryRegion', '*scpb.TableLocalityRegionalByRow', '*scpb.TableLocalitySecondaryRegion', '*scpb.TablePartitioning', '*scpb.TableSchemaLocked', '*scpb.TableZoneConfig', '*scpb.TemporaryIndex', '*scpb.TypeComment', '*scpb.UniqueWithoutIndexConstraint', '*scpb.UniqueWithoutIndexConstraintUnvalidated', '*scpb.UserPrivileges'] - joinOnDescID($relation, $dependent, $relation-id) - ToPublicOrTransient($relation-Target, $dependent-Target) - $relation-Node[CurrentStatus] = DESCRIPTOR_ADDED @@ -8047,7 +8047,7 @@ deprules kind: Precedence to: descriptor-Node query: - - $dependent[Type] IN ['*scpb.CheckConstraint', '*scpb.CheckConstraintUnvalidated', '*scpb.Column', '*scpb.ColumnComment', '*scpb.ColumnComputeExpression', '*scpb.ColumnDefaultExpression', '*scpb.ColumnFamily', '*scpb.ColumnName', '*scpb.ColumnNotNull', '*scpb.ColumnOnUpdateExpression', '*scpb.ColumnType', '*scpb.CompositeTypeAttrName', '*scpb.CompositeTypeAttrType', '*scpb.ConstraintComment', 
'*scpb.ConstraintWithoutIndexName', '*scpb.DatabaseComment', '*scpb.DatabaseRegionConfig', '*scpb.DatabaseRoleSetting', '*scpb.DatabaseZoneConfig', '*scpb.EnumTypeValue', '*scpb.ForeignKeyConstraint', '*scpb.ForeignKeyConstraintUnvalidated', '*scpb.FunctionBody', '*scpb.FunctionLeakProof', '*scpb.FunctionName', '*scpb.FunctionNullInputBehavior', '*scpb.FunctionVolatility', '*scpb.IndexColumn', '*scpb.IndexComment', '*scpb.IndexName', '*scpb.IndexPartitioning', '*scpb.IndexZoneConfig', '*scpb.Namespace', '*scpb.Owner', '*scpb.PrimaryIndex', '*scpb.RowLevelTTL', '*scpb.SchemaChild', '*scpb.SchemaComment', '*scpb.SchemaParent', '*scpb.SecondaryIndex', '*scpb.SecondaryIndexPartial', '*scpb.SequenceOption', '*scpb.SequenceOwner', '*scpb.TableComment', '*scpb.TableLocalityGlobal', '*scpb.TableLocalityPrimaryRegion', '*scpb.TableLocalityRegionalByRow', '*scpb.TableLocalitySecondaryRegion', '*scpb.TablePartitioning', '*scpb.TableSchemaLocked', '*scpb.TableZoneConfig', '*scpb.TemporaryIndex', '*scpb.TypeComment', '*scpb.UniqueWithoutIndexConstraint', '*scpb.UniqueWithoutIndexConstraintUnvalidated', '*scpb.UserPrivileges'] + - $dependent[Type] IN ['*scpb.CheckConstraint', '*scpb.CheckConstraintUnvalidated', '*scpb.Column', '*scpb.ColumnComment', '*scpb.ColumnComputeExpression', '*scpb.ColumnDefaultExpression', '*scpb.ColumnFamily', '*scpb.ColumnName', '*scpb.ColumnNotNull', '*scpb.ColumnOnUpdateExpression', '*scpb.ColumnType', '*scpb.CompositeTypeAttrName', '*scpb.CompositeTypeAttrType', '*scpb.ConstraintComment', '*scpb.ConstraintWithoutIndexName', '*scpb.DatabaseComment', '*scpb.DatabaseRegionConfig', '*scpb.DatabaseRoleSetting', '*scpb.DatabaseZoneConfig', '*scpb.EnumTypeValue', '*scpb.ForeignKeyConstraint', '*scpb.ForeignKeyConstraintUnvalidated', '*scpb.FunctionBody', '*scpb.FunctionLeakProof', '*scpb.FunctionName', '*scpb.FunctionNullInputBehavior', '*scpb.FunctionSecurity', '*scpb.FunctionVolatility', '*scpb.IndexColumn', '*scpb.IndexComment', '*scpb.IndexName', '*scpb.IndexPartitioning', '*scpb.IndexZoneConfig', '*scpb.Namespace', '*scpb.Owner', '*scpb.PrimaryIndex', '*scpb.RowLevelTTL', '*scpb.SchemaChild', '*scpb.SchemaComment', '*scpb.SchemaParent', '*scpb.SecondaryIndex', '*scpb.SecondaryIndexPartial', '*scpb.SequenceOption', '*scpb.SequenceOwner', '*scpb.TableComment', '*scpb.TableLocalityGlobal', '*scpb.TableLocalityPrimaryRegion', '*scpb.TableLocalityRegionalByRow', '*scpb.TableLocalitySecondaryRegion', '*scpb.TablePartitioning', '*scpb.TableSchemaLocked', '*scpb.TableZoneConfig', '*scpb.TemporaryIndex', '*scpb.TypeComment', '*scpb.UniqueWithoutIndexConstraint', '*scpb.UniqueWithoutIndexConstraintUnvalidated', '*scpb.UserPrivileges'] - $descriptor[Type] IN ['*scpb.AliasType', '*scpb.CompositeType', '*scpb.Database', '*scpb.EnumType', '*scpb.Function', '*scpb.Schema', '*scpb.Sequence', '*scpb.Table', '*scpb.View'] - joinOnDescID($dependent, $descriptor, $desc-id) - toAbsent($dependent-Target, $descriptor-Target) diff --git a/pkg/sql/schemachanger/scplan/testdata/alter_table_add_column b/pkg/sql/schemachanger/scplan/testdata/alter_table_add_column index 0ab9b0a1f1c8..4c226eea6f14 100644 --- a/pkg/sql/schemachanger/scplan/testdata/alter_table_add_column +++ b/pkg/sql/schemachanger/scplan/testdata/alter_table_add_column @@ -1374,6 +1374,374 @@ PostCommitNonRevertiblePhase stage 3 of 3 with 5 MutationType ops IsNonCancelable: true JobID: 1 +ops +ALTER TABLE defaultdb.foo ADD COLUMN serial_id SERIAL +---- +StatementPhase stage 1 of 1 with 10 MutationType ops + transitions: + 
[[Column:{DescID: 104, ColumnID: 2}, PUBLIC], ABSENT] -> DELETE_ONLY + [[ColumnName:{DescID: 104, Name: serial_id, ColumnID: 2}, PUBLIC], ABSENT] -> PUBLIC + [[ColumnType:{DescID: 104, ColumnFamilyID: 0, ColumnID: 2, TypeName: INT8}, PUBLIC], ABSENT] -> PUBLIC + [[ColumnDefaultExpression:{DescID: 104, ColumnID: 2, Expr: unique_rowid()}, PUBLIC], ABSENT] -> PUBLIC + [[PrimaryIndex:{DescID: 104, IndexID: 2, ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1}, PUBLIC], ABSENT] -> BACKFILL_ONLY + [[IndexColumn:{DescID: 104, ColumnID: 1, IndexID: 2}, PUBLIC], ABSENT] -> PUBLIC + [[IndexData:{DescID: 104, IndexID: 2}, PUBLIC], ABSENT] -> PUBLIC + [[TemporaryIndex:{DescID: 104, IndexID: 3, ConstraintID: 3, SourceIndexID: 1}, TRANSIENT_ABSENT], ABSENT] -> DELETE_ONLY + [[IndexColumn:{DescID: 104, ColumnID: 1, IndexID: 3}, TRANSIENT_ABSENT], ABSENT] -> PUBLIC + [[IndexColumn:{DescID: 104, ColumnID: 2, IndexID: 2}, PUBLIC], ABSENT] -> PUBLIC + [[IndexColumn:{DescID: 104, ColumnID: 2, IndexID: 3}, TRANSIENT_ABSENT], ABSENT] -> PUBLIC + ops: + *scop.MakeAbsentColumnDeleteOnly + Column: + ColumnID: 2 + TableID: 104 + *scop.SetColumnName + ColumnID: 2 + Name: serial_id + TableID: 104 + *scop.UpsertColumnType + ColumnType: + ColumnID: 2 + ElementCreationMetadata: + in231OrLater: true + in243OrLater: true + TableID: 104 + TypeT: + Type: + family: IntFamily + oid: 20 + width: 64 + TypeName: INT8 + *scop.AddColumnDefaultExpression + Default: + ColumnID: 2 + Expression: + Expr: unique_rowid() + TableID: 104 + *scop.MakeAbsentIndexBackfilling + Index: + ConstraintID: 2 + IndexID: 2 + IsUnique: true + SourceIndexID: 1 + TableID: 104 + TemporaryIndexID: 3 + *scop.AddColumnToIndex + ColumnID: 1 + IndexID: 2 + TableID: 104 + *scop.MakeAbsentTempIndexDeleteOnly + Index: + ConstraintID: 3 + IndexID: 3 + IsUnique: true + SourceIndexID: 1 + TableID: 104 + *scop.AddColumnToIndex + ColumnID: 1 + IndexID: 3 + TableID: 104 + *scop.AddColumnToIndex + ColumnID: 2 + IndexID: 2 + Kind: 2 + TableID: 104 + *scop.AddColumnToIndex + ColumnID: 2 + IndexID: 3 + Kind: 2 + TableID: 104 +PreCommitPhase stage 1 of 2 with 1 MutationType op + transitions: + [[Column:{DescID: 104, ColumnID: 2}, PUBLIC], DELETE_ONLY] -> ABSENT + [[ColumnName:{DescID: 104, Name: serial_id, ColumnID: 2}, PUBLIC], PUBLIC] -> ABSENT + [[ColumnType:{DescID: 104, ColumnFamilyID: 0, ColumnID: 2, TypeName: INT8}, PUBLIC], PUBLIC] -> ABSENT + [[ColumnDefaultExpression:{DescID: 104, ColumnID: 2, Expr: unique_rowid()}, PUBLIC], PUBLIC] -> ABSENT + [[PrimaryIndex:{DescID: 104, IndexID: 2, ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1}, PUBLIC], BACKFILL_ONLY] -> ABSENT + [[IndexColumn:{DescID: 104, ColumnID: 1, IndexID: 2}, PUBLIC], PUBLIC] -> ABSENT + [[IndexData:{DescID: 104, IndexID: 2}, PUBLIC], PUBLIC] -> ABSENT + [[TemporaryIndex:{DescID: 104, IndexID: 3, ConstraintID: 3, SourceIndexID: 1}, TRANSIENT_ABSENT], DELETE_ONLY] -> ABSENT + [[IndexColumn:{DescID: 104, ColumnID: 1, IndexID: 3}, TRANSIENT_ABSENT], PUBLIC] -> ABSENT + [[IndexColumn:{DescID: 104, ColumnID: 2, IndexID: 2}, PUBLIC], PUBLIC] -> ABSENT + [[IndexColumn:{DescID: 104, ColumnID: 2, IndexID: 3}, TRANSIENT_ABSENT], PUBLIC] -> ABSENT + ops: + *scop.UndoAllInTxnImmediateMutationOpSideEffects + {} +PreCommitPhase stage 2 of 2 with 14 MutationType ops + transitions: + [[Column:{DescID: 104, ColumnID: 2}, PUBLIC], ABSENT] -> DELETE_ONLY + [[ColumnName:{DescID: 104, Name: serial_id, ColumnID: 2}, PUBLIC], ABSENT] -> PUBLIC + [[ColumnType:{DescID: 104, ColumnFamilyID: 0, ColumnID: 2, 
TypeName: INT8}, PUBLIC], ABSENT] -> PUBLIC + [[ColumnDefaultExpression:{DescID: 104, ColumnID: 2, Expr: unique_rowid()}, PUBLIC], ABSENT] -> PUBLIC + [[PrimaryIndex:{DescID: 104, IndexID: 2, ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1}, PUBLIC], ABSENT] -> BACKFILL_ONLY + [[IndexColumn:{DescID: 104, ColumnID: 1, IndexID: 2}, PUBLIC], ABSENT] -> PUBLIC + [[IndexData:{DescID: 104, IndexID: 2}, PUBLIC], ABSENT] -> PUBLIC + [[TemporaryIndex:{DescID: 104, IndexID: 3, ConstraintID: 3, SourceIndexID: 1}, TRANSIENT_ABSENT], ABSENT] -> DELETE_ONLY + [[IndexColumn:{DescID: 104, ColumnID: 1, IndexID: 3}, TRANSIENT_ABSENT], ABSENT] -> PUBLIC + [[IndexColumn:{DescID: 104, ColumnID: 2, IndexID: 2}, PUBLIC], ABSENT] -> PUBLIC + [[IndexColumn:{DescID: 104, ColumnID: 2, IndexID: 3}, TRANSIENT_ABSENT], ABSENT] -> PUBLIC + ops: + *scop.MakeAbsentColumnDeleteOnly + Column: + ColumnID: 2 + TableID: 104 + *scop.SetColumnName + ColumnID: 2 + Name: serial_id + TableID: 104 + *scop.UpsertColumnType + ColumnType: + ColumnID: 2 + ElementCreationMetadata: + in231OrLater: true + in243OrLater: true + TableID: 104 + TypeT: + Type: + family: IntFamily + oid: 20 + width: 64 + TypeName: INT8 + *scop.AddColumnDefaultExpression + Default: + ColumnID: 2 + Expression: + Expr: unique_rowid() + TableID: 104 + *scop.MakeAbsentIndexBackfilling + Index: + ConstraintID: 2 + IndexID: 2 + IsUnique: true + SourceIndexID: 1 + TableID: 104 + TemporaryIndexID: 3 + *scop.MaybeAddSplitForIndex + IndexID: 2 + TableID: 104 + *scop.AddColumnToIndex + ColumnID: 1 + IndexID: 2 + TableID: 104 + *scop.MakeAbsentTempIndexDeleteOnly + Index: + ConstraintID: 3 + IndexID: 3 + IsUnique: true + SourceIndexID: 1 + TableID: 104 + *scop.MaybeAddSplitForIndex + IndexID: 3 + TableID: 104 + *scop.AddColumnToIndex + ColumnID: 1 + IndexID: 3 + TableID: 104 + *scop.AddColumnToIndex + ColumnID: 2 + IndexID: 2 + Kind: 2 + TableID: 104 + *scop.AddColumnToIndex + ColumnID: 2 + IndexID: 3 + Kind: 2 + TableID: 104 + *scop.SetJobStateOnDescriptor + DescriptorID: 104 + Initialize: true + *scop.CreateSchemaChangerJob + Authorization: + AppName: $ internal-test + UserName: root + DescriptorIDs: + - 104 + JobID: 1 + RunningStatus: PostCommitPhase stage 1 of 7 with 3 MutationType ops pending + Statements: + - statement: ALTER TABLE defaultdb.foo ADD COLUMN serial_id SERIAL8 + redactedstatement: ALTER TABLE ‹defaultdb›.public.‹foo› ADD COLUMN ‹serial_id› INT8 + statementtag: ALTER TABLE +PostCommitPhase stage 1 of 7 with 5 MutationType ops + transitions: + [[Column:{DescID: 104, ColumnID: 2}, PUBLIC], DELETE_ONLY] -> WRITE_ONLY + [[TemporaryIndex:{DescID: 104, IndexID: 3, ConstraintID: 3, SourceIndexID: 1}, TRANSIENT_ABSENT], DELETE_ONLY] -> WRITE_ONLY + [[IndexData:{DescID: 104, IndexID: 3}, TRANSIENT_ABSENT], ABSENT] -> PUBLIC + [[ColumnNotNull:{DescID: 104, ColumnID: 2, IndexID: 2}, PUBLIC], ABSENT] -> WRITE_ONLY + ops: + *scop.MakeDeleteOnlyColumnWriteOnly + ColumnID: 2 + TableID: 104 + *scop.MakeDeleteOnlyIndexWriteOnly + IndexID: 3 + TableID: 104 + *scop.MakeAbsentColumnNotNullWriteOnly + ColumnID: 2 + TableID: 104 + *scop.SetJobStateOnDescriptor + DescriptorID: 104 + *scop.UpdateSchemaChangerJob + JobID: 1 +PostCommitPhase stage 2 of 7 with 1 BackfillType op + transitions: + [[PrimaryIndex:{DescID: 104, IndexID: 2, ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1}, PUBLIC], BACKFILL_ONLY] -> BACKFILLED + ops: + *scop.BackfillIndex + IndexID: 2 + SourceIndexID: 1 + TableID: 104 +PostCommitPhase stage 3 of 7 with 3 MutationType ops + transitions: + 
[[PrimaryIndex:{DescID: 104, IndexID: 2, ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1}, PUBLIC], BACKFILLED] -> DELETE_ONLY + ops: + *scop.MakeBackfillingIndexDeleteOnly + IndexID: 2 + TableID: 104 + *scop.SetJobStateOnDescriptor + DescriptorID: 104 + *scop.UpdateSchemaChangerJob + JobID: 1 +PostCommitPhase stage 4 of 7 with 3 MutationType ops + transitions: + [[PrimaryIndex:{DescID: 104, IndexID: 2, ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1}, PUBLIC], DELETE_ONLY] -> MERGE_ONLY + ops: + *scop.MakeBackfilledIndexMerging + IndexID: 2 + TableID: 104 + *scop.SetJobStateOnDescriptor + DescriptorID: 104 + *scop.UpdateSchemaChangerJob + JobID: 1 +PostCommitPhase stage 5 of 7 with 1 BackfillType op + transitions: + [[PrimaryIndex:{DescID: 104, IndexID: 2, ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1}, PUBLIC], MERGE_ONLY] -> MERGED + ops: + *scop.MergeIndex + BackfilledIndexID: 2 + TableID: 104 + TemporaryIndexID: 3 +PostCommitPhase stage 6 of 7 with 4 MutationType ops + transitions: + [[PrimaryIndex:{DescID: 104, IndexID: 2, ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1}, PUBLIC], MERGED] -> WRITE_ONLY + [[TemporaryIndex:{DescID: 104, IndexID: 3, ConstraintID: 3, SourceIndexID: 1}, TRANSIENT_ABSENT], WRITE_ONLY] -> TRANSIENT_DELETE_ONLY + ops: + *scop.MakeWriteOnlyIndexDeleteOnly + IndexID: 3 + TableID: 104 + *scop.MakeMergedIndexWriteOnly + IndexID: 2 + TableID: 104 + *scop.SetJobStateOnDescriptor + DescriptorID: 104 + *scop.UpdateSchemaChangerJob + JobID: 1 +PostCommitPhase stage 7 of 7 with 2 ValidationType ops + transitions: + [[PrimaryIndex:{DescID: 104, IndexID: 2, ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1}, PUBLIC], WRITE_ONLY] -> VALIDATED + [[ColumnNotNull:{DescID: 104, ColumnID: 2, IndexID: 2}, PUBLIC], WRITE_ONLY] -> VALIDATED + ops: + *scop.ValidateIndex + IndexID: 2 + TableID: 104 + *scop.ValidateColumnNotNull + ColumnID: 2 + IndexIDForValidation: 2 + TableID: 104 +PostCommitNonRevertiblePhase stage 1 of 3 with 12 MutationType ops + transitions: + [[PrimaryIndex:{DescID: 104, IndexID: 1, ConstraintID: 1}, ABSENT], PUBLIC] -> VALIDATED + [[IndexName:{DescID: 104, Name: foo_pkey, IndexID: 1}, ABSENT], PUBLIC] -> ABSENT + [[Column:{DescID: 104, ColumnID: 2}, PUBLIC], WRITE_ONLY] -> PUBLIC + [[PrimaryIndex:{DescID: 104, IndexID: 2, ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1}, PUBLIC], VALIDATED] -> PUBLIC + [[IndexName:{DescID: 104, Name: foo_pkey, IndexID: 2}, PUBLIC], ABSENT] -> PUBLIC + [[TemporaryIndex:{DescID: 104, IndexID: 3, ConstraintID: 3, SourceIndexID: 1}, TRANSIENT_ABSENT], TRANSIENT_DELETE_ONLY] -> TRANSIENT_ABSENT + [[IndexColumn:{DescID: 104, ColumnID: 1, IndexID: 3}, TRANSIENT_ABSENT], PUBLIC] -> TRANSIENT_ABSENT + [[IndexColumn:{DescID: 104, ColumnID: 2, IndexID: 3}, TRANSIENT_ABSENT], PUBLIC] -> TRANSIENT_ABSENT + [[ColumnNotNull:{DescID: 104, ColumnID: 2, IndexID: 2}, PUBLIC], VALIDATED] -> PUBLIC + ops: + *scop.MakePublicPrimaryIndexWriteOnly + IndexID: 1 + TableID: 104 + *scop.SetIndexName + IndexID: 1 + Name: crdb_internal_index_1_name_placeholder + TableID: 104 + *scop.SetIndexName + IndexID: 2 + Name: foo_pkey + TableID: 104 + *scop.RemoveColumnFromIndex + ColumnID: 1 + IndexID: 3 + TableID: 104 + *scop.RemoveColumnFromIndex + ColumnID: 2 + IndexID: 3 + Kind: 2 + TableID: 104 + *scop.MakeValidatedColumnNotNullPublic + ColumnID: 2 + TableID: 104 + *scop.MakeValidatedPrimaryIndexPublic + IndexID: 2 + TableID: 104 + *scop.MakeIndexAbsent + IndexID: 3 + TableID: 104 + *scop.MakeWriteOnlyColumnPublic + 
ColumnID: 2 + TableID: 104 + *scop.RefreshStats + TableID: 104 + *scop.SetJobStateOnDescriptor + DescriptorID: 104 + *scop.UpdateSchemaChangerJob + IsNonCancelable: true + JobID: 1 +PostCommitNonRevertiblePhase stage 2 of 3 with 4 MutationType ops + transitions: + [[IndexColumn:{DescID: 104, ColumnID: 1, IndexID: 1}, ABSENT], PUBLIC] -> ABSENT + [[PrimaryIndex:{DescID: 104, IndexID: 1, ConstraintID: 1}, ABSENT], VALIDATED] -> DELETE_ONLY + ops: + *scop.MakeWriteOnlyIndexDeleteOnly + IndexID: 1 + TableID: 104 + *scop.RemoveColumnFromIndex + ColumnID: 1 + IndexID: 1 + TableID: 104 + *scop.SetJobStateOnDescriptor + DescriptorID: 104 + *scop.UpdateSchemaChangerJob + IsNonCancelable: true + JobID: 1 +PostCommitNonRevertiblePhase stage 3 of 3 with 5 MutationType ops + transitions: + [[PrimaryIndex:{DescID: 104, IndexID: 1, ConstraintID: 1}, ABSENT], DELETE_ONLY] -> ABSENT + [[IndexData:{DescID: 104, IndexID: 1}, ABSENT], PUBLIC] -> ABSENT + [[IndexData:{DescID: 104, IndexID: 3}, TRANSIENT_ABSENT], PUBLIC] -> TRANSIENT_ABSENT + ops: + *scop.MakeIndexAbsent + IndexID: 1 + TableID: 104 + *scop.CreateGCJobForIndex + IndexID: 1 + StatementForDropJob: + Statement: ALTER TABLE defaultdb.public.foo ADD COLUMN serial_id INT8 + TableID: 104 + *scop.CreateGCJobForIndex + IndexID: 3 + StatementForDropJob: + Statement: ALTER TABLE defaultdb.public.foo ADD COLUMN serial_id INT8 + TableID: 104 + *scop.RemoveJobStateFromDescriptor + DescriptorID: 104 + JobID: 1 + *scop.UpdateSchemaChangerJob + DescriptorIDsToRemove: + - 104 + IsNonCancelable: true + JobID: 1 setup CREATE TABLE defaultdb.bar (j INT); diff --git a/pkg/sql/schemachanger/scplan/testdata/drop_function b/pkg/sql/schemachanger/scplan/testdata/drop_function index 9ae99aa455b3..d8b6dabfb590 100644 --- a/pkg/sql/schemachanger/scplan/testdata/drop_function +++ b/pkg/sql/schemachanger/scplan/testdata/drop_function @@ -21,7 +21,7 @@ $$; ops DROP FUNCTION f; ---- -StatementPhase stage 1 of 1 with 13 MutationType ops +StatementPhase stage 1 of 1 with 14 MutationType ops transitions: [[Owner:{DescID: 109}, ABSENT], PUBLIC] -> ABSENT [[UserPrivileges:{DescID: 109, Name: admin}, ABSENT], PUBLIC] -> ABSENT @@ -33,6 +33,7 @@ StatementPhase stage 1 of 1 with 13 MutationType ops [[FunctionVolatility:{DescID: 109}, ABSENT], PUBLIC] -> ABSENT [[FunctionLeakProof:{DescID: 109}, ABSENT], PUBLIC] -> ABSENT [[FunctionNullInputBehavior:{DescID: 109}, ABSENT], PUBLIC] -> ABSENT + [[FunctionSecurity:{DescID: 109}, ABSENT], PUBLIC] -> ABSENT [[FunctionBody:{DescID: 109}, ABSENT], PUBLIC] -> ABSENT ops: *scop.MarkDescriptorAsDropped @@ -52,6 +53,9 @@ StatementPhase stage 1 of 1 with 13 MutationType ops *scop.NotImplementedForPublicObjects DescID: 109 ElementType: scpb.FunctionNullInputBehavior + *scop.NotImplementedForPublicObjects + DescID: 109 + ElementType: scpb.FunctionSecurity *scop.RemoveBackReferenceInTypes BackReferencedDescriptorID: 109 TypeIDs: @@ -91,11 +95,12 @@ PreCommitPhase stage 1 of 2 with 1 MutationType op [[FunctionVolatility:{DescID: 109}, ABSENT], ABSENT] -> PUBLIC [[FunctionLeakProof:{DescID: 109}, ABSENT], ABSENT] -> PUBLIC [[FunctionNullInputBehavior:{DescID: 109}, ABSENT], ABSENT] -> PUBLIC + [[FunctionSecurity:{DescID: 109}, ABSENT], ABSENT] -> PUBLIC [[FunctionBody:{DescID: 109}, ABSENT], ABSENT] -> PUBLIC ops: *scop.UndoAllInTxnImmediateMutationOpSideEffects {} -PreCommitPhase stage 2 of 2 with 20 MutationType ops +PreCommitPhase stage 2 of 2 with 21 MutationType ops transitions: [[Owner:{DescID: 109}, ABSENT], PUBLIC] -> ABSENT 
[[UserPrivileges:{DescID: 109, Name: admin}, ABSENT], PUBLIC] -> ABSENT @@ -107,6 +112,7 @@ PreCommitPhase stage 2 of 2 with 20 MutationType ops [[FunctionVolatility:{DescID: 109}, ABSENT], PUBLIC] -> ABSENT [[FunctionLeakProof:{DescID: 109}, ABSENT], PUBLIC] -> ABSENT [[FunctionNullInputBehavior:{DescID: 109}, ABSENT], PUBLIC] -> ABSENT + [[FunctionSecurity:{DescID: 109}, ABSENT], PUBLIC] -> ABSENT [[FunctionBody:{DescID: 109}, ABSENT], PUBLIC] -> ABSENT ops: *scop.MarkDescriptorAsDropped @@ -126,6 +132,9 @@ PreCommitPhase stage 2 of 2 with 20 MutationType ops *scop.NotImplementedForPublicObjects DescID: 109 ElementType: scpb.FunctionNullInputBehavior + *scop.NotImplementedForPublicObjects + DescID: 109 + ElementType: scpb.FunctionSecurity *scop.RemoveBackReferenceInTypes BackReferencedDescriptorID: 109 TypeIDs: diff --git a/pkg/sql/schemachanger/screl/attr.go b/pkg/sql/schemachanger/screl/attr.go index dfdeed1e2c6c..859ccf18b627 100644 --- a/pkg/sql/schemachanger/screl/attr.go +++ b/pkg/sql/schemachanger/screl/attr.go @@ -399,6 +399,7 @@ var elementSchemaOptions = []rel.SchemaOption{ rel.EntityMapping(t((*scpb.IndexZoneConfig)(nil)), rel.EntityAttr(DescID, "TableID"), rel.EntityAttr(IndexID, "IndexID"), + rel.EntityAttr(SeqNum, "SeqNum"), ), rel.EntityMapping(t((*scpb.DatabaseData)(nil)), rel.EntityAttr(DescID, "DatabaseID"), @@ -435,6 +436,9 @@ var elementSchemaOptions = []rel.SchemaOption{ rel.EntityMapping(t((*scpb.FunctionBody)(nil)), rel.EntityAttr(DescID, "FunctionID"), ), + rel.EntityMapping(t((*scpb.FunctionSecurity)(nil)), + rel.EntityAttr(DescID, "FunctionID"), + ), } // Schema is the schema exported by this package covering the elements of scpb. diff --git a/pkg/sql/schemachanger/screl/scalars.go b/pkg/sql/schemachanger/screl/scalars.go index c57bfb294427..dd6b7220695d 100644 --- a/pkg/sql/schemachanger/screl/scalars.go +++ b/pkg/sql/schemachanger/screl/scalars.go @@ -127,7 +127,7 @@ func VersionSupportsElementUse(el scpb.Element, version clusterversion.ClusterVe return true case *scpb.TypeComment, *scpb.DatabaseZoneConfig: return version.IsActive(clusterversion.V24_2) - case *scpb.ColumnComputeExpression: + case *scpb.ColumnComputeExpression, *scpb.FunctionSecurity: return version.IsActive(clusterversion.V24_3) default: panic(errors.AssertionFailedf("unknown element %T", el)) diff --git a/pkg/sql/schemachanger/sctest/comparator.go b/pkg/sql/schemachanger/sctest/comparator.go index 0cc9b7fe1d14..2baaa604cd36 100644 --- a/pkg/sql/schemachanger/sctest/comparator.go +++ b/pkg/sql/schemachanger/sctest/comparator.go @@ -696,6 +696,6 @@ func metaDataIdentityCheck( if len(diff) > 0 { err := errors.Newf("descriptors mismatch with diff (- is legacy, + is declarative):\n%v", diff) err = errors.Wrapf(err, "\ndescriptors diverge after executing %q", lastExecutedLine) - t.Fatalf(err.Error()) + t.Fatal(err) } } diff --git a/pkg/sql/schemachanger/sctest_generated_test.go b/pkg/sql/schemachanger/sctest_generated_test.go index 6ac77ba1a821..26cca79fa4df 100644 --- a/pkg/sql/schemachanger/sctest_generated_test.go +++ b/pkg/sql/schemachanger/sctest_generated_test.go @@ -55,6 +55,13 @@ func TestEndToEndSideEffects_add_column_no_default(t *testing.T) { sctest.EndToEndSideEffects(t, path, sctest.SingleNodeTestClusterFactory{}) } +func TestEndToEndSideEffects_add_column_serial(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + const path = "pkg/sql/schemachanger/testdata/end_to_end/add_column_serial" + sctest.EndToEndSideEffects(t, path, 
sctest.SingleNodeTestClusterFactory{}) +} + func TestEndToEndSideEffects_add_column_virtual_not_null(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) @@ -559,6 +566,13 @@ func TestExecuteWithDMLInjection_add_column_no_default(t *testing.T) { sctest.ExecuteWithDMLInjection(t, path, sctest.SingleNodeTestClusterFactory{}) } +func TestExecuteWithDMLInjection_add_column_serial(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + const path = "pkg/sql/schemachanger/testdata/end_to_end/add_column_serial" + sctest.ExecuteWithDMLInjection(t, path, sctest.SingleNodeTestClusterFactory{}) +} + func TestExecuteWithDMLInjection_add_column_virtual_not_null(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) @@ -1063,6 +1077,13 @@ func TestGenerateSchemaChangeCorpus_add_column_no_default(t *testing.T) { sctest.GenerateSchemaChangeCorpus(t, path, sctest.SingleNodeTestClusterFactory{}) } +func TestGenerateSchemaChangeCorpus_add_column_serial(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + const path = "pkg/sql/schemachanger/testdata/end_to_end/add_column_serial" + sctest.GenerateSchemaChangeCorpus(t, path, sctest.SingleNodeTestClusterFactory{}) +} + func TestGenerateSchemaChangeCorpus_add_column_virtual_not_null(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) @@ -1567,6 +1588,13 @@ func TestPause_add_column_no_default(t *testing.T) { sctest.Pause(t, path, sctest.SingleNodeTestClusterFactory{}) } +func TestPause_add_column_serial(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + const path = "pkg/sql/schemachanger/testdata/end_to_end/add_column_serial" + sctest.Pause(t, path, sctest.SingleNodeTestClusterFactory{}) +} + func TestPause_add_column_virtual_not_null(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) @@ -2071,6 +2099,13 @@ func TestPauseMixedVersion_add_column_no_default(t *testing.T) { sctest.PauseMixedVersion(t, path, sctest.SingleNodeTestClusterFactory{}) } +func TestPauseMixedVersion_add_column_serial(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + const path = "pkg/sql/schemachanger/testdata/end_to_end/add_column_serial" + sctest.PauseMixedVersion(t, path, sctest.SingleNodeTestClusterFactory{}) +} + func TestPauseMixedVersion_add_column_virtual_not_null(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) @@ -2575,6 +2610,13 @@ func TestRollback_add_column_no_default(t *testing.T) { sctest.Rollback(t, path, sctest.SingleNodeTestClusterFactory{}) } +func TestRollback_add_column_serial(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + const path = "pkg/sql/schemachanger/testdata/end_to_end/add_column_serial" + sctest.Rollback(t, path, sctest.SingleNodeTestClusterFactory{}) +} + func TestRollback_add_column_virtual_not_null(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) diff --git a/pkg/sql/schemachanger/testdata/end_to_end/add_column_serial/add_column_serial.definition b/pkg/sql/schemachanger/testdata/end_to_end/add_column_serial/add_column_serial.definition new file mode 100644 index 000000000000..a72237f7ddb4 --- /dev/null +++ b/pkg/sql/schemachanger/testdata/end_to_end/add_column_serial/add_column_serial.definition @@ -0,0 +1,33 @@ +setup +CREATE DATABASE db; +CREATE TABLE db.public.tbl (i INT PRIMARY KEY); +---- + +stage-exec phase=PostCommitPhase stage=: +INSERT INTO db.public.tbl VALUES($stageKey); 
+INSERT INTO db.public.tbl VALUES($stageKey + 1); +---- + +# Each insert will be injected twice per stage, so we should always +# see a count of 2. +stage-query phase=PostCommitPhase stage=: +SELECT count(*)=$successfulStageCount*2 FROM db.public.tbl; +---- +true + + +stage-exec phase=PostCommitNonRevertiblePhase stage=: +INSERT INTO db.public.tbl VALUES($stageKey); +INSERT INTO db.public.tbl VALUES($stageKey + 1); +---- + +# Each insert will be injected twice per stage, so we should always +# see a count of 2. +stage-query phase=PostCommitNonRevertiblePhase stage=: +SELECT count(*)=$successfulStageCount*2 FROM db.public.tbl; +---- +true + +test +ALTER TABLE db.public.tbl ADD COLUMN serial_id SERIAL +---- diff --git a/pkg/sql/schemachanger/testdata/end_to_end/add_column_serial/add_column_serial.explain b/pkg/sql/schemachanger/testdata/end_to_end/add_column_serial/add_column_serial.explain new file mode 100644 index 000000000000..7a0a68f11976 --- /dev/null +++ b/pkg/sql/schemachanger/testdata/end_to_end/add_column_serial/add_column_serial.explain @@ -0,0 +1,183 @@ +/* setup */ +CREATE DATABASE db; +CREATE TABLE db.public.tbl (i INT PRIMARY KEY); + +/* test */ +EXPLAIN (DDL) ALTER TABLE db.public.tbl ADD COLUMN serial_id SERIAL; +---- +Schema change plan for ALTER TABLE ‹db›.‹public›.‹tbl› ADD COLUMN ‹serial_id› INT8; + ├── StatementPhase + │ └── Stage 1 of 1 in StatementPhase + │ ├── 8 elements transitioning toward PUBLIC + │ │ ├── ABSENT → DELETE_ONLY Column:{DescID: 106 (tbl), ColumnID: 2 (serial_id+)} + │ │ ├── ABSENT → PUBLIC ColumnName:{DescID: 106 (tbl), Name: "serial_id", ColumnID: 2 (serial_id+)} + │ │ ├── ABSENT → PUBLIC ColumnType:{DescID: 106 (tbl), ColumnFamilyID: 0 (primary), ColumnID: 2 (serial_id+), TypeName: "INT8"} + │ │ ├── ABSENT → PUBLIC ColumnDefaultExpression:{DescID: 106 (tbl), ColumnID: 2 (serial_id+), Expr: unique_rowid()} + │ │ ├── ABSENT → BACKFILL_ONLY PrimaryIndex:{DescID: 106 (tbl), IndexID: 2 (tbl_pkey+), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (tbl_pkey-)} + │ │ ├── ABSENT → PUBLIC IndexColumn:{DescID: 106 (tbl), ColumnID: 1 (i), IndexID: 2 (tbl_pkey+)} + │ │ ├── ABSENT → PUBLIC IndexData:{DescID: 106 (tbl), IndexID: 2 (tbl_pkey+)} + │ │ └── ABSENT → PUBLIC IndexColumn:{DescID: 106 (tbl), ColumnID: 2 (serial_id+), IndexID: 2 (tbl_pkey+)} + │ ├── 3 elements transitioning toward TRANSIENT_ABSENT + │ │ ├── ABSENT → DELETE_ONLY TemporaryIndex:{DescID: 106 (tbl), IndexID: 3, ConstraintID: 3, SourceIndexID: 1 (tbl_pkey-)} + │ │ ├── ABSENT → PUBLIC IndexColumn:{DescID: 106 (tbl), ColumnID: 1 (i), IndexID: 3} + │ │ └── ABSENT → PUBLIC IndexColumn:{DescID: 106 (tbl), ColumnID: 2 (serial_id+), IndexID: 3} + │ └── 10 Mutation operations + │ ├── MakeAbsentColumnDeleteOnly {"Column":{"ColumnID":2,"TableID":106}} + │ ├── SetColumnName {"ColumnID":2,"Name":"serial_id","TableID":106} + │ ├── UpsertColumnType {"ColumnType":{"ColumnID":2,"TableID":106}} + │ ├── AddColumnDefaultExpression {"Default":{"ColumnID":2,"TableID":106}} + │ ├── MakeAbsentIndexBackfilling {"Index":{"ConstraintID":2,"IndexID":2,"IsUnique":true,"SourceIndexID":1,"TableID":106,"TemporaryIndexID":3}} + │ ├── AddColumnToIndex {"ColumnID":1,"IndexID":2,"TableID":106} + │ ├── MakeAbsentTempIndexDeleteOnly {"Index":{"ConstraintID":3,"IndexID":3,"IsUnique":true,"SourceIndexID":1,"TableID":106}} + │ ├── AddColumnToIndex {"ColumnID":1,"IndexID":3,"TableID":106} + │ ├── AddColumnToIndex {"ColumnID":2,"IndexID":2,"Kind":2,"TableID":106} + │ └── AddColumnToIndex
{"ColumnID":2,"IndexID":3,"Kind":2,"TableID":106} + ├── PreCommitPhase + │ ├── Stage 1 of 2 in PreCommitPhase + │ │ ├── 8 elements transitioning toward PUBLIC + │ │ │ ├── DELETE_ONLY → ABSENT Column:{DescID: 106 (tbl), ColumnID: 2 (serial_id+)} + │ │ │ ├── PUBLIC → ABSENT ColumnName:{DescID: 106 (tbl), Name: "serial_id", ColumnID: 2 (serial_id+)} + │ │ │ ├── PUBLIC → ABSENT ColumnType:{DescID: 106 (tbl), ColumnFamilyID: 0 (primary), ColumnID: 2 (serial_id+), TypeName: "INT8"} + │ │ │ ├── PUBLIC → ABSENT ColumnDefaultExpression:{DescID: 106 (tbl), ColumnID: 2 (serial_id+), Expr: unique_rowid()} + │ │ │ ├── BACKFILL_ONLY → ABSENT PrimaryIndex:{DescID: 106 (tbl), IndexID: 2 (tbl_pkey+), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (tbl_pkey-)} + │ │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 106 (tbl), ColumnID: 1 (i), IndexID: 2 (tbl_pkey+)} + │ │ │ ├── PUBLIC → ABSENT IndexData:{DescID: 106 (tbl), IndexID: 2 (tbl_pkey+)} + │ │ │ └── PUBLIC → ABSENT IndexColumn:{DescID: 106 (tbl), ColumnID: 2 (serial_id+), IndexID: 2 (tbl_pkey+)} + │ │ ├── 3 elements transitioning toward TRANSIENT_ABSENT + │ │ │ ├── DELETE_ONLY → ABSENT TemporaryIndex:{DescID: 106 (tbl), IndexID: 3, ConstraintID: 3, SourceIndexID: 1 (tbl_pkey-)} + │ │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 106 (tbl), ColumnID: 1 (i), IndexID: 3} + │ │ │ └── PUBLIC → ABSENT IndexColumn:{DescID: 106 (tbl), ColumnID: 2 (serial_id+), IndexID: 3} + │ │ └── 1 Mutation operation + │ │ └── UndoAllInTxnImmediateMutationOpSideEffects + │ └── Stage 2 of 2 in PreCommitPhase + │ ├── 8 elements transitioning toward PUBLIC + │ │ ├── ABSENT → DELETE_ONLY Column:{DescID: 106 (tbl), ColumnID: 2 (serial_id+)} + │ │ ├── ABSENT → PUBLIC ColumnName:{DescID: 106 (tbl), Name: "serial_id", ColumnID: 2 (serial_id+)} + │ │ ├── ABSENT → PUBLIC ColumnType:{DescID: 106 (tbl), ColumnFamilyID: 0 (primary), ColumnID: 2 (serial_id+), TypeName: "INT8"} + │ │ ├── ABSENT → PUBLIC ColumnDefaultExpression:{DescID: 106 (tbl), ColumnID: 2 (serial_id+), Expr: unique_rowid()} + │ │ ├── ABSENT → BACKFILL_ONLY PrimaryIndex:{DescID: 106 (tbl), IndexID: 2 (tbl_pkey+), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (tbl_pkey-)} + │ │ ├── ABSENT → PUBLIC IndexColumn:{DescID: 106 (tbl), ColumnID: 1 (i), IndexID: 2 (tbl_pkey+)} + │ │ ├── ABSENT → PUBLIC IndexData:{DescID: 106 (tbl), IndexID: 2 (tbl_pkey+)} + │ │ └── ABSENT → PUBLIC IndexColumn:{DescID: 106 (tbl), ColumnID: 2 (serial_id+), IndexID: 2 (tbl_pkey+)} + │ ├── 3 elements transitioning toward TRANSIENT_ABSENT + │ │ ├── ABSENT → DELETE_ONLY TemporaryIndex:{DescID: 106 (tbl), IndexID: 3, ConstraintID: 3, SourceIndexID: 1 (tbl_pkey-)} + │ │ ├── ABSENT → PUBLIC IndexColumn:{DescID: 106 (tbl), ColumnID: 1 (i), IndexID: 3} + │ │ └── ABSENT → PUBLIC IndexColumn:{DescID: 106 (tbl), ColumnID: 2 (serial_id+), IndexID: 3} + │ └── 14 Mutation operations + │ ├── MakeAbsentColumnDeleteOnly {"Column":{"ColumnID":2,"TableID":106}} + │ ├── SetColumnName {"ColumnID":2,"Name":"serial_id","TableID":106} + │ ├── UpsertColumnType {"ColumnType":{"ColumnID":2,"TableID":106}} + │ ├── AddColumnDefaultExpression {"Default":{"ColumnID":2,"TableID":106}} + │ ├── MakeAbsentIndexBackfilling {"Index":{"ConstraintID":2,"IndexID":2,"IsUnique":true,"SourceIndexID":1,"TableID":106,"TemporaryIndexID":3}} + │ ├── MaybeAddSplitForIndex {"IndexID":2,"TableID":106} + │ ├── AddColumnToIndex {"ColumnID":1,"IndexID":2,"TableID":106} + │ ├── MakeAbsentTempIndexDeleteOnly 
{"Index":{"ConstraintID":3,"IndexID":3,"IsUnique":true,"SourceIndexID":1,"TableID":106}} + │ ├── MaybeAddSplitForIndex {"IndexID":3,"TableID":106} + │ ├── AddColumnToIndex {"ColumnID":1,"IndexID":3,"TableID":106} + │ ├── AddColumnToIndex {"ColumnID":2,"IndexID":2,"Kind":2,"TableID":106} + │ ├── AddColumnToIndex {"ColumnID":2,"IndexID":3,"Kind":2,"TableID":106} + │ ├── SetJobStateOnDescriptor {"DescriptorID":106,"Initialize":true} + │ └── CreateSchemaChangerJob {"RunningStatus":"PostCommitPhase ..."} + ├── PostCommitPhase + │ ├── Stage 1 of 7 in PostCommitPhase + │ │ ├── 2 elements transitioning toward PUBLIC + │ │ │ ├── DELETE_ONLY → WRITE_ONLY Column:{DescID: 106 (tbl), ColumnID: 2 (serial_id+)} + │ │ │ └── ABSENT → WRITE_ONLY ColumnNotNull:{DescID: 106 (tbl), ColumnID: 2 (serial_id+), IndexID: 2 (tbl_pkey+)} + │ │ ├── 2 elements transitioning toward TRANSIENT_ABSENT + │ │ │ ├── DELETE_ONLY → WRITE_ONLY TemporaryIndex:{DescID: 106 (tbl), IndexID: 3, ConstraintID: 3, SourceIndexID: 1 (tbl_pkey-)} + │ │ │ └── ABSENT → PUBLIC IndexData:{DescID: 106 (tbl), IndexID: 3} + │ │ └── 5 Mutation operations + │ │ ├── MakeDeleteOnlyColumnWriteOnly {"ColumnID":2,"TableID":106} + │ │ ├── MakeDeleteOnlyIndexWriteOnly {"IndexID":3,"TableID":106} + │ │ ├── MakeAbsentColumnNotNullWriteOnly {"ColumnID":2,"TableID":106} + │ │ ├── SetJobStateOnDescriptor {"DescriptorID":106} + │ │ └── UpdateSchemaChangerJob {"RunningStatus":"PostCommitPhase ..."} + │ ├── Stage 2 of 7 in PostCommitPhase + │ │ ├── 1 element transitioning toward PUBLIC + │ │ │ └── BACKFILL_ONLY → BACKFILLED PrimaryIndex:{DescID: 106 (tbl), IndexID: 2 (tbl_pkey+), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (tbl_pkey-)} + │ │ └── 1 Backfill operation + │ │ └── BackfillIndex {"IndexID":2,"SourceIndexID":1,"TableID":106} + │ ├── Stage 3 of 7 in PostCommitPhase + │ │ ├── 1 element transitioning toward PUBLIC + │ │ │ └── BACKFILLED → DELETE_ONLY PrimaryIndex:{DescID: 106 (tbl), IndexID: 2 (tbl_pkey+), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (tbl_pkey-)} + │ │ └── 3 Mutation operations + │ │ ├── MakeBackfillingIndexDeleteOnly {"IndexID":2,"TableID":106} + │ │ ├── SetJobStateOnDescriptor {"DescriptorID":106} + │ │ └── UpdateSchemaChangerJob {"RunningStatus":"PostCommitPhase ..."} + │ ├── Stage 4 of 7 in PostCommitPhase + │ │ ├── 1 element transitioning toward PUBLIC + │ │ │ └── DELETE_ONLY → MERGE_ONLY PrimaryIndex:{DescID: 106 (tbl), IndexID: 2 (tbl_pkey+), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (tbl_pkey-)} + │ │ └── 3 Mutation operations + │ │ ├── MakeBackfilledIndexMerging {"IndexID":2,"TableID":106} + │ │ ├── SetJobStateOnDescriptor {"DescriptorID":106} + │ │ └── UpdateSchemaChangerJob {"RunningStatus":"PostCommitPhase ..."} + │ ├── Stage 5 of 7 in PostCommitPhase + │ │ ├── 1 element transitioning toward PUBLIC + │ │ │ └── MERGE_ONLY → MERGED PrimaryIndex:{DescID: 106 (tbl), IndexID: 2 (tbl_pkey+), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (tbl_pkey-)} + │ │ └── 1 Backfill operation + │ │ └── MergeIndex {"BackfilledIndexID":2,"TableID":106,"TemporaryIndexID":3} + │ ├── Stage 6 of 7 in PostCommitPhase + │ │ ├── 1 element transitioning toward PUBLIC + │ │ │ └── MERGED → WRITE_ONLY PrimaryIndex:{DescID: 106 (tbl), IndexID: 2 (tbl_pkey+), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (tbl_pkey-)} + │ │ ├── 1 element transitioning toward TRANSIENT_ABSENT + │ │ │ └── WRITE_ONLY → TRANSIENT_DELETE_ONLY TemporaryIndex:{DescID: 106 (tbl), IndexID: 3, ConstraintID: 3, SourceIndexID: 1 
(tbl_pkey-)} + │ │ └── 4 Mutation operations + │ │ ├── MakeWriteOnlyIndexDeleteOnly {"IndexID":3,"TableID":106} + │ │ ├── MakeMergedIndexWriteOnly {"IndexID":2,"TableID":106} + │ │ ├── SetJobStateOnDescriptor {"DescriptorID":106} + │ │ └── UpdateSchemaChangerJob {"RunningStatus":"PostCommitPhase ..."} + │ └── Stage 7 of 7 in PostCommitPhase + │ ├── 2 elements transitioning toward PUBLIC + │ │ ├── WRITE_ONLY → VALIDATED PrimaryIndex:{DescID: 106 (tbl), IndexID: 2 (tbl_pkey+), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (tbl_pkey-)} + │ │ └── WRITE_ONLY → VALIDATED ColumnNotNull:{DescID: 106 (tbl), ColumnID: 2 (serial_id+), IndexID: 2 (tbl_pkey+)} + │ └── 2 Validation operations + │ ├── ValidateIndex {"IndexID":2,"TableID":106} + │ └── ValidateColumnNotNull {"ColumnID":2,"IndexIDForValidation":2,"TableID":106} + └── PostCommitNonRevertiblePhase + ├── Stage 1 of 3 in PostCommitNonRevertiblePhase + │ ├── 4 elements transitioning toward PUBLIC + │ │ ├── WRITE_ONLY → PUBLIC Column:{DescID: 106 (tbl), ColumnID: 2 (serial_id+)} + │ │ ├── VALIDATED → PUBLIC PrimaryIndex:{DescID: 106 (tbl), IndexID: 2 (tbl_pkey+), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (tbl_pkey-)} + │ │ ├── ABSENT → PUBLIC IndexName:{DescID: 106 (tbl), Name: "tbl_pkey", IndexID: 2 (tbl_pkey+)} + │ │ └── VALIDATED → PUBLIC ColumnNotNull:{DescID: 106 (tbl), ColumnID: 2 (serial_id+), IndexID: 2 (tbl_pkey+)} + │ ├── 3 elements transitioning toward TRANSIENT_ABSENT + │ │ ├── TRANSIENT_DELETE_ONLY → TRANSIENT_ABSENT TemporaryIndex:{DescID: 106 (tbl), IndexID: 3, ConstraintID: 3, SourceIndexID: 1 (tbl_pkey-)} + │ │ ├── PUBLIC → TRANSIENT_ABSENT IndexColumn:{DescID: 106 (tbl), ColumnID: 1 (i), IndexID: 3} + │ │ └── PUBLIC → TRANSIENT_ABSENT IndexColumn:{DescID: 106 (tbl), ColumnID: 2 (serial_id+), IndexID: 3} + │ ├── 2 elements transitioning toward ABSENT + │ │ ├── PUBLIC → VALIDATED PrimaryIndex:{DescID: 106 (tbl), IndexID: 1 (tbl_pkey-), ConstraintID: 1} + │ │ └── PUBLIC → ABSENT IndexName:{DescID: 106 (tbl), Name: "tbl_pkey", IndexID: 1 (tbl_pkey-)} + │ └── 12 Mutation operations + │ ├── MakePublicPrimaryIndexWriteOnly {"IndexID":1,"TableID":106} + │ ├── SetIndexName {"IndexID":1,"Name":"crdb_internal_in...","TableID":106} + │ ├── SetIndexName {"IndexID":2,"Name":"tbl_pkey","TableID":106} + │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":3,"TableID":106} + │ ├── RemoveColumnFromIndex {"ColumnID":2,"IndexID":3,"Kind":2,"TableID":106} + │ ├── MakeValidatedColumnNotNullPublic {"ColumnID":2,"TableID":106} + │ ├── MakeValidatedPrimaryIndexPublic {"IndexID":2,"TableID":106} + │ ├── MakeIndexAbsent {"IndexID":3,"TableID":106} + │ ├── MakeWriteOnlyColumnPublic {"ColumnID":2,"TableID":106} + │ ├── RefreshStats {"TableID":106} + │ ├── SetJobStateOnDescriptor {"DescriptorID":106} + │ └── UpdateSchemaChangerJob {"IsNonCancelable":true,"RunningStatus":"PostCommitNonRev..."} + ├── Stage 2 of 3 in PostCommitNonRevertiblePhase + │ ├── 2 elements transitioning toward ABSENT + │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 106 (tbl), ColumnID: 1 (i), IndexID: 1 (tbl_pkey-)} + │ │ └── VALIDATED → DELETE_ONLY PrimaryIndex:{DescID: 106 (tbl), IndexID: 1 (tbl_pkey-), ConstraintID: 1} + │ └── 4 Mutation operations + │ ├── MakeWriteOnlyIndexDeleteOnly {"IndexID":1,"TableID":106} + │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":1,"TableID":106} + │ ├── SetJobStateOnDescriptor {"DescriptorID":106} + │ └── UpdateSchemaChangerJob {"IsNonCancelable":true,"RunningStatus":"PostCommitNonRev..."} + └── Stage 3 of 3 in 
PostCommitNonRevertiblePhase + ├── 1 element transitioning toward TRANSIENT_ABSENT + │ └── PUBLIC → TRANSIENT_ABSENT IndexData:{DescID: 106 (tbl), IndexID: 3} + ├── 2 elements transitioning toward ABSENT + │ ├── DELETE_ONLY → ABSENT PrimaryIndex:{DescID: 106 (tbl), IndexID: 1 (tbl_pkey-), ConstraintID: 1} + │ └── PUBLIC → ABSENT IndexData:{DescID: 106 (tbl), IndexID: 1 (tbl_pkey-)} + └── 5 Mutation operations + ├── MakeIndexAbsent {"IndexID":1,"TableID":106} + ├── CreateGCJobForIndex {"IndexID":1,"TableID":106} + ├── CreateGCJobForIndex {"IndexID":3,"TableID":106} + ├── RemoveJobStateFromDescriptor {"DescriptorID":106} + └── UpdateSchemaChangerJob {"IsNonCancelable":true,"RunningStatus":"all stages compl..."} diff --git a/pkg/sql/schemachanger/testdata/end_to_end/add_column_serial/add_column_serial.explain_shape b/pkg/sql/schemachanger/testdata/end_to_end/add_column_serial/add_column_serial.explain_shape new file mode 100644 index 000000000000..9253f0462df0 --- /dev/null +++ b/pkg/sql/schemachanger/testdata/end_to_end/add_column_serial/add_column_serial.explain_shape @@ -0,0 +1,18 @@ +/* setup */ +CREATE DATABASE db; +CREATE TABLE db.public.tbl (i INT PRIMARY KEY); + +/* test */ +EXPLAIN (DDL, SHAPE) ALTER TABLE db.public.tbl ADD COLUMN serial_id SERIAL; +---- +Schema change plan for ALTER TABLE ‹db›.‹public›.‹tbl› ADD COLUMN ‹serial_id› INT8; + ├── execute 2 system table mutations transactions + ├── backfill using primary index tbl_pkey- in relation tbl + │ └── into tbl_pkey+ (i; serial_id+) + ├── execute 2 system table mutations transactions + ├── merge temporary indexes into backfilled indexes in relation tbl + │ └── from tbl@[3] into tbl_pkey+ + ├── execute 1 system table mutations transaction + ├── validate UNIQUE constraint backed by index tbl_pkey+ in relation tbl + ├── validate NOT NULL constraint on column serial_id+ in index tbl_pkey+ in relation tbl + └── execute 3 system table mutations transactions diff --git a/pkg/sql/schemachanger/testdata/end_to_end/add_column_serial/add_column_serial.side_effects b/pkg/sql/schemachanger/testdata/end_to_end/add_column_serial/add_column_serial.side_effects new file mode 100644 index 000000000000..9faac8131640 --- /dev/null +++ b/pkg/sql/schemachanger/testdata/end_to_end/add_column_serial/add_column_serial.side_effects @@ -0,0 +1,634 @@ +/* setup */ +CREATE DATABASE db; +CREATE TABLE db.public.tbl (i INT PRIMARY KEY); +---- +... ++database {0 0 db} -> 104 ++schema {104 0 public} -> 105 ++object {104 105 tbl} -> 106 + +/* test */ +ALTER TABLE db.public.tbl ADD COLUMN serial_id SERIAL; +---- +begin transaction #1 +# begin StatementPhase +checking for feature: ALTER TABLE +increment telemetry for sql.schema.alter_table +increment telemetry for sql.schema.alter_table.add_column +increment telemetry for sql.schema.qualifcation.default_expr +increment telemetry for sql.schema.new_column_type.int8 +write *eventpb.AlterTable to event log: + mutationId: 1 + sql: + descriptorId: 106 + statement: ALTER TABLE ‹db›.‹public›.‹tbl› ADD COLUMN ‹serial_id› INT8 + tag: ALTER TABLE + user: root + tableName: db.public.tbl +## StatementPhase stage 1 of 1 with 10 MutationType ops +upsert descriptor #106 + ... 
+ - columnIds: + - 1 + + - 2 + columnNames: + - i + + - serial_id + + defaultColumnId: 2 + name: primary + formatVersion: 3 + id: 106 + modificationTime: {} + + mutations: + + - column: + + defaultExpr: unique_rowid() + + id: 2 + + name: serial_id + + nullable: true + + type: + + family: IntFamily + + oid: 20 + + width: 64 + + direction: ADD + + mutationId: 1 + + state: DELETE_ONLY + + - direction: ADD + + index: + + constraintId: 2 + + createdExplicitly: true + + encodingType: 1 + + foreignKey: {} + + geoConfig: {} + + id: 2 + + interleave: {} + + keyColumnDirections: + + - ASC + + keyColumnIds: + + - 1 + + keyColumnNames: + + - i + + name: crdb_internal_index_2_name_placeholder + + partitioning: {} + + sharded: {} + + storeColumnIds: + + - 2 + + storeColumnNames: + + - serial_id + + unique: true + + version: 4 + + mutationId: 1 + + state: BACKFILLING + + - direction: ADD + + index: + + constraintId: 3 + + createdExplicitly: true + + encodingType: 1 + + foreignKey: {} + + geoConfig: {} + + id: 3 + + interleave: {} + + keyColumnDirections: + + - ASC + + keyColumnIds: + + - 1 + + keyColumnNames: + + - i + + name: crdb_internal_index_3_name_placeholder + + partitioning: {} + + sharded: {} + + storeColumnIds: + + - 2 + + storeColumnNames: + + - serial_id + + unique: true + + useDeletePreservingEncoding: true + + version: 4 + + mutationId: 1 + + state: DELETE_ONLY + name: tbl + - nextColumnId: 2 + - nextConstraintId: 2 + + nextColumnId: 3 + + nextConstraintId: 4 + nextFamilyId: 1 + - nextIndexId: 2 + + nextIndexId: 4 + nextMutationId: 1 + parentId: 104 + ... + time: {} + unexposedParentSchemaId: 105 + - version: "1" + + version: "2" +# end StatementPhase +# begin PreCommitPhase +## PreCommitPhase stage 1 of 2 with 1 MutationType op +undo all catalog changes within txn #1 +persist all catalog changes to storage +## PreCommitPhase stage 2 of 2 with 14 MutationType ops +upsert descriptor #106 + ... 
+ createAsOfTime: + wallTime: "1640995200000000000" + + declarativeSchemaChangerState: + + authorization: + + userName: root + + currentStatuses: + + jobId: "1" + + nameMapping: + + columns: + + "1": i + + "2": serial_id + + "4294967294": tableoid + + "4294967295": crdb_internal_mvcc_timestamp + + families: + + "0": primary + + id: 106 + + indexes: + + "2": tbl_pkey + + name: tbl + + relevantStatements: + + - statement: + + redactedStatement: ALTER TABLE ‹db›.‹public›.‹tbl› ADD COLUMN ‹serial_id› INT8 + + statement: ALTER TABLE db.public.tbl ADD COLUMN serial_id SERIAL8 + + statementTag: ALTER TABLE + + revertible: true + + targetRanks: + + targets: + families: + - columnIds: + - 1 + + - 2 + columnNames: + - i + + - serial_id + + defaultColumnId: 2 + name: primary + formatVersion: 3 + id: 106 + modificationTime: {} + + mutations: + + - column: + + defaultExpr: unique_rowid() + + id: 2 + + name: serial_id + + nullable: true + + type: + + family: IntFamily + + oid: 20 + + width: 64 + + direction: ADD + + mutationId: 1 + + state: DELETE_ONLY + + - direction: ADD + + index: + + constraintId: 2 + + createdExplicitly: true + + encodingType: 1 + + foreignKey: {} + + geoConfig: {} + + id: 2 + + interleave: {} + + keyColumnDirections: + + - ASC + + keyColumnIds: + + - 1 + + keyColumnNames: + + - i + + name: crdb_internal_index_2_name_placeholder + + partitioning: {} + + sharded: {} + + storeColumnIds: + + - 2 + + storeColumnNames: + + - serial_id + + unique: true + + version: 4 + + mutationId: 1 + + state: BACKFILLING + + - direction: ADD + + index: + + constraintId: 3 + + createdExplicitly: true + + encodingType: 1 + + foreignKey: {} + + geoConfig: {} + + id: 3 + + interleave: {} + + keyColumnDirections: + + - ASC + + keyColumnIds: + + - 1 + + keyColumnNames: + + - i + + name: crdb_internal_index_3_name_placeholder + + partitioning: {} + + sharded: {} + + storeColumnIds: + + - 2 + + storeColumnNames: + + - serial_id + + unique: true + + useDeletePreservingEncoding: true + + version: 4 + + mutationId: 1 + + state: DELETE_ONLY + name: tbl + - nextColumnId: 2 + - nextConstraintId: 2 + + nextColumnId: 3 + + nextConstraintId: 4 + nextFamilyId: 1 + - nextIndexId: 2 + + nextIndexId: 4 + nextMutationId: 1 + parentId: 104 + ... + time: {} + unexposedParentSchemaId: 105 + - version: "1" + + version: "2" +persist all catalog changes to storage +create job #1 (non-cancelable: false): "ALTER TABLE db.public.tbl ADD COLUMN serial_id INT8" + descriptor IDs: [106] +# end PreCommitPhase +commit transaction #1 +notified job registry to adopt jobs: [1] +# begin PostCommitPhase +begin transaction #2 +commit transaction #2 +begin transaction #3 +## PostCommitPhase stage 1 of 7 with 5 MutationType ops +upsert descriptor #106 + table: + + checks: + + - columnIds: + + - 2 + + expr: serial_id IS NOT NULL + + isNonNullConstraint: true + + name: serial_id_auto_not_null + + validity: Validating + columns: + - id: 1 + ... + direction: ADD + mutationId: 1 + - state: DELETE_ONLY + + state: WRITE_ONLY + - direction: ADD + index: + ... + version: 4 + mutationId: 1 + - state: DELETE_ONLY + + state: WRITE_ONLY + + - constraint: + + check: + + columnIds: + + - 2 + + expr: serial_id IS NOT NULL + + isNonNullConstraint: true + + name: serial_id_auto_not_null + + validity: Validating + + constraintType: NOT_NULL + + foreignKey: {} + + name: serial_id_auto_not_null + + notNullColumn: 2 + + uniqueWithoutIndexConstraint: {} + + direction: ADD + + mutationId: 1 + + state: WRITE_ONLY + name: tbl + nextColumnId: 3 + ... 
+ time: {} + unexposedParentSchemaId: 105 + - version: "2" + + version: "3" +persist all catalog changes to storage +update progress of schema change job #1: "PostCommitPhase stage 2 of 7 with 1 BackfillType op pending" +commit transaction #3 +begin transaction #4 +## PostCommitPhase stage 2 of 7 with 1 BackfillType op +backfill indexes [2] from index #1 in table #106 +commit transaction #4 +begin transaction #5 +## PostCommitPhase stage 3 of 7 with 3 MutationType ops +upsert descriptor #106 + ... + version: 4 + mutationId: 1 + - state: BACKFILLING + + state: DELETE_ONLY + - direction: ADD + index: + ... + time: {} + unexposedParentSchemaId: 105 + - version: "3" + + version: "4" +persist all catalog changes to storage +update progress of schema change job #1: "PostCommitPhase stage 4 of 7 with 1 MutationType op pending" +commit transaction #5 +begin transaction #6 +## PostCommitPhase stage 4 of 7 with 3 MutationType ops +upsert descriptor #106 + ... + version: 4 + mutationId: 1 + - state: DELETE_ONLY + + state: MERGING + - direction: ADD + index: + ... + time: {} + unexposedParentSchemaId: 105 + - version: "4" + + version: "5" +persist all catalog changes to storage +update progress of schema change job #1: "PostCommitPhase stage 5 of 7 with 1 BackfillType op pending" +commit transaction #6 +begin transaction #7 +## PostCommitPhase stage 5 of 7 with 1 BackfillType op +merge temporary indexes [3] into backfilled indexes [2] in table #106 +commit transaction #7 +begin transaction #8 +## PostCommitPhase stage 6 of 7 with 4 MutationType ops +upsert descriptor #106 + ... + version: 4 + mutationId: 1 + - state: MERGING + - - direction: ADD + + state: WRITE_ONLY + + - direction: DROP + index: + constraintId: 3 + ... + version: 4 + mutationId: 1 + - state: WRITE_ONLY + + state: DELETE_ONLY + - constraint: + check: + ... + time: {} + unexposedParentSchemaId: 105 + - version: "5" + + version: "6" +persist all catalog changes to storage +update progress of schema change job #1: "PostCommitPhase stage 7 of 7 with 2 ValidationType ops pending" +commit transaction #8 +begin transaction #9 +## PostCommitPhase stage 7 of 7 with 2 ValidationType ops +validate forward indexes [2] in table #106 +validate CHECK constraint serial_id_auto_not_null in table #106 +commit transaction #9 +begin transaction #10 +## PostCommitNonRevertiblePhase stage 1 of 3 with 12 MutationType ops +upsert descriptor #106 + table: + - checks: + - - columnIds: + - - 2 + - expr: serial_id IS NOT NULL + - isNonNullConstraint: true + - name: serial_id_auto_not_null + - validity: Validating + + checks: [] + columns: + - id: 1 + ... + oid: 20 + width: 64 + + - defaultExpr: unique_rowid() + + id: 2 + + name: serial_id + + type: + + family: IntFamily + + oid: 20 + + width: 64 + createAsOfTime: + wallTime: "1640995200000000000" + ... + statement: ALTER TABLE db.public.tbl ADD COLUMN serial_id SERIAL8 + statementTag: ALTER TABLE + - revertible: true + targetRanks: + targets: + ... 
+ modificationTime: {} + mutations: + - - column: + - defaultExpr: unique_rowid() + - id: 2 + - name: serial_id + - nullable: true + - type: + - family: IntFamily + - oid: 20 + - width: 64 + - direction: ADD + - mutationId: 1 + - state: WRITE_ONLY + - - direction: ADD + - index: + - constraintId: 2 + - createdExplicitly: true + - encodingType: 1 + - foreignKey: {} + - geoConfig: {} + - id: 2 + - interleave: {} + - keyColumnDirections: + - - ASC + - keyColumnIds: + - - 1 + - keyColumnNames: + - - i + - name: crdb_internal_index_2_name_placeholder + - partitioning: {} + - sharded: {} + - storeColumnIds: + - - 2 + - storeColumnNames: + - - serial_id + - unique: true + - version: 4 + - mutationId: 1 + - state: WRITE_ONLY + - direction: DROP + index: + - constraintId: 3 + - createdExplicitly: true + + constraintId: 1 + + createdAtNanos: "1640995200000000000" + encodingType: 1 + foreignKey: {} + geoConfig: {} + - id: 3 + + id: 1 + interleave: {} + keyColumnDirections: + ... + keyColumnNames: + - i + - name: crdb_internal_index_3_name_placeholder + + name: crdb_internal_index_1_name_placeholder + partitioning: {} + sharded: {} + - storeColumnIds: + - - 2 + - storeColumnNames: + - - serial_id + unique: true + - useDeletePreservingEncoding: true + version: 4 + mutationId: 1 + - state: DELETE_ONLY + - - constraint: + - check: + - columnIds: + - - 2 + - expr: serial_id IS NOT NULL + - isNonNullConstraint: true + - name: serial_id_auto_not_null + - validity: Validating + - constraintType: NOT_NULL + - foreignKey: {} + - name: serial_id_auto_not_null + - notNullColumn: 2 + - uniqueWithoutIndexConstraint: {} + - direction: ADD + - mutationId: 1 + state: WRITE_ONLY + name: tbl + ... + parentId: 104 + primaryIndex: + - constraintId: 1 + - createdAtNanos: "1640995200000000000" + + constraintId: 2 + + createdExplicitly: true + encodingType: 1 + foreignKey: {} + geoConfig: {} + - id: 1 + + id: 2 + interleave: {} + keyColumnDirections: + ... + partitioning: {} + sharded: {} + + storeColumnIds: + + - 2 + + storeColumnNames: + + - serial_id + unique: true + version: 4 + ... + time: {} + unexposedParentSchemaId: 105 + - version: "6" + + version: "7" +persist all catalog changes to storage +adding table for stats refresh: 106 +update progress of schema change job #1: "PostCommitNonRevertiblePhase stage 2 of 3 with 2 MutationType ops pending" +set schema change job #1 to non-cancellable +commit transaction #10 +begin transaction #11 +## PostCommitNonRevertiblePhase stage 2 of 3 with 4 MutationType ops +upsert descriptor #106 + ... + version: 4 + mutationId: 1 + - state: WRITE_ONLY + + state: DELETE_ONLY + name: tbl + nextColumnId: 3 + ... + time: {} + unexposedParentSchemaId: 105 + - version: "7" + + version: "8" +persist all catalog changes to storage +update progress of schema change job #1: "PostCommitNonRevertiblePhase stage 3 of 3 with 3 MutationType ops pending" +commit transaction #11 +begin transaction #12 +## PostCommitNonRevertiblePhase stage 3 of 3 with 5 MutationType ops +upsert descriptor #106 + ... 
+ createAsOfTime: + wallTime: "1640995200000000000" + - declarativeSchemaChangerState: + - authorization: + - userName: root + - currentStatuses: + - jobId: "1" + - nameMapping: + - columns: + - "1": i + - "2": serial_id + - "4294967294": tableoid + - "4294967295": crdb_internal_mvcc_timestamp + - families: + - "0": primary + - id: 106 + - indexes: + - "2": tbl_pkey + - name: tbl + - relevantStatements: + - - statement: + - redactedStatement: ALTER TABLE ‹db›.‹public›.‹tbl› ADD COLUMN ‹serial_id› INT8 + - statement: ALTER TABLE db.public.tbl ADD COLUMN serial_id SERIAL8 + - statementTag: ALTER TABLE + - targetRanks: + - targets: + families: + - columnIds: + ... + id: 106 + modificationTime: {} + - mutations: + - - direction: DROP + - index: + - constraintId: 1 + - createdAtNanos: "1640995200000000000" + - encodingType: 1 + - foreignKey: {} + - geoConfig: {} + - id: 1 + - interleave: {} + - keyColumnDirections: + - - ASC + - keyColumnIds: + - - 1 + - keyColumnNames: + - - i + - name: crdb_internal_index_1_name_placeholder + - partitioning: {} + - sharded: {} + - unique: true + - version: 4 + - mutationId: 1 + - state: DELETE_ONLY + + mutations: [] + name: tbl + nextColumnId: 3 + ... + time: {} + unexposedParentSchemaId: 105 + - version: "8" + + version: "9" +persist all catalog changes to storage +create job #2 (non-cancelable: true): "GC for ALTER TABLE db.public.tbl ADD COLUMN serial_id INT8" + descriptor IDs: [106] +update progress of schema change job #1: "all stages completed" +set schema change job #1 to non-cancellable +updated schema change job #1 descriptor IDs to [] +write *eventpb.FinishSchemaChange to event log: + sc: + descriptorId: 106 +commit transaction #12 +notified job registry to adopt jobs: [2] +# end PostCommitPhase diff --git a/pkg/sql/schemachanger/testdata/end_to_end/add_column_serial/add_column_serial__rollback_1_of_7.explain b/pkg/sql/schemachanger/testdata/end_to_end/add_column_serial/add_column_serial__rollback_1_of_7.explain new file mode 100644 index 000000000000..1110227fae5a --- /dev/null +++ b/pkg/sql/schemachanger/testdata/end_to_end/add_column_serial/add_column_serial__rollback_1_of_7.explain @@ -0,0 +1,36 @@ +/* setup */ +CREATE DATABASE db; +CREATE TABLE db.public.tbl (i INT PRIMARY KEY); + +/* test */ +ALTER TABLE db.public.tbl ADD COLUMN serial_id SERIAL; +EXPLAIN (DDL) rollback at post-commit stage 1 of 7; +---- +Schema change plan for rolling back ALTER TABLE ‹db›.public.‹tbl› ADD COLUMN ‹serial_id› INT8; + └── PostCommitNonRevertiblePhase + └── Stage 1 of 1 in PostCommitNonRevertiblePhase + ├── 11 elements transitioning toward ABSENT + │ ├── DELETE_ONLY → ABSENT Column:{DescID: 106 (tbl), ColumnID: 2 (serial_id-)} + │ ├── PUBLIC → ABSENT ColumnName:{DescID: 106 (tbl), Name: "serial_id", ColumnID: 2 (serial_id-)} + │ ├── PUBLIC → ABSENT ColumnType:{DescID: 106 (tbl), ColumnFamilyID: 0 (primary), ColumnID: 2 (serial_id-), TypeName: "INT8"} + │ ├── PUBLIC → ABSENT ColumnDefaultExpression:{DescID: 106 (tbl), ColumnID: 2 (serial_id-), Expr: unique_rowid()} + │ ├── BACKFILL_ONLY → ABSENT PrimaryIndex:{DescID: 106 (tbl), IndexID: 2 (tbl_pkey-), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (tbl_pkey+)} + │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 106 (tbl), ColumnID: 1 (i), IndexID: 2 (tbl_pkey-)} + │ ├── PUBLIC → ABSENT IndexData:{DescID: 106 (tbl), IndexID: 2 (tbl_pkey-)} + │ ├── DELETE_ONLY → ABSENT TemporaryIndex:{DescID: 106 (tbl), IndexID: 3, ConstraintID: 3, SourceIndexID: 1 (tbl_pkey+)} + │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 106 
(tbl), ColumnID: 1 (i), IndexID: 3} + │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 106 (tbl), ColumnID: 2 (serial_id-), IndexID: 2 (tbl_pkey-)} + │ └── PUBLIC → ABSENT IndexColumn:{DescID: 106 (tbl), ColumnID: 2 (serial_id-), IndexID: 3} + └── 12 Mutation operations + ├── SetColumnName {"ColumnID":2,"Name":"crdb_internal_co...","TableID":106} + ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":2,"TableID":106} + ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":3,"TableID":106} + ├── RemoveColumnFromIndex {"ColumnID":2,"IndexID":2,"Kind":2,"TableID":106} + ├── RemoveColumnFromIndex {"ColumnID":2,"IndexID":3,"Kind":2,"TableID":106} + ├── RemoveColumnDefaultExpression {"ColumnID":2,"TableID":106} + ├── MakeIndexAbsent {"IndexID":2,"TableID":106} + ├── CreateGCJobForIndex {"IndexID":2,"TableID":106} + ├── MakeIndexAbsent {"IndexID":3,"TableID":106} + ├── MakeDeleteOnlyColumnAbsent {"ColumnID":2,"TableID":106} + ├── RemoveJobStateFromDescriptor {"DescriptorID":106} + └── UpdateSchemaChangerJob {"IsNonCancelable":true,"RunningStatus":"all stages compl..."} diff --git a/pkg/sql/schemachanger/testdata/end_to_end/add_column_serial/add_column_serial__rollback_2_of_7.explain b/pkg/sql/schemachanger/testdata/end_to_end/add_column_serial/add_column_serial__rollback_2_of_7.explain new file mode 100644 index 000000000000..46fcbfbff31c --- /dev/null +++ b/pkg/sql/schemachanger/testdata/end_to_end/add_column_serial/add_column_serial__rollback_2_of_7.explain @@ -0,0 +1,49 @@ +/* setup */ +CREATE DATABASE db; +CREATE TABLE db.public.tbl (i INT PRIMARY KEY); + +/* test */ +ALTER TABLE db.public.tbl ADD COLUMN serial_id SERIAL; +EXPLAIN (DDL) rollback at post-commit stage 2 of 7; +---- +Schema change plan for rolling back ALTER TABLE ‹db›.public.‹tbl› ADD COLUMN ‹serial_id› INT8; + └── PostCommitNonRevertiblePhase + ├── Stage 1 of 2 in PostCommitNonRevertiblePhase + │ ├── 9 elements transitioning toward ABSENT + │ │ ├── WRITE_ONLY → DELETE_ONLY Column:{DescID: 106 (tbl), ColumnID: 2 (serial_id-)} + │ │ ├── PUBLIC → ABSENT ColumnName:{DescID: 106 (tbl), Name: "serial_id", ColumnID: 2 (serial_id-)} + │ │ ├── BACKFILL_ONLY → ABSENT PrimaryIndex:{DescID: 106 (tbl), IndexID: 2 (tbl_pkey-), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (tbl_pkey+)} + │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 106 (tbl), ColumnID: 1 (i), IndexID: 2 (tbl_pkey-)} + │ │ ├── WRITE_ONLY → DELETE_ONLY TemporaryIndex:{DescID: 106 (tbl), IndexID: 3, ConstraintID: 3, SourceIndexID: 1 (tbl_pkey+)} + │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 106 (tbl), ColumnID: 1 (i), IndexID: 3} + │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 106 (tbl), ColumnID: 2 (serial_id-), IndexID: 2 (tbl_pkey-)} + │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 106 (tbl), ColumnID: 2 (serial_id-), IndexID: 3} + │ │ └── WRITE_ONLY → ABSENT ColumnNotNull:{DescID: 106 (tbl), ColumnID: 2 (serial_id-), IndexID: 2 (tbl_pkey-)} + │ └── 11 Mutation operations + │ ├── MakeWriteOnlyColumnDeleteOnly {"ColumnID":2,"TableID":106} + │ ├── SetColumnName {"ColumnID":2,"Name":"crdb_internal_co...","TableID":106} + │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":2,"TableID":106} + │ ├── MakeWriteOnlyIndexDeleteOnly {"IndexID":3,"TableID":106} + │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":3,"TableID":106} + │ ├── RemoveColumnFromIndex {"ColumnID":2,"IndexID":2,"Kind":2,"TableID":106} + │ ├── RemoveColumnFromIndex {"ColumnID":2,"IndexID":3,"Kind":2,"TableID":106} + │ ├── MakeIndexAbsent {"IndexID":2,"TableID":106} + │ ├── RemoveColumnNotNull 
{"ColumnID":2,"TableID":106} + │ ├── SetJobStateOnDescriptor {"DescriptorID":106} + │ └── UpdateSchemaChangerJob {"IsNonCancelable":true,"RunningStatus":"PostCommitNonRev..."} + └── Stage 2 of 2 in PostCommitNonRevertiblePhase + ├── 6 elements transitioning toward ABSENT + │ ├── DELETE_ONLY → ABSENT Column:{DescID: 106 (tbl), ColumnID: 2 (serial_id-)} + │ ├── PUBLIC → ABSENT ColumnType:{DescID: 106 (tbl), ColumnFamilyID: 0 (primary), ColumnID: 2 (serial_id-), TypeName: "INT8"} + │ ├── PUBLIC → ABSENT ColumnDefaultExpression:{DescID: 106 (tbl), ColumnID: 2 (serial_id-), Expr: unique_rowid()} + │ ├── PUBLIC → ABSENT IndexData:{DescID: 106 (tbl), IndexID: 2 (tbl_pkey-)} + │ ├── DELETE_ONLY → ABSENT TemporaryIndex:{DescID: 106 (tbl), IndexID: 3, ConstraintID: 3, SourceIndexID: 1 (tbl_pkey+)} + │ └── PUBLIC → ABSENT IndexData:{DescID: 106 (tbl), IndexID: 3} + └── 7 Mutation operations + ├── RemoveColumnDefaultExpression {"ColumnID":2,"TableID":106} + ├── CreateGCJobForIndex {"IndexID":2,"TableID":106} + ├── MakeIndexAbsent {"IndexID":3,"TableID":106} + ├── CreateGCJobForIndex {"IndexID":3,"TableID":106} + ├── MakeDeleteOnlyColumnAbsent {"ColumnID":2,"TableID":106} + ├── RemoveJobStateFromDescriptor {"DescriptorID":106} + └── UpdateSchemaChangerJob {"IsNonCancelable":true,"RunningStatus":"all stages compl..."} diff --git a/pkg/sql/schemachanger/testdata/end_to_end/add_column_serial/add_column_serial__rollback_3_of_7.explain b/pkg/sql/schemachanger/testdata/end_to_end/add_column_serial/add_column_serial__rollback_3_of_7.explain new file mode 100644 index 000000000000..343fb79a2d6e --- /dev/null +++ b/pkg/sql/schemachanger/testdata/end_to_end/add_column_serial/add_column_serial__rollback_3_of_7.explain @@ -0,0 +1,49 @@ +/* setup */ +CREATE DATABASE db; +CREATE TABLE db.public.tbl (i INT PRIMARY KEY); + +/* test */ +ALTER TABLE db.public.tbl ADD COLUMN serial_id SERIAL; +EXPLAIN (DDL) rollback at post-commit stage 3 of 7; +---- +Schema change plan for rolling back ALTER TABLE ‹db›.public.‹tbl› ADD COLUMN ‹serial_id› INT8; + └── PostCommitNonRevertiblePhase + ├── Stage 1 of 2 in PostCommitNonRevertiblePhase + │ ├── 9 elements transitioning toward ABSENT + │ │ ├── WRITE_ONLY → DELETE_ONLY Column:{DescID: 106 (tbl), ColumnID: 2 (serial_id-)} + │ │ ├── PUBLIC → ABSENT ColumnName:{DescID: 106 (tbl), Name: "serial_id", ColumnID: 2 (serial_id-)} + │ │ ├── BACKFILL_ONLY → ABSENT PrimaryIndex:{DescID: 106 (tbl), IndexID: 2 (tbl_pkey-), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (tbl_pkey+)} + │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 106 (tbl), ColumnID: 1 (i), IndexID: 2 (tbl_pkey-)} + │ │ ├── WRITE_ONLY → DELETE_ONLY TemporaryIndex:{DescID: 106 (tbl), IndexID: 3, ConstraintID: 3, SourceIndexID: 1 (tbl_pkey+)} + │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 106 (tbl), ColumnID: 1 (i), IndexID: 3} + │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 106 (tbl), ColumnID: 2 (serial_id-), IndexID: 2 (tbl_pkey-)} + │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 106 (tbl), ColumnID: 2 (serial_id-), IndexID: 3} + │ │ └── WRITE_ONLY → ABSENT ColumnNotNull:{DescID: 106 (tbl), ColumnID: 2 (serial_id-), IndexID: 2 (tbl_pkey-)} + │ └── 11 Mutation operations + │ ├── MakeWriteOnlyColumnDeleteOnly {"ColumnID":2,"TableID":106} + │ ├── SetColumnName {"ColumnID":2,"Name":"crdb_internal_co...","TableID":106} + │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":2,"TableID":106} + │ ├── MakeWriteOnlyIndexDeleteOnly {"IndexID":3,"TableID":106} + │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":3,"TableID":106} + │ 
├── RemoveColumnFromIndex {"ColumnID":2,"IndexID":2,"Kind":2,"TableID":106} + │ ├── RemoveColumnFromIndex {"ColumnID":2,"IndexID":3,"Kind":2,"TableID":106} + │ ├── MakeIndexAbsent {"IndexID":2,"TableID":106} + │ ├── RemoveColumnNotNull {"ColumnID":2,"TableID":106} + │ ├── SetJobStateOnDescriptor {"DescriptorID":106} + │ └── UpdateSchemaChangerJob {"IsNonCancelable":true,"RunningStatus":"PostCommitNonRev..."} + └── Stage 2 of 2 in PostCommitNonRevertiblePhase + ├── 6 elements transitioning toward ABSENT + │ ├── DELETE_ONLY → ABSENT Column:{DescID: 106 (tbl), ColumnID: 2 (serial_id-)} + │ ├── PUBLIC → ABSENT ColumnType:{DescID: 106 (tbl), ColumnFamilyID: 0 (primary), ColumnID: 2 (serial_id-), TypeName: "INT8"} + │ ├── PUBLIC → ABSENT ColumnDefaultExpression:{DescID: 106 (tbl), ColumnID: 2 (serial_id-), Expr: unique_rowid()} + │ ├── PUBLIC → ABSENT IndexData:{DescID: 106 (tbl), IndexID: 2 (tbl_pkey-)} + │ ├── DELETE_ONLY → ABSENT TemporaryIndex:{DescID: 106 (tbl), IndexID: 3, ConstraintID: 3, SourceIndexID: 1 (tbl_pkey+)} + │ └── PUBLIC → ABSENT IndexData:{DescID: 106 (tbl), IndexID: 3} + └── 7 Mutation operations + ├── RemoveColumnDefaultExpression {"ColumnID":2,"TableID":106} + ├── CreateGCJobForIndex {"IndexID":2,"TableID":106} + ├── MakeIndexAbsent {"IndexID":3,"TableID":106} + ├── CreateGCJobForIndex {"IndexID":3,"TableID":106} + ├── MakeDeleteOnlyColumnAbsent {"ColumnID":2,"TableID":106} + ├── RemoveJobStateFromDescriptor {"DescriptorID":106} + └── UpdateSchemaChangerJob {"IsNonCancelable":true,"RunningStatus":"all stages compl..."} diff --git a/pkg/sql/schemachanger/testdata/end_to_end/add_column_serial/add_column_serial__rollback_4_of_7.explain b/pkg/sql/schemachanger/testdata/end_to_end/add_column_serial/add_column_serial__rollback_4_of_7.explain new file mode 100644 index 000000000000..ebd1507bad1a --- /dev/null +++ b/pkg/sql/schemachanger/testdata/end_to_end/add_column_serial/add_column_serial__rollback_4_of_7.explain @@ -0,0 +1,49 @@ +/* setup */ +CREATE DATABASE db; +CREATE TABLE db.public.tbl (i INT PRIMARY KEY); + +/* test */ +ALTER TABLE db.public.tbl ADD COLUMN serial_id SERIAL; +EXPLAIN (DDL) rollback at post-commit stage 4 of 7; +---- +Schema change plan for rolling back ALTER TABLE ‹db›.public.‹tbl› ADD COLUMN ‹serial_id› INT8; + └── PostCommitNonRevertiblePhase + ├── Stage 1 of 2 in PostCommitNonRevertiblePhase + │ ├── 9 elements transitioning toward ABSENT + │ │ ├── WRITE_ONLY → DELETE_ONLY Column:{DescID: 106 (tbl), ColumnID: 2 (serial_id-)} + │ │ ├── PUBLIC → ABSENT ColumnName:{DescID: 106 (tbl), Name: "serial_id", ColumnID: 2 (serial_id-)} + │ │ ├── DELETE_ONLY → ABSENT PrimaryIndex:{DescID: 106 (tbl), IndexID: 2 (tbl_pkey-), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (tbl_pkey+)} + │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 106 (tbl), ColumnID: 1 (i), IndexID: 2 (tbl_pkey-)} + │ │ ├── WRITE_ONLY → DELETE_ONLY TemporaryIndex:{DescID: 106 (tbl), IndexID: 3, ConstraintID: 3, SourceIndexID: 1 (tbl_pkey+)} + │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 106 (tbl), ColumnID: 1 (i), IndexID: 3} + │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 106 (tbl), ColumnID: 2 (serial_id-), IndexID: 2 (tbl_pkey-)} + │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 106 (tbl), ColumnID: 2 (serial_id-), IndexID: 3} + │ │ └── WRITE_ONLY → ABSENT ColumnNotNull:{DescID: 106 (tbl), ColumnID: 2 (serial_id-), IndexID: 2 (tbl_pkey-)} + │ └── 11 Mutation operations + │ ├── MakeWriteOnlyColumnDeleteOnly {"ColumnID":2,"TableID":106} + │ ├── SetColumnName 
{"ColumnID":2,"Name":"crdb_internal_co...","TableID":106} + │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":2,"TableID":106} + │ ├── MakeWriteOnlyIndexDeleteOnly {"IndexID":3,"TableID":106} + │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":3,"TableID":106} + │ ├── RemoveColumnFromIndex {"ColumnID":2,"IndexID":2,"Kind":2,"TableID":106} + │ ├── RemoveColumnFromIndex {"ColumnID":2,"IndexID":3,"Kind":2,"TableID":106} + │ ├── MakeIndexAbsent {"IndexID":2,"TableID":106} + │ ├── RemoveColumnNotNull {"ColumnID":2,"TableID":106} + │ ├── SetJobStateOnDescriptor {"DescriptorID":106} + │ └── UpdateSchemaChangerJob {"IsNonCancelable":true,"RunningStatus":"PostCommitNonRev..."} + └── Stage 2 of 2 in PostCommitNonRevertiblePhase + ├── 6 elements transitioning toward ABSENT + │ ├── DELETE_ONLY → ABSENT Column:{DescID: 106 (tbl), ColumnID: 2 (serial_id-)} + │ ├── PUBLIC → ABSENT ColumnType:{DescID: 106 (tbl), ColumnFamilyID: 0 (primary), ColumnID: 2 (serial_id-), TypeName: "INT8"} + │ ├── PUBLIC → ABSENT ColumnDefaultExpression:{DescID: 106 (tbl), ColumnID: 2 (serial_id-), Expr: unique_rowid()} + │ ├── PUBLIC → ABSENT IndexData:{DescID: 106 (tbl), IndexID: 2 (tbl_pkey-)} + │ ├── DELETE_ONLY → ABSENT TemporaryIndex:{DescID: 106 (tbl), IndexID: 3, ConstraintID: 3, SourceIndexID: 1 (tbl_pkey+)} + │ └── PUBLIC → ABSENT IndexData:{DescID: 106 (tbl), IndexID: 3} + └── 7 Mutation operations + ├── RemoveColumnDefaultExpression {"ColumnID":2,"TableID":106} + ├── CreateGCJobForIndex {"IndexID":2,"TableID":106} + ├── MakeIndexAbsent {"IndexID":3,"TableID":106} + ├── CreateGCJobForIndex {"IndexID":3,"TableID":106} + ├── MakeDeleteOnlyColumnAbsent {"ColumnID":2,"TableID":106} + ├── RemoveJobStateFromDescriptor {"DescriptorID":106} + └── UpdateSchemaChangerJob {"IsNonCancelable":true,"RunningStatus":"all stages compl..."} diff --git a/pkg/sql/schemachanger/testdata/end_to_end/add_column_serial/add_column_serial__rollback_5_of_7.explain b/pkg/sql/schemachanger/testdata/end_to_end/add_column_serial/add_column_serial__rollback_5_of_7.explain new file mode 100644 index 000000000000..c18d7bb02917 --- /dev/null +++ b/pkg/sql/schemachanger/testdata/end_to_end/add_column_serial/add_column_serial__rollback_5_of_7.explain @@ -0,0 +1,51 @@ +/* setup */ +CREATE DATABASE db; +CREATE TABLE db.public.tbl (i INT PRIMARY KEY); + +/* test */ +ALTER TABLE db.public.tbl ADD COLUMN serial_id SERIAL; +EXPLAIN (DDL) rollback at post-commit stage 5 of 7; +---- +Schema change plan for rolling back ALTER TABLE ‹db›.public.‹tbl› ADD COLUMN ‹serial_id› INT8; + └── PostCommitNonRevertiblePhase + ├── Stage 1 of 2 in PostCommitNonRevertiblePhase + │ ├── 9 elements transitioning toward ABSENT + │ │ ├── WRITE_ONLY → DELETE_ONLY Column:{DescID: 106 (tbl), ColumnID: 2 (serial_id-)} + │ │ ├── PUBLIC → ABSENT ColumnName:{DescID: 106 (tbl), Name: "serial_id", ColumnID: 2 (serial_id-)} + │ │ ├── MERGE_ONLY → DELETE_ONLY PrimaryIndex:{DescID: 106 (tbl), IndexID: 2 (tbl_pkey-), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (tbl_pkey+)} + │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 106 (tbl), ColumnID: 1 (i), IndexID: 2 (tbl_pkey-)} + │ │ ├── WRITE_ONLY → DELETE_ONLY TemporaryIndex:{DescID: 106 (tbl), IndexID: 3, ConstraintID: 3, SourceIndexID: 1 (tbl_pkey+)} + │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 106 (tbl), ColumnID: 1 (i), IndexID: 3} + │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 106 (tbl), ColumnID: 2 (serial_id-), IndexID: 2 (tbl_pkey-)} + │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 106 (tbl), ColumnID: 2 (serial_id-), IndexID: 
3} + │ │ └── WRITE_ONLY → ABSENT ColumnNotNull:{DescID: 106 (tbl), ColumnID: 2 (serial_id-), IndexID: 2 (tbl_pkey-)} + │ └── 11 Mutation operations + │ ├── MakeWriteOnlyColumnDeleteOnly {"ColumnID":2,"TableID":106} + │ ├── SetColumnName {"ColumnID":2,"Name":"crdb_internal_co...","TableID":106} + │ ├── MakeWriteOnlyIndexDeleteOnly {"IndexID":3,"TableID":106} + │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":3,"TableID":106} + │ ├── RemoveColumnFromIndex {"ColumnID":2,"IndexID":3,"Kind":2,"TableID":106} + │ ├── MakeWriteOnlyIndexDeleteOnly {"IndexID":2,"TableID":106} + │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":2,"TableID":106} + │ ├── RemoveColumnFromIndex {"ColumnID":2,"IndexID":2,"Kind":2,"TableID":106} + │ ├── RemoveColumnNotNull {"ColumnID":2,"TableID":106} + │ ├── SetJobStateOnDescriptor {"DescriptorID":106} + │ └── UpdateSchemaChangerJob {"IsNonCancelable":true,"RunningStatus":"PostCommitNonRev..."} + └── Stage 2 of 2 in PostCommitNonRevertiblePhase + ├── 7 elements transitioning toward ABSENT + │ ├── DELETE_ONLY → ABSENT Column:{DescID: 106 (tbl), ColumnID: 2 (serial_id-)} + │ ├── PUBLIC → ABSENT ColumnType:{DescID: 106 (tbl), ColumnFamilyID: 0 (primary), ColumnID: 2 (serial_id-), TypeName: "INT8"} + │ ├── PUBLIC → ABSENT ColumnDefaultExpression:{DescID: 106 (tbl), ColumnID: 2 (serial_id-), Expr: unique_rowid()} + │ ├── DELETE_ONLY → ABSENT PrimaryIndex:{DescID: 106 (tbl), IndexID: 2 (tbl_pkey-), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (tbl_pkey+)} + │ ├── PUBLIC → ABSENT IndexData:{DescID: 106 (tbl), IndexID: 2 (tbl_pkey-)} + │ ├── DELETE_ONLY → ABSENT TemporaryIndex:{DescID: 106 (tbl), IndexID: 3, ConstraintID: 3, SourceIndexID: 1 (tbl_pkey+)} + │ └── PUBLIC → ABSENT IndexData:{DescID: 106 (tbl), IndexID: 3} + └── 8 Mutation operations + ├── RemoveColumnDefaultExpression {"ColumnID":2,"TableID":106} + ├── MakeIndexAbsent {"IndexID":2,"TableID":106} + ├── CreateGCJobForIndex {"IndexID":2,"TableID":106} + ├── MakeIndexAbsent {"IndexID":3,"TableID":106} + ├── CreateGCJobForIndex {"IndexID":3,"TableID":106} + ├── MakeDeleteOnlyColumnAbsent {"ColumnID":2,"TableID":106} + ├── RemoveJobStateFromDescriptor {"DescriptorID":106} + └── UpdateSchemaChangerJob {"IsNonCancelable":true,"RunningStatus":"all stages compl..."} diff --git a/pkg/sql/schemachanger/testdata/end_to_end/add_column_serial/add_column_serial__rollback_6_of_7.explain b/pkg/sql/schemachanger/testdata/end_to_end/add_column_serial/add_column_serial__rollback_6_of_7.explain new file mode 100644 index 000000000000..d40517a05d1e --- /dev/null +++ b/pkg/sql/schemachanger/testdata/end_to_end/add_column_serial/add_column_serial__rollback_6_of_7.explain @@ -0,0 +1,51 @@ +/* setup */ +CREATE DATABASE db; +CREATE TABLE db.public.tbl (i INT PRIMARY KEY); + +/* test */ +ALTER TABLE db.public.tbl ADD COLUMN serial_id SERIAL; +EXPLAIN (DDL) rollback at post-commit stage 6 of 7; +---- +Schema change plan for rolling back ALTER TABLE ‹db›.public.‹tbl› ADD COLUMN ‹serial_id› INT8; + └── PostCommitNonRevertiblePhase + ├── Stage 1 of 2 in PostCommitNonRevertiblePhase + │ ├── 9 elements transitioning toward ABSENT + │ │ ├── WRITE_ONLY → DELETE_ONLY Column:{DescID: 106 (tbl), ColumnID: 2 (serial_id-)} + │ │ ├── PUBLIC → ABSENT ColumnName:{DescID: 106 (tbl), Name: "serial_id", ColumnID: 2 (serial_id-)} + │ │ ├── MERGE_ONLY → DELETE_ONLY PrimaryIndex:{DescID: 106 (tbl), IndexID: 2 (tbl_pkey-), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (tbl_pkey+)} + │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 106 (tbl), 
ColumnID: 1 (i), IndexID: 2 (tbl_pkey-)} + │ │ ├── WRITE_ONLY → DELETE_ONLY TemporaryIndex:{DescID: 106 (tbl), IndexID: 3, ConstraintID: 3, SourceIndexID: 1 (tbl_pkey+)} + │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 106 (tbl), ColumnID: 1 (i), IndexID: 3} + │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 106 (tbl), ColumnID: 2 (serial_id-), IndexID: 2 (tbl_pkey-)} + │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 106 (tbl), ColumnID: 2 (serial_id-), IndexID: 3} + │ │ └── WRITE_ONLY → ABSENT ColumnNotNull:{DescID: 106 (tbl), ColumnID: 2 (serial_id-), IndexID: 2 (tbl_pkey-)} + │ └── 11 Mutation operations + │ ├── MakeWriteOnlyColumnDeleteOnly {"ColumnID":2,"TableID":106} + │ ├── SetColumnName {"ColumnID":2,"Name":"crdb_internal_co...","TableID":106} + │ ├── MakeWriteOnlyIndexDeleteOnly {"IndexID":3,"TableID":106} + │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":3,"TableID":106} + │ ├── RemoveColumnFromIndex {"ColumnID":2,"IndexID":3,"Kind":2,"TableID":106} + │ ├── MakeWriteOnlyIndexDeleteOnly {"IndexID":2,"TableID":106} + │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":2,"TableID":106} + │ ├── RemoveColumnFromIndex {"ColumnID":2,"IndexID":2,"Kind":2,"TableID":106} + │ ├── RemoveColumnNotNull {"ColumnID":2,"TableID":106} + │ ├── SetJobStateOnDescriptor {"DescriptorID":106} + │ └── UpdateSchemaChangerJob {"IsNonCancelable":true,"RunningStatus":"PostCommitNonRev..."} + └── Stage 2 of 2 in PostCommitNonRevertiblePhase + ├── 7 elements transitioning toward ABSENT + │ ├── DELETE_ONLY → ABSENT Column:{DescID: 106 (tbl), ColumnID: 2 (serial_id-)} + │ ├── PUBLIC → ABSENT ColumnType:{DescID: 106 (tbl), ColumnFamilyID: 0 (primary), ColumnID: 2 (serial_id-), TypeName: "INT8"} + │ ├── PUBLIC → ABSENT ColumnDefaultExpression:{DescID: 106 (tbl), ColumnID: 2 (serial_id-), Expr: unique_rowid()} + │ ├── DELETE_ONLY → ABSENT PrimaryIndex:{DescID: 106 (tbl), IndexID: 2 (tbl_pkey-), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (tbl_pkey+)} + │ ├── PUBLIC → ABSENT IndexData:{DescID: 106 (tbl), IndexID: 2 (tbl_pkey-)} + │ ├── DELETE_ONLY → ABSENT TemporaryIndex:{DescID: 106 (tbl), IndexID: 3, ConstraintID: 3, SourceIndexID: 1 (tbl_pkey+)} + │ └── PUBLIC → ABSENT IndexData:{DescID: 106 (tbl), IndexID: 3} + └── 8 Mutation operations + ├── RemoveColumnDefaultExpression {"ColumnID":2,"TableID":106} + ├── MakeIndexAbsent {"IndexID":2,"TableID":106} + ├── CreateGCJobForIndex {"IndexID":2,"TableID":106} + ├── MakeIndexAbsent {"IndexID":3,"TableID":106} + ├── CreateGCJobForIndex {"IndexID":3,"TableID":106} + ├── MakeDeleteOnlyColumnAbsent {"ColumnID":2,"TableID":106} + ├── RemoveJobStateFromDescriptor {"DescriptorID":106} + └── UpdateSchemaChangerJob {"IsNonCancelable":true,"RunningStatus":"all stages compl..."} diff --git a/pkg/sql/schemachanger/testdata/end_to_end/add_column_serial/add_column_serial__rollback_7_of_7.explain b/pkg/sql/schemachanger/testdata/end_to_end/add_column_serial/add_column_serial__rollback_7_of_7.explain new file mode 100644 index 000000000000..e6f9a9498835 --- /dev/null +++ b/pkg/sql/schemachanger/testdata/end_to_end/add_column_serial/add_column_serial__rollback_7_of_7.explain @@ -0,0 +1,49 @@ +/* setup */ +CREATE DATABASE db; +CREATE TABLE db.public.tbl (i INT PRIMARY KEY); + +/* test */ +ALTER TABLE db.public.tbl ADD COLUMN serial_id SERIAL; +EXPLAIN (DDL) rollback at post-commit stage 7 of 7; +---- +Schema change plan for rolling back ALTER TABLE ‹db›.public.‹tbl› ADD COLUMN ‹serial_id› INT8; + └── PostCommitNonRevertiblePhase + ├── Stage 1 of 2 in PostCommitNonRevertiblePhase + 
│ ├── 9 elements transitioning toward ABSENT + │ │ ├── WRITE_ONLY → DELETE_ONLY Column:{DescID: 106 (tbl), ColumnID: 2 (serial_id-)} + │ │ ├── PUBLIC → ABSENT ColumnName:{DescID: 106 (tbl), Name: "serial_id", ColumnID: 2 (serial_id-)} + │ │ ├── WRITE_ONLY → DELETE_ONLY PrimaryIndex:{DescID: 106 (tbl), IndexID: 2 (tbl_pkey-), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (tbl_pkey+)} + │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 106 (tbl), ColumnID: 1 (i), IndexID: 2 (tbl_pkey-)} + │ │ ├── TRANSIENT_DELETE_ONLY → ABSENT TemporaryIndex:{DescID: 106 (tbl), IndexID: 3, ConstraintID: 3, SourceIndexID: 1 (tbl_pkey+)} + │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 106 (tbl), ColumnID: 1 (i), IndexID: 3} + │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 106 (tbl), ColumnID: 2 (serial_id-), IndexID: 2 (tbl_pkey-)} + │ │ ├── PUBLIC → ABSENT IndexColumn:{DescID: 106 (tbl), ColumnID: 2 (serial_id-), IndexID: 3} + │ │ └── WRITE_ONLY → ABSENT ColumnNotNull:{DescID: 106 (tbl), ColumnID: 2 (serial_id-), IndexID: 2 (tbl_pkey-)} + │ └── 11 Mutation operations + │ ├── MakeWriteOnlyColumnDeleteOnly {"ColumnID":2,"TableID":106} + │ ├── SetColumnName {"ColumnID":2,"Name":"crdb_internal_co...","TableID":106} + │ ├── MakeWriteOnlyIndexDeleteOnly {"IndexID":2,"TableID":106} + │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":2,"TableID":106} + │ ├── RemoveColumnFromIndex {"ColumnID":1,"IndexID":3,"TableID":106} + │ ├── RemoveColumnFromIndex {"ColumnID":2,"IndexID":2,"Kind":2,"TableID":106} + │ ├── RemoveColumnFromIndex {"ColumnID":2,"IndexID":3,"Kind":2,"TableID":106} + │ ├── MakeIndexAbsent {"IndexID":3,"TableID":106} + │ ├── RemoveColumnNotNull {"ColumnID":2,"TableID":106} + │ ├── SetJobStateOnDescriptor {"DescriptorID":106} + │ └── UpdateSchemaChangerJob {"IsNonCancelable":true,"RunningStatus":"PostCommitNonRev..."} + └── Stage 2 of 2 in PostCommitNonRevertiblePhase + ├── 6 elements transitioning toward ABSENT + │ ├── DELETE_ONLY → ABSENT Column:{DescID: 106 (tbl), ColumnID: 2 (serial_id-)} + │ ├── PUBLIC → ABSENT ColumnType:{DescID: 106 (tbl), ColumnFamilyID: 0 (primary), ColumnID: 2 (serial_id-), TypeName: "INT8"} + │ ├── PUBLIC → ABSENT ColumnDefaultExpression:{DescID: 106 (tbl), ColumnID: 2 (serial_id-), Expr: unique_rowid()} + │ ├── DELETE_ONLY → ABSENT PrimaryIndex:{DescID: 106 (tbl), IndexID: 2 (tbl_pkey-), ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1 (tbl_pkey+)} + │ ├── PUBLIC → ABSENT IndexData:{DescID: 106 (tbl), IndexID: 2 (tbl_pkey-)} + │ └── PUBLIC → ABSENT IndexData:{DescID: 106 (tbl), IndexID: 3} + └── 7 Mutation operations + ├── RemoveColumnDefaultExpression {"ColumnID":2,"TableID":106} + ├── MakeIndexAbsent {"IndexID":2,"TableID":106} + ├── CreateGCJobForIndex {"IndexID":2,"TableID":106} + ├── CreateGCJobForIndex {"IndexID":3,"TableID":106} + ├── MakeDeleteOnlyColumnAbsent {"ColumnID":2,"TableID":106} + ├── RemoveJobStateFromDescriptor {"DescriptorID":106} + └── UpdateSchemaChangerJob {"IsNonCancelable":true,"RunningStatus":"all stages compl..."} diff --git a/pkg/sql/schemachanger/testdata/end_to_end/drop_function/drop_function.explain b/pkg/sql/schemachanger/testdata/end_to_end/drop_function/drop_function.explain index d91d6761a2f1..dac3d19e5d9e 100644 --- a/pkg/sql/schemachanger/testdata/end_to_end/drop_function/drop_function.explain +++ b/pkg/sql/schemachanger/testdata/end_to_end/drop_function/drop_function.explain @@ -24,7 +24,7 @@ EXPLAIN (DDL) DROP FUNCTION f; Schema change plan for DROP FUNCTION ‹""›.‹""›.‹f›; ├── StatementPhase │ └── Stage 1 of 1 in StatementPhase - 
│ ├── 11 elements transitioning toward ABSENT + │ ├── 12 elements transitioning toward ABSENT │ │ ├── PUBLIC → ABSENT Owner:{DescID: 109 (f-)} │ │ ├── PUBLIC → ABSENT UserPrivileges:{DescID: 109 (f-), Name: "admin"} │ │ ├── PUBLIC → ABSENT UserPrivileges:{DescID: 109 (f-), Name: "public"} @@ -35,14 +35,16 @@ Schema change plan for DROP FUNCTION ‹""›.‹""›.‹f›; │ │ ├── PUBLIC → ABSENT FunctionVolatility:{DescID: 109 (f-)} │ │ ├── PUBLIC → ABSENT FunctionLeakProof:{DescID: 109 (f-)} │ │ ├── PUBLIC → ABSENT FunctionNullInputBehavior:{DescID: 109 (f-)} + │ │ ├── PUBLIC → ABSENT FunctionSecurity:{DescID: 109 (f-)} │ │ └── PUBLIC → ABSENT FunctionBody:{DescID: 109 (f-)} - │ └── 13 Mutation operations + │ └── 14 Mutation operations │ ├── MarkDescriptorAsDropped {"DescriptorID":109} │ ├── RemoveObjectParent {"ObjectID":109,"ParentSchemaID":101} │ ├── NotImplementedForPublicObjects {"DescID":109,"ElementType":"scpb.FunctionNam..."} │ ├── NotImplementedForPublicObjects {"DescID":109,"ElementType":"scpb.FunctionVol..."} │ ├── NotImplementedForPublicObjects {"DescID":109,"ElementType":"scpb.FunctionLea..."} │ ├── NotImplementedForPublicObjects {"DescID":109,"ElementType":"scpb.FunctionNul..."} + │ ├── NotImplementedForPublicObjects {"DescID":109,"ElementType":"scpb.FunctionSec..."} │ ├── RemoveBackReferenceInTypes {"BackReferencedDescriptorID":109} │ ├── RemoveBackReferencesInRelations {"BackReferencedID":109} │ ├── RemoveBackReferenceInFunctions {"BackReferencedDescriptorID":109} @@ -52,7 +54,7 @@ Schema change plan for DROP FUNCTION ‹""›.‹""›.‹f›; │ └── RemoveUserPrivileges {"DescriptorID":109,"User":"root"} ├── PreCommitPhase │ ├── Stage 1 of 2 in PreCommitPhase - │ │ ├── 11 elements transitioning toward ABSENT + │ │ ├── 12 elements transitioning toward ABSENT │ │ │ ├── ABSENT → PUBLIC Owner:{DescID: 109 (f-)} │ │ │ ├── ABSENT → PUBLIC UserPrivileges:{DescID: 109 (f-), Name: "admin"} │ │ │ ├── ABSENT → PUBLIC UserPrivileges:{DescID: 109 (f-), Name: "public"} @@ -63,11 +65,12 @@ Schema change plan for DROP FUNCTION ‹""›.‹""›.‹f›; │ │ │ ├── ABSENT → PUBLIC FunctionVolatility:{DescID: 109 (f-)} │ │ │ ├── ABSENT → PUBLIC FunctionLeakProof:{DescID: 109 (f-)} │ │ │ ├── ABSENT → PUBLIC FunctionNullInputBehavior:{DescID: 109 (f-)} + │ │ │ ├── ABSENT → PUBLIC FunctionSecurity:{DescID: 109 (f-)} │ │ │ └── ABSENT → PUBLIC FunctionBody:{DescID: 109 (f-)} │ │ └── 1 Mutation operation │ │ └── UndoAllInTxnImmediateMutationOpSideEffects │ └── Stage 2 of 2 in PreCommitPhase - │ ├── 11 elements transitioning toward ABSENT + │ ├── 12 elements transitioning toward ABSENT │ │ ├── PUBLIC → ABSENT Owner:{DescID: 109 (f-)} │ │ ├── PUBLIC → ABSENT UserPrivileges:{DescID: 109 (f-), Name: "admin"} │ │ ├── PUBLIC → ABSENT UserPrivileges:{DescID: 109 (f-), Name: "public"} @@ -78,14 +81,16 @@ Schema change plan for DROP FUNCTION ‹""›.‹""›.‹f›; │ │ ├── PUBLIC → ABSENT FunctionVolatility:{DescID: 109 (f-)} │ │ ├── PUBLIC → ABSENT FunctionLeakProof:{DescID: 109 (f-)} │ │ ├── PUBLIC → ABSENT FunctionNullInputBehavior:{DescID: 109 (f-)} + │ │ ├── PUBLIC → ABSENT FunctionSecurity:{DescID: 109 (f-)} │ │ └── PUBLIC → ABSENT FunctionBody:{DescID: 109 (f-)} - │ └── 20 Mutation operations + │ └── 21 Mutation operations │ ├── MarkDescriptorAsDropped {"DescriptorID":109} │ ├── RemoveObjectParent {"ObjectID":109,"ParentSchemaID":101} │ ├── NotImplementedForPublicObjects {"DescID":109,"ElementType":"scpb.FunctionNam..."} │ ├── NotImplementedForPublicObjects {"DescID":109,"ElementType":"scpb.FunctionVol..."} │ ├── NotImplementedForPublicObjects 
{"DescID":109,"ElementType":"scpb.FunctionLea..."} │ ├── NotImplementedForPublicObjects {"DescID":109,"ElementType":"scpb.FunctionNul..."} + │ ├── NotImplementedForPublicObjects {"DescID":109,"ElementType":"scpb.FunctionSec..."} │ ├── RemoveBackReferenceInTypes {"BackReferencedDescriptorID":109} │ ├── RemoveBackReferencesInRelations {"BackReferencedID":109} │ ├── RemoveBackReferenceInFunctions {"BackReferencedDescriptorID":109} diff --git a/pkg/sql/schemachanger/testdata/end_to_end/drop_function/drop_function.side_effects b/pkg/sql/schemachanger/testdata/end_to_end/drop_function/drop_function.side_effects index 894e640e8773..1f0cc65c1115 100644 --- a/pkg/sql/schemachanger/testdata/end_to_end/drop_function/drop_function.side_effects +++ b/pkg/sql/schemachanger/testdata/end_to_end/drop_function/drop_function.side_effects @@ -40,7 +40,7 @@ write *eventpb.DropFunction to event log: statement: DROP FUNCTION ‹""›.‹""›.‹f› tag: DROP FUNCTION user: root -## StatementPhase stage 1 of 1 with 13 MutationType ops +## StatementPhase stage 1 of 1 with 14 MutationType ops upsert descriptor #101 schema: - functions: @@ -144,7 +144,7 @@ upsert descriptor #109 ## PreCommitPhase stage 1 of 2 with 1 MutationType op undo all catalog changes within txn #1 persist all catalog changes to storage -## PreCommitPhase stage 2 of 2 with 20 MutationType ops +## PreCommitPhase stage 2 of 2 with 21 MutationType ops upsert descriptor #101 schema: - functions: diff --git a/pkg/sql/sem/builtins/builtins.go b/pkg/sql/sem/builtins/builtins.go index fc4fb8a07541..e37d2acb376d 100644 --- a/pkg/sql/sem/builtins/builtins.go +++ b/pkg/sql/sem/builtins/builtins.go @@ -8829,6 +8829,23 @@ specified store on the node it's run from. One of 'mvccGC', 'merge', 'split', Volatility: volatility.Volatile, }, ), + "crdb_internal.get_fully_qualified_table_name": makeBuiltin( + tree.FunctionProperties{Category: builtinconstants.CategorySystemInfo}, + tree.Overload{ + Types: tree.ParamTypes{ + {Name: "table_descriptor_id", Typ: types.Int}, + }, + ReturnType: tree.FixedReturnType(types.String), + Body: ` +SELECT fq_name +FROM crdb_internal.fully_qualified_names +WHERE object_id = table_descriptor_id +`, + Info: `This function is used to get the fully qualified table name given a table descriptor ID`, + Volatility: volatility.Stable, + Language: tree.RoutineLangSQL, + }, + ), } var lengthImpls = func(incBitOverload bool) builtinDefinition { diff --git a/pkg/sql/sem/builtins/fixed_oids.go b/pkg/sql/sem/builtins/fixed_oids.go index 9146a782fa1b..698e61dcc1fc 100644 --- a/pkg/sql/sem/builtins/fixed_oids.go +++ b/pkg/sql/sem/builtins/fixed_oids.go @@ -2607,6 +2607,7 @@ var builtinOidsArray = []string{ 2639: `crdb_internal.start_replication_stream_for_tables(req: bytes) -> bytes`, 2640: `crdb_internal.clear_query_plan_cache() -> void`, 2641: `crdb_internal.clear_table_stats_cache() -> void`, + 2642: `crdb_internal.get_fully_qualified_table_name(table_descriptor_id: int) -> string`, } var builtinOidsBySignature map[string]oid.Oid diff --git a/pkg/sql/sem/builtins/window_frame_builtins_test.go b/pkg/sql/sem/builtins/window_frame_builtins_test.go index 6e782e5dbba8..efad9106d925 100644 --- a/pkg/sql/sem/builtins/window_frame_builtins_test.go +++ b/pkg/sql/sem/builtins/window_frame_builtins_test.go @@ -108,8 +108,8 @@ func testMin(t *testing.T, evalCtx *eval.Context, wfr *eval.WindowFrameRun) { if minResult != naiveMin { t.Errorf("Min sliding window returned wrong result: expected %+v, found %+v", naiveMin, minResult) t.Errorf("partitionSize: %+v idx: %+v 
offset: %+v", wfr.PartitionSize(), wfr.RowIdx, offset) - t.Errorf(min.sw.string()) - t.Errorf(partitionToString(context.Background(), wfr.Rows)) + t.Error(min.sw.string()) + t.Error(partitionToString(context.Background(), wfr.Rows)) panic("") } } @@ -151,8 +151,8 @@ func testMax(t *testing.T, evalCtx *eval.Context, wfr *eval.WindowFrameRun) { if maxResult != naiveMax { t.Errorf("Max sliding window returned wrong result: expected %+v, found %+v", naiveMax, maxResult) t.Errorf("partitionSize: %+v idx: %+v offset: %+v", wfr.PartitionSize(), wfr.RowIdx, offset) - t.Errorf(max.sw.string()) - t.Errorf(partitionToString(context.Background(), wfr.Rows)) + t.Error(max.sw.string()) + t.Error(partitionToString(context.Background(), wfr.Rows)) panic("") } } @@ -199,7 +199,7 @@ func testSumAndAvg(t *testing.T, evalCtx *eval.Context, wfr *eval.WindowFrameRun if s != naiveSum { t.Errorf("Sum sliding window returned wrong result: expected %+v, found %+v", naiveSum, s) t.Errorf("partitionSize: %+v idx: %+v offset: %+v", wfr.PartitionSize(), wfr.RowIdx, offset) - t.Errorf(partitionToString(context.Background(), wfr.Rows)) + t.Error(partitionToString(context.Background(), wfr.Rows)) panic("") } a, err := avgResult.Float64() @@ -213,7 +213,7 @@ func testSumAndAvg(t *testing.T, evalCtx *eval.Context, wfr *eval.WindowFrameRun if a != float64(naiveSum)/float64(frameSize) { t.Errorf("Sum sliding window returned wrong result: expected %+v, found %+v", float64(naiveSum)/float64(frameSize), a) t.Errorf("partitionSize: %+v idx: %+v offset: %+v", wfr.PartitionSize(), wfr.RowIdx, offset) - t.Errorf(partitionToString(context.Background(), wfr.Rows)) + t.Error(partitionToString(context.Background(), wfr.Rows)) panic("") } } diff --git a/pkg/sql/sem/catconstants/constants.go b/pkg/sql/sem/catconstants/constants.go index 8b2636f2289c..794cc44793c6 100644 --- a/pkg/sql/sem/catconstants/constants.go +++ b/pkg/sql/sem/catconstants/constants.go @@ -106,6 +106,7 @@ const ( MVCCStatistics SystemTableName = "mvcc_statistics" StmtExecInsightsTableName SystemTableName = "statement_execution_insights" TxnExecInsightsTableName SystemTableName = "transaction_execution_insights" + TableMetadata SystemTableName = "table_metadata" ) // Oid for virtual database and table. @@ -215,6 +216,7 @@ const ( CrdbInternalPCRStreamSpansTableID CrdbInternalPCRStreamCheckpointsTableID CrdbInternalLDRProcessorTableID + CrdbInternalFullyQualifiedNamesViewID InformationSchemaID InformationSchemaAdministrableRoleAuthorizationsID InformationSchemaApplicableRolesID diff --git a/pkg/sql/sem/eval/context.go b/pkg/sql/sem/eval/context.go index c914d52952a4..0c91590aeb82 100644 --- a/pkg/sql/sem/eval/context.go +++ b/pkg/sql/sem/eval/context.go @@ -608,8 +608,8 @@ func (ec *Context) GetClusterTimestamp() (*tree.DDecimal, error) { // multiple timestamps. Prevent this with a gate at the SQL level and return // a pgerror until we decide how this will officially behave. See #103245. 
if ec.TxnIsoLevel.ToleratesWriteSkew() { - treeIso := tree.IsolationLevelFromKVTxnIsolationLevel(ec.TxnIsoLevel) - return nil, pgerror.Newf(pgcode.FeatureNotSupported, "unsupported in %s isolation", treeIso.String()) + return nil, pgerror.Newf(pgcode.FeatureNotSupported, + "unsupported in %s isolation", tree.FromKVIsoLevel(ec.TxnIsoLevel).String()) } ts, err := ec.Txn.CommitTimestamp() diff --git a/pkg/sql/sem/eval/window_funcs_test.go b/pkg/sql/sem/eval/window_funcs_test.go index 0af05bf47617..0f68bf6435e0 100644 --- a/pkg/sql/sem/eval/window_funcs_test.go +++ b/pkg/sql/sem/eval/window_funcs_test.go @@ -100,7 +100,7 @@ func testStartPreceding(t *testing.T, evalCtx *Context, wfr *WindowFrameRun, off if idx != frameStartIdx { t.Errorf("FrameStartIdx returned wrong result on Preceding: expected %+v, found %+v", idx, frameStartIdx) t.Errorf("Search for %+v when wfr.RowIdx=%+v", value, wfr.RowIdx) - t.Errorf(partitionToString(context.Background(), wfr.Rows)) + t.Error(partitionToString(context.Background(), wfr.Rows)) t.Fatal("") } break @@ -144,7 +144,7 @@ func testStartFollowing(t *testing.T, evalCtx *Context, wfr *WindowFrameRun, off if idx != frameStartIdx { t.Errorf("FrameStartIdx returned wrong result on Following: expected %+v, found %+v", idx, frameStartIdx) t.Errorf("Search for %+v when wfr.RowIdx=%+v", value, wfr.RowIdx) - t.Errorf(partitionToString(context.Background(), wfr.Rows)) + t.Error(partitionToString(context.Background(), wfr.Rows)) t.Fatal("") } break @@ -159,7 +159,7 @@ func testStartFollowing(t *testing.T, evalCtx *Context, wfr *WindowFrameRun, off if idx != frameStartIdx { t.Errorf("FrameStartIdx returned wrong result on Following: expected %+v, found %+v", idx, frameStartIdx) t.Errorf("Search for %+v when wfr.RowIdx=%+v", value, wfr.RowIdx) - t.Errorf(partitionToString(context.Background(), wfr.Rows)) + t.Error(partitionToString(context.Background(), wfr.Rows)) t.Fatal("") } break @@ -209,7 +209,7 @@ func testEndPreceding(t *testing.T, evalCtx *Context, wfr *WindowFrameRun, offse if idx+1 != frameEndIdx { t.Errorf("FrameEndIdx returned wrong result on Preceding: expected %+v, found %+v", idx+1, frameEndIdx) t.Errorf("Search for %+v when wfr.RowIdx=%+v", value, wfr.RowIdx) - t.Errorf(partitionToString(context.Background(), wfr.Rows)) + t.Error(partitionToString(context.Background(), wfr.Rows)) t.Fatal("") } break @@ -259,7 +259,7 @@ func testEndFollowing(t *testing.T, evalCtx *Context, wfr *WindowFrameRun, offse if idx+1 != frameEndIdx { t.Errorf("FrameEndIdx returned wrong result on Following: expected %+v, found %+v", idx+1, frameEndIdx) t.Errorf("Search for %+v when wfr.RowIdx=%+v", value, wfr.RowIdx) - t.Errorf(partitionToString(context.Background(), wfr.Rows)) + t.Error(partitionToString(context.Background(), wfr.Rows)) t.Fatal("") } break diff --git a/pkg/sql/sem/tree/create.go b/pkg/sql/sem/tree/create.go index 00862a601173..db8cb7e9e503 100644 --- a/pkg/sql/sem/tree/create.go +++ b/pkg/sql/sem/tree/create.go @@ -2184,7 +2184,6 @@ func (node *CreateExternalConnection) Format(ctx *FmtCtx) { type CreateTenant struct { IfNotExists bool TenantSpec *TenantSpec - Like *LikeTenantSpec } // Format implements the NodeFormatter interface. @@ -2194,20 +2193,6 @@ func (node *CreateTenant) Format(ctx *FmtCtx) { ctx.WriteString("IF NOT EXISTS ") } ctx.FormatNode(node.TenantSpec) - ctx.FormatNode(node.Like) -} - -// LikeTenantSpec represents a LIKE clause in CREATE VIRTUAL CLUSTER. 
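The `tree.FromKVIsoLevel` call above is one half of the converter pair defined further down in `txn.go`. The SQL-to-KV mapping is many-to-one, so a round trip normalizes levels; a toy rendering of that shape, where the string-keyed maps are stand-ins rather than the real types:

```go
package main

import "fmt"

type sqlLevel string
type kvLevel string

// READ UNCOMMITTED and READ COMMITTED share a KV level, as do
// REPEATABLE READ and SNAPSHOT (per ToKVIsoLevel above).
var toKV = map[sqlLevel]kvLevel{
	"READ UNCOMMITTED": "ReadCommitted",
	"READ COMMITTED":   "ReadCommitted",
	"REPEATABLE READ":  "Snapshot",
	"SNAPSHOT":         "Snapshot",
	"SERIALIZABLE":     "Serializable",
}

// The reverse mapping must pick one SQL level per KV level; the diff
// picks REPEATABLE READ as the canonical name for Snapshot.
var fromKV = map[kvLevel]sqlLevel{
	"ReadCommitted": "READ COMMITTED",
	"Snapshot":      "REPEATABLE READ",
	"Serializable":  "SERIALIZABLE",
}

func main() {
	// SNAPSHOT normalizes to REPEATABLE READ after a round trip.
	fmt.Println(fromKV[toKV["SNAPSHOT"]]) // REPEATABLE READ
}
```

This is why `FromKVIsoLevel` returns `RepeatableReadIsolation` for `isolation.Snapshot` where the old converter returned `SnapshotIsolation`.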
-type LikeTenantSpec struct { - OtherTenant *TenantSpec -} - -func (node *LikeTenantSpec) Format(ctx *FmtCtx) { - if node.OtherTenant == nil { - return - } - ctx.WriteString(" LIKE ") - ctx.FormatNode(node.OtherTenant) } // CreateTenantFromReplication represents a CREATE VIRTUAL CLUSTER...FROM REPLICATION @@ -2228,8 +2213,6 @@ type CreateTenantFromReplication struct { ReplicationSourceAddress Expr Options TenantReplicationOptions - - Like *LikeTenantSpec } // TenantReplicationOptions options for the CREATE/ALTER VIRTUAL CLUSTER FROM REPLICATION command. @@ -2249,9 +2232,6 @@ func (node *CreateTenantFromReplication) Format(ctx *FmtCtx) { // NB: we do not anonymize the tenant name because we assume that tenant names // do not contain sensitive information. ctx.FormatNode(node.TenantSpec) - if node.Like != nil { - ctx.FormatNode(node.Like) - } if node.ReplicationSourceAddress != nil { ctx.WriteString(" FROM REPLICATION OF ") diff --git a/pkg/sql/sem/tree/create_logical_replication.go b/pkg/sql/sem/tree/create_logical_replication.go index 9db2651d0c85..968d11d861b0 100644 --- a/pkg/sql/sem/tree/create_logical_replication.go +++ b/pkg/sql/sem/tree/create_logical_replication.go @@ -30,10 +30,11 @@ type LogicalReplicationResources struct { type LogicalReplicationOptions struct { // Mapping of table name to UDF name - UserFunctions map[UnresolvedName]RoutineName - Cursor Expr - Mode Expr - DefaultFunction Expr + UserFunctions map[UnresolvedName]RoutineName + Cursor Expr + Mode Expr + DefaultFunction Expr + IgnoreCDCIgnoredTTLDeletes *DBool } var _ Statement = &CreateLogicalReplicationStream{} @@ -124,6 +125,10 @@ func (lro *LogicalReplicationOptions) Format(ctx *FmtCtx) { ctx.FormatNode(&k) } } + if lro.IgnoreCDCIgnoredTTLDeletes != nil && *lro.IgnoreCDCIgnoredTTLDeletes { + maybeAddSep() + ctx.WriteString("IGNORE_CDC_IGNORED_TTL_DELETES") + } } func (o *LogicalReplicationOptions) CombineWith(other *LogicalReplicationOptions) error { @@ -163,6 +168,14 @@ func (o *LogicalReplicationOptions) CombineWith(other *LogicalReplicationOptions } } + if o.IgnoreCDCIgnoredTTLDeletes != nil { + if other.IgnoreCDCIgnoredTTLDeletes != nil { + return errors.New("IGNORE_CDC_IGNORED_TTL_DELETES option specified multiple times") + } + } else { + o.IgnoreCDCIgnoredTTLDeletes = other.IgnoreCDCIgnoredTTLDeletes + } + return nil } @@ -172,5 +185,6 @@ func (o LogicalReplicationOptions) IsDefault() bool { return o.Cursor == options.Cursor && o.Mode == options.Mode && o.DefaultFunction == options.DefaultFunction && - o.UserFunctions == nil + o.UserFunctions == nil && + o.IgnoreCDCIgnoredTTLDeletes == options.IgnoreCDCIgnoredTTLDeletes } diff --git a/pkg/sql/sem/tree/create_routine.go b/pkg/sql/sem/tree/create_routine.go index 7d1660924f77..602023c06199 100644 --- a/pkg/sql/sem/tree/create_routine.go +++ b/pkg/sql/sem/tree/create_routine.go @@ -203,6 +203,7 @@ func (RoutineVolatility) routineOption() {} func (RoutineLeakproof) routineOption() {} func (RoutineBodyStr) routineOption() {} func (RoutineLanguage) routineOption() {} +func (RoutineSecurity) routineOption() {} // RoutineNullInputBehavior represent the UDF property on null parameters. type RoutineNullInputBehavior int @@ -310,6 +311,33 @@ func AsRoutineLanguage(lang string) (RoutineLanguage, error) { return RoutineLanguage(lang), nil } +// RoutineSecurity indicates the mode of security that the routine will +// be executed with. 
+type RoutineSecurity int + +const ( + // RoutineInvoker indicates that the routine is run with the privileges of + // the user invoking it. This is the default security mode if none is + // provided. + RoutineInvoker RoutineSecurity = iota + // RoutineDefiner indicates that the routine is run with the privileges of + // the user who defined it. + RoutineDefiner +) + +// Format implements the NodeFormatter interface. +func (node RoutineSecurity) Format(ctx *FmtCtx) { + ctx.WriteString("SECURITY ") + switch node { + case RoutineInvoker: + ctx.WriteString("INVOKER") + case RoutineDefiner: + ctx.WriteString("DEFINER") + default: + panic(pgerror.New(pgcode.InvalidParameterValue, "unknown routine option")) + } +} + // RoutineBodyStr is a string containing all statements in a UDF body. type RoutineBodyStr string @@ -640,7 +668,7 @@ func ComputedColumnExprContext(isVirtual bool) SchemaExprContext { // ValidateRoutineOptions checks whether there are conflicting or redundant // routine options in the given slice. func ValidateRoutineOptions(options RoutineOptions, isProc bool) error { - var hasLang, hasBody, hasLeakProof, hasVolatility, hasNullInputBehavior bool + var hasLang, hasBody, hasLeakProof, hasVolatility, hasNullInputBehavior, hasSecurity bool conflictingErr := func(opt RoutineOption) error { return errors.Wrapf(ErrConflictingRoutineOption, "%s", AsString(opt)) } @@ -680,6 +708,11 @@ func ValidateRoutineOptions(options RoutineOptions, isProc bool) error { return conflictingErr(option) } hasNullInputBehavior = true + case RoutineSecurity: + if hasSecurity { + return conflictingErr(option) + } + hasSecurity = true default: return pgerror.Newf(pgcode.InvalidParameterValue, "unknown function option: ", AsString(option)) } diff --git a/pkg/sql/sem/tree/datum.go b/pkg/sql/sem/tree/datum.go index 95f1668549aa..8e31b19fc7ea 100644 --- a/pkg/sql/sem/tree/datum.go +++ b/pkg/sql/sem/tree/datum.go @@ -87,8 +87,14 @@ var ( // postgres 4714 BC (JULIAN = 0) - 4713 in their docs - and 294276 AD. // MaxSupportedTime is the maximum time we support parsing. + // + // Refer to the doc comments of the function "timeutil.Unix" for the process of + // deriving the arguments to construct a specific time.Time. MaxSupportedTime = timeutil.Unix(9224318016000-1, 999999000) // 294276-12-31 23:59:59.999999 // MinSupportedTime is the minimum time we support parsing. + // + // Refer to the doc comments of the function "timeutil.Unix" for the process of + // deriving the arguments to construct a specific time.Time. MinSupportedTime = timeutil.Unix(-210866803200, 0) // 4714-11-24 00:00:00+00 BC ) @@ -2592,12 +2598,11 @@ type DTimestamp struct { } // MakeDTimestamp creates a DTimestamp with specified precision. -func MakeDTimestamp(t time.Time, precision time.Duration) (*DTimestamp, error) { - ret := t.Round(precision) - if ret.After(MaxSupportedTime) || ret.Before(MinSupportedTime) { - return nil, NewTimestampExceedsBoundsError(ret) +func MakeDTimestamp(t time.Time, precision time.Duration) (_ *DTimestamp, err error) { + if t, err = checkTimeBounds(t, precision); err != nil { + return nil, err } - return &DTimestamp{Time: ret}, nil + return &DTimestamp{Time: t}, nil } // MustMakeDTimestamp wraps MakeDTimestamp but panics if there is an error. 
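On the `SECURITY` plumbing above: `ValidateRoutineOptions` rejects repeated options of the same kind, and the new `hasSecurity` flag extends that bookkeeping to `SECURITY INVOKER`/`SECURITY DEFINER`. A self-contained sketch of the same pattern, using simplified stand-in types rather than the real `tree.RoutineOption` machinery:

```go
package main

import (
	"errors"
	"fmt"
)

// Stand-ins for tree.RoutineOption and tree.RoutineSecurity.
type routineOption interface{ isOption() }

type routineSecurity int

func (routineSecurity) isOption() {}

const (
	invoker routineSecurity = iota
	definer
)

// validate mirrors the hasSecurity bookkeeping added above: a second
// SECURITY option of any value is reported as a conflict.
func validate(options []routineOption) error {
	var hasSecurity bool
	for _, opt := range options {
		switch opt.(type) {
		case routineSecurity:
			if hasSecurity {
				return errors.New("conflicting or redundant options")
			}
			hasSecurity = true
		}
	}
	return nil
}

func main() {
	fmt.Println(validate([]routineOption{invoker}))          // <nil>
	fmt.Println(validate([]routineOption{invoker, definer})) // conflicting or redundant options
}
```

The real function tracks the same flag for language, body, volatility, leakproof, and null-input behavior, so a statement that repeats `SECURITY` fails with the shared conflicting-option error.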
@@ -2871,6 +2876,10 @@ type DTimestampTZ struct { func checkTimeBounds(t time.Time, precision time.Duration) (time.Time, error) { ret := t.Round(precision) if ret.After(MaxSupportedTime) || ret.Before(MinSupportedTime) { + if t == pgdate.TimeInfinity || t == pgdate.TimeNegativeInfinity { + return t, nil + } + return time.Time{}, NewTimestampExceedsBoundsError(ret) } return ret, nil diff --git a/pkg/sql/sem/tree/pgwire_encode.go b/pkg/sql/sem/tree/pgwire_encode.go index 576c28c48c22..9fe96b746110 100644 --- a/pkg/sql/sem/tree/pgwire_encode.go +++ b/pkg/sql/sem/tree/pgwire_encode.go @@ -22,6 +22,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util/timeofday" "github.com/cockroachdb/cockroach/pkg/util/timetz" + "github.com/cockroachdb/cockroach/pkg/util/timeutil/pgdate" "github.com/lib/pq/oid" ) @@ -334,6 +335,13 @@ func PGWireFormatTimeTZ(t timetz.TimeTZ, tmp []byte) []byte { // PGWireFormatTimestamp formats t into a format lib/pq understands. // If offset is not nil, it will not display the timezone offset. func PGWireFormatTimestamp(t time.Time, offset *time.Location, tmp []byte) (b []byte) { + if t == pgdate.TimeInfinity { + return []byte("infinity") + } + if t == pgdate.TimeNegativeInfinity { + return []byte("-infinity") + } + format := PGTimeStampFormatNoOffset if offset != nil { format = PGTimeStampFormat diff --git a/pkg/sql/sem/tree/show.go b/pkg/sql/sem/tree/show.go index 6cadeedeb769..d70114e00131 100644 --- a/pkg/sql/sem/tree/show.go +++ b/pkg/sql/sem/tree/show.go @@ -1160,11 +1160,21 @@ func (node *ShowFingerprints) Format(ctx *FmtCtx) { // ShowFingerprintOptions describes options for the SHOW EXPERIMENTAL_FINGERPINT // execution. type ShowFingerprintOptions struct { - StartTimestamp Expr + StartTimestamp Expr + ExcludedUserColumns StringOrPlaceholderOptList } func (s *ShowFingerprintOptions) Format(ctx *FmtCtx) { + var addSep bool + maybeAddSep := func() { + if addSep { + ctx.WriteString(", ") + } + addSep = true + } + if s.StartTimestamp != nil { + maybeAddSep() ctx.WriteString("START TIMESTAMP = ") _, canOmitParentheses := s.StartTimestamp.(alreadyDelimitedAsSyntacticDExpr) if !canOmitParentheses { @@ -1175,6 +1185,11 @@ func (s *ShowFingerprintOptions) Format(ctx *FmtCtx) { ctx.WriteByte(')') } } + if s.ExcludedUserColumns != nil { + maybeAddSep() + ctx.WriteString("EXCLUDE COLUMNS = ") + s.ExcludedUserColumns.Format(ctx) + } } // CombineWith merges other TenantReplicationOptions into this struct. @@ -1188,13 +1203,18 @@ func (s *ShowFingerprintOptions) CombineWith(other *ShowFingerprintOptions) erro s.StartTimestamp = other.StartTimestamp } + var err error + s.ExcludedUserColumns, err = combineStringOrPlaceholderOptList(s.ExcludedUserColumns, other.ExcludedUserColumns, "excluded_user_columns") + if err != nil { + return err + } return nil } // IsDefault returns true if this backup options struct has default value. 
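The `pgdate.TimeInfinity` special cases above deserve a gloss: the sentinels are concrete `time.Time` values that sit outside the supported timestamp range, so `checkTimeBounds` has to let them through unrounded before the bounds check would reject them, and the pgwire encoder then renders them as `infinity`/`-infinity`. A self-contained sketch of the pattern; the sentinel values and bounds below are stand-ins, not CockroachDB's actual constants:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

var (
	minSupported = time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC)
	maxSupported = time.Date(9999, 12, 31, 23, 59, 59, 0, time.UTC)
	// The sentinels deliberately sit outside the supported range, so they
	// must be special-cased before the bounds check rejects them.
	infinity    = maxSupported.Add(time.Hour)
	negInfinity = minSupported.Add(-time.Hour)
)

func checkBounds(t time.Time, precision time.Duration) (time.Time, error) {
	// Pass sentinels through unrounded, like pgdate.TimeInfinity above.
	if t.Equal(infinity) || t.Equal(negInfinity) {
		return t, nil
	}
	ret := t.Round(precision)
	if ret.After(maxSupported) || ret.Before(minSupported) {
		return time.Time{}, errors.New("timestamp exceeds supported bounds")
	}
	return ret, nil
}

func format(t time.Time) string {
	switch {
	case t.Equal(infinity):
		return "infinity"
	case t.Equal(negInfinity):
		return "-infinity"
	default:
		return t.Format("2006-01-02 15:04:05")
	}
}

func main() {
	ts, _ := checkBounds(infinity, time.Microsecond)
	fmt.Println(format(ts)) // infinity
}
```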
func (s ShowFingerprintOptions) IsDefault() bool { options := ShowFingerprintOptions{} - return s.StartTimestamp == options.StartTimestamp + return s.StartTimestamp == options.StartTimestamp && cmp.Equal(s.ExcludedUserColumns, options.ExcludedUserColumns) } var _ NodeFormatter = &ShowFingerprintOptions{} diff --git a/pkg/sql/sem/tree/txn.go b/pkg/sql/sem/tree/txn.go index 6e42ece16cea..561c1e22aa55 100644 --- a/pkg/sql/sem/tree/txn.go +++ b/pkg/sql/sem/tree/txn.go @@ -64,22 +64,74 @@ func (i IsolationLevel) String() string { return isolationLevelNames[i] } -// IsolationLevelFromKVTxnIsolationLevel converts a kv level isolation.Level to -// its SQL semantic equivalent. -func IsolationLevelFromKVTxnIsolationLevel(level isolation.Level) IsolationLevel { - var ret IsolationLevel +// ToKVIsoLevel converts an IsolationLevel to its isolation.Level equivalent. +func (i IsolationLevel) ToKVIsoLevel() isolation.Level { + switch i { + case ReadUncommittedIsolation, ReadCommittedIsolation: + return isolation.ReadCommitted + case RepeatableReadIsolation, SnapshotIsolation: + return isolation.Snapshot + case SerializableIsolation: + return isolation.Serializable + default: + panic(fmt.Sprintf("unknown isolation level: %s", i)) + } +} + +// FromKVIsoLevel converts an isolation.Level to its SQL semantic equivalent. +func FromKVIsoLevel(level isolation.Level) IsolationLevel { switch level { - case isolation.Serializable: - ret = SerializableIsolation case isolation.ReadCommitted: - ret = ReadCommittedIsolation + return ReadCommittedIsolation case isolation.Snapshot: - ret = SnapshotIsolation + return RepeatableReadIsolation + case isolation.Serializable: + return SerializableIsolation + default: + panic(fmt.Sprintf("unknown isolation level: %s", level)) + } +} + +// UpgradeToEnabledLevel upgrades the isolation level to the weakest enabled +// isolation level that is stronger than or equal to the input level. +func (i IsolationLevel) UpgradeToEnabledLevel( + allowReadCommitted, allowRepeatableRead, hasLicense bool, +) (_ IsolationLevel, upgraded, upgradedDueToLicense bool) { + switch i { + case ReadUncommittedIsolation: + // READ UNCOMMITTED is mapped to READ COMMITTED. PostgreSQL also does + // this: https://www.postgresql.org/docs/current/transaction-iso.html. + upgraded = true + fallthrough + case ReadCommittedIsolation: + // READ COMMITTED is only allowed if the cluster setting is enabled and the + // cluster has a license. Otherwise, it is mapped to a stronger isolation + // level (REPEATABLE READ if enabled, SERIALIZABLE otherwise). + if allowReadCommitted && hasLicense { + return ReadCommittedIsolation, upgraded, upgradedDueToLicense + } + upgraded = true + if allowReadCommitted && !hasLicense { + upgradedDueToLicense = true + } + fallthrough + case RepeatableReadIsolation, SnapshotIsolation: + // REPEATABLE READ and SNAPSHOT are considered aliases. The isolation levels + // are only allowed if the cluster setting is enabled and the cluster has a + // license. Otherwise, they are mapped to SERIALIZABLE. + if allowRepeatableRead && hasLicense { + return RepeatableReadIsolation, upgraded, upgradedDueToLicense + } + upgraded = true + if allowRepeatableRead && !hasLicense { + upgradedDueToLicense = true + } + fallthrough + case SerializableIsolation: + return SerializableIsolation, upgraded, upgradedDueToLicense default: - panic("What to do here? 
Log is a banned import") - // log.Fatalf(context.Background(), "unknown isolation level: %s", level) + panic(fmt.Sprintf("unknown isolation level: %s", i)) } - return ret } // UserPriority holds the user priority for a transaction. diff --git a/pkg/sql/sem/tree/txn_test.go b/pkg/sql/sem/tree/txn_test.go index 2c72e10a2c34..2096996d7268 100644 --- a/pkg/sql/sem/tree/txn_test.go +++ b/pkg/sql/sem/tree/txn_test.go @@ -19,28 +19,113 @@ import ( "github.com/stretchr/testify/require" ) -func TestIsolationLevelFromKVTxnIsolationLevel(t *testing.T) { +func TestToKVIsoLevel(t *testing.T) { defer leaktest.AfterTest(t)() testCases := []struct { - In isolation.Level - Out tree.IsolationLevel + in tree.IsolationLevel + out isolation.Level }{ - { - In: isolation.Serializable, - Out: tree.SerializableIsolation, - }, - { - In: isolation.ReadCommitted, - Out: tree.ReadCommittedIsolation, - }, - { - In: isolation.Snapshot, - Out: tree.SnapshotIsolation, - }, + {tree.ReadUncommittedIsolation, isolation.ReadCommitted}, + {tree.ReadCommittedIsolation, isolation.ReadCommitted}, + {tree.RepeatableReadIsolation, isolation.Snapshot}, + {tree.SnapshotIsolation, isolation.Snapshot}, + {tree.SerializableIsolation, isolation.Serializable}, } for _, tc := range testCases { - require.Equal(t, tc.Out, tree.IsolationLevelFromKVTxnIsolationLevel(tc.In)) + t.Run("", func(t *testing.T) { + require.Equal(t, tc.out, tc.in.ToKVIsoLevel()) + }) + } +} + +func TestFromKVIsoLevel(t *testing.T) { + defer leaktest.AfterTest(t)() + + testCases := []struct { + in isolation.Level + out tree.IsolationLevel + }{ + {isolation.ReadCommitted, tree.ReadCommittedIsolation}, + {isolation.Snapshot, tree.RepeatableReadIsolation}, + {isolation.Serializable, tree.SerializableIsolation}, + } + + for _, tc := range testCases { + t.Run("", func(t *testing.T) { + require.Equal(t, tc.out, tree.FromKVIsoLevel(tc.in)) + }) + } +} + +func TestUpgradeToEnabledLevel(t *testing.T) { + defer leaktest.AfterTest(t)() + + const RU = tree.ReadUncommittedIsolation + const RC = tree.ReadCommittedIsolation + const RR = tree.RepeatableReadIsolation + const SI = tree.SnapshotIsolation + const SER = tree.SerializableIsolation + + testCases := []struct { + in tree.IsolationLevel + allowRC bool + allowRR bool + license bool + expOut tree.IsolationLevel + expUpgraded bool + expUpgradedDueToLicense bool + }{ + {in: RU, allowRC: false, allowRR: false, license: false, expOut: SER, expUpgraded: true, expUpgradedDueToLicense: false}, + {in: RU, allowRC: true, allowRR: false, license: false, expOut: SER, expUpgraded: true, expUpgradedDueToLicense: true}, + {in: RU, allowRC: false, allowRR: true, license: false, expOut: SER, expUpgraded: true, expUpgradedDueToLicense: true}, + {in: RU, allowRC: true, allowRR: true, license: false, expOut: SER, expUpgraded: true, expUpgradedDueToLicense: true}, + {in: RU, allowRC: false, allowRR: false, license: true, expOut: SER, expUpgraded: true, expUpgradedDueToLicense: false}, + {in: RU, allowRC: true, allowRR: false, license: true, expOut: RC, expUpgraded: true, expUpgradedDueToLicense: false}, + {in: RU, allowRC: false, allowRR: true, license: true, expOut: RR, expUpgraded: true, expUpgradedDueToLicense: false}, + {in: RU, allowRC: true, allowRR: true, license: true, expOut: RC, expUpgraded: true, expUpgradedDueToLicense: false}, + {in: RC, allowRC: false, allowRR: false, license: false, expOut: SER, expUpgraded: true, expUpgradedDueToLicense: false}, + {in: RC, allowRC: true, allowRR: false, license: false, expOut: SER, 
expUpgraded: true, expUpgradedDueToLicense: true}, + {in: RC, allowRC: false, allowRR: true, license: false, expOut: SER, expUpgraded: true, expUpgradedDueToLicense: true}, + {in: RC, allowRC: true, allowRR: true, license: false, expOut: SER, expUpgraded: true, expUpgradedDueToLicense: true}, + {in: RC, allowRC: false, allowRR: false, license: true, expOut: SER, expUpgraded: true, expUpgradedDueToLicense: false}, + {in: RC, allowRC: true, allowRR: false, license: true, expOut: RC, expUpgraded: false, expUpgradedDueToLicense: false}, + {in: RC, allowRC: false, allowRR: true, license: true, expOut: RR, expUpgraded: true, expUpgradedDueToLicense: false}, + {in: RC, allowRC: true, allowRR: true, license: true, expOut: RC, expUpgraded: false, expUpgradedDueToLicense: false}, + {in: RR, allowRC: false, allowRR: false, license: false, expOut: SER, expUpgraded: true, expUpgradedDueToLicense: false}, + {in: RR, allowRC: true, allowRR: false, license: false, expOut: SER, expUpgraded: true, expUpgradedDueToLicense: false}, + {in: RR, allowRC: false, allowRR: true, license: false, expOut: SER, expUpgraded: true, expUpgradedDueToLicense: true}, + {in: RR, allowRC: true, allowRR: true, license: false, expOut: SER, expUpgraded: true, expUpgradedDueToLicense: true}, + {in: RR, allowRC: false, allowRR: false, license: true, expOut: SER, expUpgraded: true, expUpgradedDueToLicense: false}, + {in: RR, allowRC: true, allowRR: false, license: true, expOut: SER, expUpgraded: true, expUpgradedDueToLicense: false}, + {in: RR, allowRC: false, allowRR: true, license: true, expOut: RR, expUpgraded: false, expUpgradedDueToLicense: false}, + {in: RR, allowRC: true, allowRR: true, license: true, expOut: RR, expUpgraded: false, expUpgradedDueToLicense: false}, + {in: SI, allowRC: false, allowRR: false, license: false, expOut: SER, expUpgraded: true, expUpgradedDueToLicense: false}, + {in: SI, allowRC: true, allowRR: false, license: false, expOut: SER, expUpgraded: true, expUpgradedDueToLicense: false}, + {in: SI, allowRC: false, allowRR: true, license: false, expOut: SER, expUpgraded: true, expUpgradedDueToLicense: true}, + {in: SI, allowRC: true, allowRR: true, license: false, expOut: SER, expUpgraded: true, expUpgradedDueToLicense: true}, + {in: SI, allowRC: false, allowRR: false, license: true, expOut: SER, expUpgraded: true, expUpgradedDueToLicense: false}, + {in: SI, allowRC: true, allowRR: false, license: true, expOut: SER, expUpgraded: true, expUpgradedDueToLicense: false}, + {in: SI, allowRC: false, allowRR: true, license: true, expOut: RR, expUpgraded: false, expUpgradedDueToLicense: false}, + {in: SI, allowRC: true, allowRR: true, license: true, expOut: RR, expUpgraded: false, expUpgradedDueToLicense: false}, + {in: SER, allowRC: false, allowRR: false, license: false, expOut: SER, expUpgraded: false, expUpgradedDueToLicense: false}, + {in: SER, allowRC: true, allowRR: false, license: false, expOut: SER, expUpgraded: false, expUpgradedDueToLicense: false}, + {in: SER, allowRC: false, allowRR: true, license: false, expOut: SER, expUpgraded: false, expUpgradedDueToLicense: false}, + {in: SER, allowRC: true, allowRR: true, license: false, expOut: SER, expUpgraded: false, expUpgradedDueToLicense: false}, + {in: SER, allowRC: false, allowRR: false, license: true, expOut: SER, expUpgraded: false, expUpgradedDueToLicense: false}, + {in: SER, allowRC: true, allowRR: false, license: true, expOut: SER, expUpgraded: false, expUpgradedDueToLicense: false}, + {in: SER, allowRC: false, allowRR: true, license: true, expOut: 
SER, expUpgraded: false, expUpgradedDueToLicense: false}, + {in: SER, allowRC: true, allowRR: true, license: true, expOut: SER, expUpgraded: false, expUpgradedDueToLicense: false}, + } + + for _, tc := range testCases { + t.Run("", func(t *testing.T) { + res, upgraded, upgradedDueToLicense := tc.in.UpgradeToEnabledLevel( + tc.allowRC, tc.allowRR, tc.license) + require.Equal(t, tc.expOut, res) + require.Equal(t, tc.expUpgraded, upgraded) + require.Equal(t, tc.expUpgradedDueToLicense, upgradedDueToLicense) + }) } } diff --git a/pkg/sql/sem/tree/walk.go b/pkg/sql/sem/tree/walk.go index c7eed182d673..26038a58e519 100644 --- a/pkg/sql/sem/tree/walk.go +++ b/pkg/sql/sem/tree/walk.go @@ -1074,31 +1074,9 @@ func (n *AlterTenantReplication) walkStmt(v Visitor) Statement { return ret } -// copyNode makes a copy of this node without recursing. -func (n *LikeTenantSpec) copyNode() *LikeTenantSpec { - nodeCopy := *n - return &nodeCopy -} - -// copyNode makes a copy of this Statement without recursing in any child Statements. -func (n *CreateTenant) copyNode() *CreateTenant { - stmtCopy := *n - return &stmtCopy -} - // walkStmt is part of the walkableStmt interface. func (n *CreateTenant) walkStmt(v Visitor) Statement { ret := n - if n.Like.OtherTenant != nil { - ts, changed := walkTenantSpec(v, n.TenantSpec) - if changed { - if ret == n { - ret = n.copyNode() - } - ret.Like = n.Like.copyNode() - ret.Like.OtherTenant = ts - } - } return ret } @@ -1143,16 +1121,6 @@ func (n *CreateTenantFromReplication) walkStmt(v Visitor) Statement { ret.Options.ExpirationWindow = e } } - if n.Like.OtherTenant != nil { - ts, changed := walkTenantSpec(v, n.TenantSpec) - if changed { - if ret == n { - ret = n.copyNode() - } - ret.Like = n.Like.copyNode() - ret.Like.OtherTenant = ts - } - } return ret } diff --git a/pkg/sql/sequence.go b/pkg/sql/sequence.go index a44c9a1dda01..5c244fbc93db 100644 --- a/pkg/sql/sequence.go +++ b/pkg/sql/sequence.go @@ -660,7 +660,7 @@ func maybeAddSequenceDependencies( // Check if this reference is cross DB. if seqDesc.GetParentID() != tableDesc.GetParentID() && !allowCrossDatabaseSeqReferences.Get(&st.SV) { - return nil, errors.WithHintf( + return nil, errors.WithHint( pgerror.Newf(pgcode.FeatureNotSupported, "sequence references cannot come from other databases; (see the '%s' cluster setting)", allowCrossDatabaseSeqReferencesSetting), diff --git a/pkg/sql/serial.go b/pkg/sql/serial.go index e5f4808aaf2f..76e94a1aea82 100644 --- a/pkg/sql/serial.go +++ b/pkg/sql/serial.go @@ -19,8 +19,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/settings" "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/resolver" - "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" - "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgnotice" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondatapb" @@ -132,14 +130,13 @@ func (p *planner) generateSerialInColumnDef( error, ) { - if err := assertValidSerialColumnDef(d, tableName); err != nil { + if err := catalog.AssertValidSerialColumnDef(d, tableName); err != nil { return nil, nil, nil, nil, err } newSpec := *d - // Make the column non-nullable in all cases. PostgreSQL requires - // this. + // Column is non-nullable in all cases. PostgreSQL requires this. 
newSpec.Nullable.Nullability = tree.NotNull // Clear the IsSerial bit now that it's been remapped. @@ -356,7 +353,7 @@ func SimplifySerialInColumnDefWithRowID( return nil } - if err := assertValidSerialColumnDef(d, tableName); err != nil { + if err := catalog.AssertValidSerialColumnDef(d, tableName); err != nil { return err } @@ -374,30 +371,3 @@ func SimplifySerialInColumnDefWithRowID( return nil } - -func assertValidSerialColumnDef(d *tree.ColumnTableDef, tableName *tree.TableName) error { - if d.HasDefaultExpr() { - // SERIAL implies a new default expression, we can't have one to - // start with. This is the error produced by pg in such case. - return pgerror.Newf(pgcode.Syntax, - "multiple default values specified for column %q of table %q", - tree.ErrString(&d.Name), tree.ErrString(tableName)) - } - - if d.Nullable.Nullability == tree.Null { - // SERIAL implies a non-NULL column, we can't accept a nullability - // spec. This is the error produced by pg in such case. - return pgerror.Newf(pgcode.Syntax, - "conflicting NULL/NOT NULL declarations for column %q of table %q", - tree.ErrString(&d.Name), tree.ErrString(tableName)) - } - - if d.Computed.Expr != nil { - // SERIAL cannot be a computed column. - return pgerror.Newf(pgcode.Syntax, - "SERIAL column %q of table %q cannot be computed", - tree.ErrString(&d.Name), tree.ErrString(tableName)) - } - - return nil -} diff --git a/pkg/sql/sessiondatapb/local_only_session_data.go b/pkg/sql/sessiondatapb/local_only_session_data.go index b24c584d7ede..f30bf578e6c1 100644 --- a/pkg/sql/sessiondatapb/local_only_session_data.go +++ b/pkg/sql/sessiondatapb/local_only_session_data.go @@ -292,11 +292,11 @@ const ( // TTLStatsLow denotes a QoS level used internally by the TTL feature, which // is not settable as a session default_transaction_quality_of_service value. - TTLStatsLow = QoSLevel(admissionpb.TTLLowPri) + TTLStatsLow = QoSLevel(admissionpb.BulkLowPri) // TTLLow denotes a QoS level used internally by the TTL feature, which is not // settable as a session default_transaction_quality_of_service value. - TTLLow = QoSLevel(admissionpb.TTLLowPri) + TTLLow = QoSLevel(admissionpb.BulkLowPri) // UserLow denotes an end user QoS level lower than the default. UserLow = QoSLevel(admissionpb.UserLowPri) diff --git a/pkg/sql/sessiondatapb/local_only_session_data.proto b/pkg/sql/sessiondatapb/local_only_session_data.proto index 6e1a3b9f0efd..ddea6abd47e3 100644 --- a/pkg/sql/sessiondatapb/local_only_session_data.proto +++ b/pkg/sql/sessiondatapb/local_only_session_data.proto @@ -545,6 +545,9 @@ message LocalOnlySessionData { // hoisting a volatile expression that is conditionally executed by a CASE, // COALESCE, or IFERR expression. bool optimizer_use_conditional_hoist_fix = 138; + // OptimizerPushLimitIntoProjectFilteredScan, when true, indicates that the + // optimizer should push limit expressions into projects of filtered scans. 
+ bool optimizer_push_limit_into_project_filtered_scan = 139; /////////////////////////////////////////////////////////////////////////// // WARNING: consider whether a session parameter you're adding needs to // diff --git a/pkg/sql/set_cluster_setting.go b/pkg/sql/set_cluster_setting.go index 986cdbbbd229..05660ed5bdf9 100644 --- a/pkg/sql/set_cluster_setting.go +++ b/pkg/sql/set_cluster_setting.go @@ -807,14 +807,14 @@ func toSettingString( if ok { return settings.EncodeInt(v), nil } - return "", errors.WithHintf(errors.Errorf("invalid integer value '%d' for enum setting", *i), setting.GetAvailableValuesAsHint()) + return "", errors.WithHint(errors.Errorf("invalid integer value '%d' for enum setting", *i), setting.GetAvailableValuesAsHint()) } else if s, ok := d.(*tree.DString); ok { str := string(*s) v, ok := setting.ParseEnum(str) if ok { return settings.EncodeInt(v), nil } - return "", errors.WithHintf(errors.Errorf("invalid string value '%s' for enum setting", str), setting.GetAvailableValuesAsHint()) + return "", errors.WithHint(errors.Errorf("invalid string value '%s' for enum setting", str), setting.GetAvailableValuesAsHint()) } return "", errors.Errorf("cannot use %s %T value for enum setting, must be int or string", d.ResolvedType(), d) case *settings.ByteSizeSetting: diff --git a/pkg/sql/set_session_characteristics.go b/pkg/sql/set_session_characteristics.go index 0171a8e95a87..d6b64eaf5d75 100644 --- a/pkg/sql/set_session_characteristics.go +++ b/pkg/sql/set_session_characteristics.go @@ -24,47 +24,18 @@ import ( func (p *planner) SetSessionCharacteristics( ctx context.Context, n *tree.SetSessionCharacteristics, ) (planNode, error) { - originalLevel := n.Modes.Isolation - upgradedLevel := false - upgradedDueToLicense := false allowReadCommitted := allowReadCommittedIsolation.Get(&p.execCfg.Settings.SV) - allowSnapshot := allowSnapshotIsolation.Get(&p.execCfg.Settings.SV) + allowRepeatableRead := allowRepeatableReadIsolation.Get(&p.execCfg.Settings.SV) hasLicense := base.CCLDistributionAndEnterpriseEnabled(p.ExecCfg().Settings) if err := p.sessionDataMutatorIterator.applyOnEachMutatorError(func(m sessionDataMutator) error { // Note: We also support SET DEFAULT_TRANSACTION_ISOLATION TO ' .... '. - switch n.Modes.Isolation { - case tree.UnspecifiedIsolation: - // Nothing to do. - case tree.ReadUncommittedIsolation: - upgradedLevel = true - fallthrough - case tree.ReadCommittedIsolation: - level := tree.SerializableIsolation - if allowReadCommitted && hasLicense { - level = tree.ReadCommittedIsolation - } else { - upgradedLevel = true - if allowReadCommitted && !hasLicense { - upgradedDueToLicense = true - } - } - m.SetDefaultTransactionIsolationLevel(level) - case tree.RepeatableReadIsolation: - upgradedLevel = true - fallthrough - case tree.SnapshotIsolation: - level := tree.SerializableIsolation - if allowSnapshot && hasLicense { - level = tree.SnapshotIsolation - } else { - upgradedLevel = true - if allowSnapshot && !hasLicense { - upgradedDueToLicense = true - } + if n.Modes.Isolation != tree.UnspecifiedIsolation { + level, upgraded, upgradedDueToLicense := n.Modes.Isolation.UpgradeToEnabledLevel( + allowReadCommitted, allowRepeatableRead, hasLicense) + if f := p.sessionDataMutatorIterator.upgradedIsolationLevel; upgraded && f != nil { + f(ctx, n.Modes.Isolation, upgradedDueToLicense) } m.SetDefaultTransactionIsolationLevel(level) - default: - m.SetDefaultTransactionIsolationLevel(n.Modes.Isolation) } // Note: We also support SET DEFAULT_TRANSACTION_PRIORITY TO ' .... '. 
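The isolation handling above now delegates to tree.IsolationLevel.UpgradeToEnabledLevel, whose behavior is pinned down exhaustively by the TestUpgradeToEnabledLevel matrix earlier in this patch. As a reading aid, here is a minimal Go sketch of the decision logic that matrix implies; the type and names are local to the sketch, and the shipped method may be structured differently.

package isolationsketch

// IsolationLevel mirrors the five levels exercised by TestUpgradeToEnabledLevel.
type IsolationLevel int

const (
	ReadUncommitted IsolationLevel = iota
	ReadCommitted
	RepeatableRead
	Snapshot
	Serializable
)

// upgradeToEnabledLevel maps a requested level to the weakest enabled level at
// or above it. upgradedDueToLicense is reported only when the requested level
// is enabled by a cluster setting but unusable for lack of a license.
func upgradeToEnabledLevel(
	in IsolationLevel, allowRC, allowRR, hasLicense bool,
) (out IsolationLevel, upgraded, upgradedDueToLicense bool) {
	switch in {
	case Serializable:
		// Always available, never upgraded.
		return Serializable, false, false
	case ReadUncommitted:
		// READ UNCOMMITTED is never executed as such; it takes the weakest
		// enabled level, preferring READ COMMITTED over REPEATABLE READ.
		switch {
		case allowRC && hasLicense:
			return ReadCommitted, true, false
		case allowRR && hasLicense:
			return RepeatableRead, true, false
		default:
			return Serializable, true, (allowRC || allowRR) && !hasLicense
		}
	case ReadCommitted:
		if allowRC && hasLicense {
			return ReadCommitted, false, false
		}
		if allowRR && hasLicense {
			return RepeatableRead, true, false
		}
		return Serializable, true, (allowRC || allowRR) && !hasLicense
	default: // RepeatableRead, Snapshot
		if allowRR && hasLicense {
			return RepeatableRead, false, false
		}
		return Serializable, true, allowRR && !hasLicense
	}
}

Note that SNAPSHOT landing on REPEATABLE READ is not counted as an upgrade in the matrix: the two share the KV Snapshot level, which is also why the RepeatableRead and Snapshot rows of the test table are identical.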
@@ -117,9 +88,5 @@ func (p *planner) SetSessionCharacteristics( "unsupported default deferrable mode: %s", n.Modes.Deferrable) } - if f := p.sessionDataMutatorIterator.upgradedIsolationLevel; upgradedLevel && f != nil { - f(ctx, originalLevel, upgradedDueToLicense) - } - return newZeroNode(nil /* columns */), nil } diff --git a/pkg/sql/show_cluster_setting.go b/pkg/sql/show_cluster_setting.go index 1ed61478527b..0e842caafa9b 100644 --- a/pkg/sql/show_cluster_setting.go +++ b/pkg/sql/show_cluster_setting.go @@ -141,10 +141,15 @@ func checkClusterSettingValuesAreEquivalent(localRawVal, kvRawVal []byte) error } decodedLocal, localVal, localOk := maybeDecodeVersion(localRawVal) decodedKV, kvVal, kvOk := maybeDecodeVersion(kvRawVal) - if localOk && kvOk && decodedLocal.Internal%2 == 1 /* isFence */ { - predecessor := decodedLocal - predecessor.Internal-- - if predecessor.Equal(decodedKV) { + if localOk && kvOk && decodedLocal.IsFence() { + // NB: The internal version is -1 for the fence version of all final cluster + // versions. In these cases, we cannot simply check that the local version + // is off-by-one from the KV version, since (for example's sake) we would be + // comparing (24,1,12) to (24,2,-1). Instead, we can use ListBetween to + // verify that there are no cluster versions in between the local and KV + // versions. + versionsBetween := clusterversion.ListBetween(decodedKV.Version, decodedLocal.Version) + if len(versionsBetween) == 0 { return nil } } diff --git a/pkg/sql/show_cluster_setting_test.go b/pkg/sql/show_cluster_setting_test.go index b6f5ef8c4df6..c68bf2b0af1c 100644 --- a/pkg/sql/show_cluster_setting_test.go +++ b/pkg/sql/show_cluster_setting_test.go @@ -11,6 +11,7 @@ package sql import ( + "fmt" "testing" "github.com/cockroachdb/cockroach/pkg/clusterversion" @@ -59,6 +60,21 @@ func TestCheckClusterSettingValuesAreEquivalent(t *testing.T) { kv: encode(t, "22.2-upgrading-to-23.1-step-011"), exp: "value differs between local setting ([]) and KV (22.2-upgrading-to-23.1-step-011)", }, + { // 5 + // NB: On release branches, clusterversion.Latest will have a fence + // version that has -1 for the internal version. 
+ local: encode(t, clusterversion.Latest.Version().FenceVersion().String()), + kv: encode(t, (clusterversion.Latest - 1).Version().String()), + }, + { // 6 + local: encode(t, clusterversion.Latest.Version().String()), + kv: encode(t, (clusterversion.Latest - 1).Version().String()), + exp: fmt.Sprintf( + "value differs between local setting (%s) and KV (%s)", + clusterversion.ClusterVersion{Version: clusterversion.Latest.Version()}, + clusterversion.ClusterVersion{Version: (clusterversion.Latest - 1).Version()}, + ), + }, } { t.Run("", func(t *testing.T) { err := checkClusterSettingValuesAreEquivalent(tc.local, tc.kv) diff --git a/pkg/sql/show_fingerprints.go b/pkg/sql/show_fingerprints.go index d6516ac8a05d..225c3c927c49 100644 --- a/pkg/sql/show_fingerprints.go +++ b/pkg/sql/show_fingerprints.go @@ -23,6 +23,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" "github.com/cockroachdb/cockroach/pkg/sql/clusterunique" + "github.com/cockroachdb/cockroach/pkg/sql/exprutil" "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" @@ -71,8 +72,27 @@ type showFingerprintsNode struct { func (p *planner) ShowFingerprints( ctx context.Context, n *tree.ShowFingerprints, ) (planNode, error) { + + op := "SHOW EXPERIMENTAL_FINGERPRINTS" + evalOptions, err := evalShowFingerprintOptions(ctx, n.Options, p.EvalContext(), p.SemaCtx(), + op, p.ExprEvaluator(op)) + if err != nil { + return nil, err + } + if n.TenantSpec != nil { + // Tenant fingerprints use the KV fingerprint method, which cannot exclude columns this way. + if evalOptions.excludedUserColumns != nil { + err = pgerror.New(pgcode.InvalidParameterValue, "cannot use the EXCLUDE COLUMNS option when fingerprinting a tenant.") + return nil, err + } + return p.planShowTenantFingerprint(ctx, n.TenantSpec, evalOptions) + } + + // START TIMESTAMP is only allowed when fingerprinting a virtual cluster, since only that path uses the KV fingerprint method instead of SQL. + if !evalOptions.startTimestamp.IsEmpty() { + err = pgerror.New(pgcode.InvalidParameterValue, "cannot use the START TIMESTAMP option when fingerprinting a table.") + return nil, err } // We avoid the cache so that we can observe the fingerprints without @@ -91,19 +111,22 @@ func (p *planner) ShowFingerprints( columns: colinfo.ShowFingerprintsColumns, tableDesc: tableDesc, indexes: tableDesc.ActiveIndexes(), + options: evalOptions, }, nil } type resolvedShowTenantFingerprintOptions struct { - startTimestamp hlc.Timestamp + startTimestamp hlc.Timestamp + excludedUserColumns []string } -func evalShowTenantFingerprintOptions( +func evalShowFingerprintOptions( ctx context.Context, options tree.ShowFingerprintOptions, evalCtx *eval.Context, semaCtx *tree.SemaContext, op string, + eval exprutil.Evaluator, ) (*resolvedShowTenantFingerprintOptions, error) { r := &resolvedShowTenantFingerprintOptions{} if options.StartTimestamp != nil { @@ -114,11 +137,21 @@ func evalShowTenantFingerprintOptions( r.startTimestamp = ts } + if options.ExcludedUserColumns != nil { + cols, err := eval.StringArray( + ctx, tree.Exprs(options.ExcludedUserColumns)) + + if err != nil { + return nil, err + } + r.excludedUserColumns = cols + } + return r, nil } func (p *planner) planShowTenantFingerprint( - ctx context.Context, ts *tree.TenantSpec, options tree.ShowFingerprintOptions, +
ctx context.Context, ts *tree.TenantSpec, evalOptions *resolvedShowTenantFingerprintOptions, ) (planNode, error) { if err := CanManageTenant(ctx, p); err != nil { return nil, err @@ -133,12 +166,6 @@ func (p *planner) planShowTenantFingerprint( return nil, err } - evalOptions, err := evalShowTenantFingerprintOptions(ctx, options, p.EvalContext(), p.SemaCtx(), - "SHOW EXPERIMENTAL_FINGERPRINTS FROM VIRTUAL CLUSTER") - if err != nil { - return nil, err - } - return &showFingerprintsNode{ columns: colinfo.ShowTenantFingerprintsColumns, tenantSpec: tspec, @@ -290,7 +317,11 @@ func (n *showFingerprintsNode) Next(params runParams) (bool, error) { return false, nil } index := n.indexes[n.run.rowIdx] - sql, err := BuildFingerprintQueryForIndex(n.tableDesc, index, []string{}) + excludedColumns := []string{} + if n.options != nil && len(n.options.excludedUserColumns) > 0 { + excludedColumns = append(excludedColumns, n.options.excludedUserColumns...) + } + sql, err := BuildFingerprintQueryForIndex(n.tableDesc, index, excludedColumns) if err != nil { return false, err } diff --git a/pkg/sql/show_logical_replication_jobs.go b/pkg/sql/show_logical_replication_jobs.go deleted file mode 100644 index ad6729edc75e..000000000000 --- a/pkg/sql/show_logical_replication_jobs.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2024 The Cockroach Authors. -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package sql - -import ( - "context" - - "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" - "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/sql/types" -) - -var showLogicalReplicationJobsCols = colinfo.ResultColumns{ - {Name: "job_id", Typ: types.Int}, - {Name: "targets", Typ: types.StringArray}, - {Name: "status", Typ: types.String}, - {Name: "replicated_time", Typ: types.TimestampTZ}, -} - -var withDetailsCols = colinfo.ResultColumns{ - {Name: "replicated_start_time", Typ: types.TimestampTZ}, -} - -type showLogicalReplicationJobsNode struct { - name string - columns colinfo.ResultColumns - withDetails bool -} - -func (p *planner) ShowLogicalReplicationJobs( - ctx context.Context, n *tree.ShowLogicalReplicationJobs, -) (planNode, error) { - // TODO(azhu): implement - node := &showLogicalReplicationJobsNode{ - name: n.String(), - withDetails: n.WithDetails, - columns: showLogicalReplicationJobsCols, - } - if n.WithDetails { - node.columns = append(node.columns, withDetailsCols...) 
- } - return node, nil - -} - -func (n *showLogicalReplicationJobsNode) startExec(params runParams) error { - // TODO(azhu): implement - return nil -} - -func (n *showLogicalReplicationJobsNode) Next(params runParams) (bool, error) { - // TODO(azhu): implement - return false, nil -} - -func (n *showLogicalReplicationJobsNode) Values() tree.Datums { - // TODO(azhu): implement - return tree.Datums{} -} - -func (n *showLogicalReplicationJobsNode) Close(_ context.Context) { - // TODO(azhu): implement -} diff --git a/pkg/sql/sqlinstance/instancestorage/instancereader.go b/pkg/sql/sqlinstance/instancestorage/instancereader.go index 85e3e773e8d8..0996462c278c 100644 --- a/pkg/sql/sqlinstance/instancestorage/instancereader.go +++ b/pkg/sql/sqlinstance/instancestorage/instancereader.go @@ -135,6 +135,8 @@ func makeInstanceInfo(row instancerow) sqlinstance.InstanceInfo { SessionID: row.sessionID, Locality: row.locality, BinaryVersion: row.binaryVersion, + Region: row.region, + IsDraining: row.isDraining, } } diff --git a/pkg/sql/sqlinstance/instancestorage/instancereader_test.go b/pkg/sql/sqlinstance/instancestorage/instancereader_test.go index 94b556d66290..8fde234101d0 100644 --- a/pkg/sql/sqlinstance/instancestorage/instancereader_test.go +++ b/pkg/sql/sqlinstance/instancestorage/instancereader_test.go @@ -217,7 +217,7 @@ func TestReader(t *testing.T) { if err != nil { return errors.Wrapf(err, "%s", name) } - sortInstances(instances) + instancestorage.SortInstances(instances) return errors.Wrapf(testOutputFn(exp, instances), "%s", name) } verifyInstances := func(t *testing.T, exp expectations) error { diff --git a/pkg/sql/sqlinstance/instancestorage/instancestorage.go b/pkg/sql/sqlinstance/instancestorage/instancestorage.go index c3b4ddc936de..14cc5c7da11c 100644 --- a/pkg/sql/sqlinstance/instancestorage/instancestorage.go +++ b/pkg/sql/sqlinstance/instancestorage/instancestorage.go @@ -19,6 +19,7 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/base" + "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvclient/rangefeed" @@ -104,6 +105,7 @@ type instancerow struct { sessionID sqlliveness.SessionID locality roachpb.Locality binaryVersion roachpb.Version + isDraining bool timestamp hlc.Timestamp } @@ -181,28 +183,69 @@ func (s *Storage) CreateInstance( return s.createInstanceRow(ctx, session, rpcAddr, sqlAddr, locality, binaryVersion, noNodeID) } -// ReleaseInstance deallocates the instance id iff it is currently owned by the -// provided sessionID. -func (s *Storage) ReleaseInstance( +// getKeyAndInstance is a helper that forms the row key from the session ID and +// instance ID and fetches the current value stored at that key.
+func (s *Storage) getKeyAndInstance( + ctx context.Context, sessionID sqlliveness.SessionID, instanceID base.SQLInstanceID, txn *kv.Txn, +) (roachpb.Key, instancerow, error) { + instance := instancerow{} + region, _, err := slstorage.UnsafeDecodeSessionID(sessionID) + if err != nil { + return nil, instance, errors.Wrap(err, "unable to determine region for sql_instance") + } + + key := s.rowCodec.encodeKey(region, instanceID) + kv, err := txn.Get(ctx, key) + if err != nil { + return nil, instance, err + } + + instance, err = s.rowCodec.decodeRow(kv.Key, kv.Value) + if err != nil { + return nil, instance, err + } + + return key, instance, nil +} + +// SetInstanceDraining sets the is_draining column of the sql_instances system +// table to true. +func (s *Storage) SetInstanceDraining( ctx context.Context, sessionID sqlliveness.SessionID, instanceID base.SQLInstanceID, ) error { return s.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - region, _, err := slstorage.UnsafeDecodeSessionID(sessionID) + key, n, err := s.getKeyAndInstance(ctx, sessionID, instanceID, txn) if err != nil { - return errors.Wrap(err, "unable to determine region for sql_instance") + return err } + // TODO: When can instance.sessionID differ from the provided sessionID? - key := s.rowCodec.encodeKey(region, instanceID) - kv, err := txn.Get(ctx, key) + batch := txn.NewBatch() + encodeIsDraining, err := s.shouldEncodeIsDraining(ctx, txn) if err != nil { return err } - - instance, err := s.rowCodec.decodeRow(kv.Key, kv.Value) + value, err := s.rowCodec.encodeValue( + n.rpcAddr, n.sqlAddr, n.sessionID, n.locality, n.binaryVersion, + encodeIsDraining, true /* isDraining */) if err != nil { return err } + batch.Put(key, value) + return txn.CommitInBatch(ctx, batch) + }) +} +// ReleaseInstance deallocates the instance id iff it is currently owned by the +// provided sessionID. +func (s *Storage) ReleaseInstance( + ctx context.Context, sessionID sqlliveness.SessionID, instanceID base.SQLInstanceID, +) error { + return s.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + key, instance, err := s.getKeyAndInstance(ctx, sessionID, instanceID, txn) + if err != nil { + return err + } if instance.sessionID != sessionID { // Great! The session was already released or released and // claimed by another server.
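Together, SetInstanceDraining and ReleaseInstance support a drain-then-release lifecycle. The wrapper below is a hypothetical illustration of the intended call order, not code from this change; only the two Storage method signatures are taken from the hunks above, and the sketch assumes the instancestorage package context (so the context, base, and sqlliveness imports are already available).

// drainAndRelease is a hypothetical orchestration sketch: a SQL server that
// begins draining first marks its sql_instances row so peers stop routing
// new work to it, and only deallocates its instance ID once shutdown is done.
func drainAndRelease(
	ctx context.Context,
	s *Storage,
	sessionID sqlliveness.SessionID,
	instanceID base.SQLInstanceID,
) error {
	// Flip is_draining to true on this instance's row.
	if err := s.SetInstanceDraining(ctx, sessionID, instanceID); err != nil {
		return err
	}

	// ... drain SQL connections and running work here ...

	// Hand the instance ID back so it can be reclaimed or reused.
	return s.ReleaseInstance(ctx, sessionID, instanceID)
}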
@@ -210,17 +253,28 @@ func (s *Storage) ReleaseInstance( } batch := txn.NewBatch() - - value, err := s.rowCodec.encodeAvailableValue() + encodeIsDraining, err := s.shouldEncodeIsDraining(ctx, txn) + if err != nil { + return err + } + value, err := s.rowCodec.encodeAvailableValue(encodeIsDraining) if err != nil { return err } batch.Put(key, value) - return txn.CommitInBatch(ctx, batch) }) } +func (s *Storage) shouldEncodeIsDraining(ctx context.Context, txn *kv.Txn) (bool, error) { + guard, err := s.settingsWatch.MakeVersionGuard( + ctx, txn, clusterversion.V24_3_SQLInstancesAddDraining) + if err != nil { + return false, err + } + return guard.IsActive(clusterversion.V24_3_SQLInstancesAddDraining), nil +} + func (s *Storage) createInstanceRow( ctx context.Context, session sqlliveness.Session, @@ -281,7 +335,13 @@ func (s *Storage) createInstanceRow( b := txn.NewBatch() - value, err := s.rowCodec.encodeValue(rpcAddr, sqlAddr, session.ID(), locality, binaryVersion) + encodeIsDraining, err := s.shouldEncodeIsDraining(ctx, txn) + if err != nil { + return err + } + value, err := s.rowCodec.encodeValue(rpcAddr, sqlAddr, + session.ID(), locality, binaryVersion, + encodeIsDraining, false /* isDraining */) if err != nil { return err } @@ -417,8 +477,12 @@ func (s *Storage) reclaimRegion(ctx context.Context, region []byte) error { toReclaim, toDelete := idsToReclaim(target, instances, isExpired) writeBatch := txn.NewBatch() + encodeIsDraining, err := s.shouldEncodeIsDraining(ctx, txn) + if err != nil { + return err + } for _, instance := range toReclaim { - availableValue, err := s.rowCodec.encodeAvailableValue() + availableValue, err := s.rowCodec.encodeAvailableValue(encodeIsDraining) if err != nil { return err } @@ -656,8 +720,13 @@ func (s *Storage) generateAvailableInstanceRowsWithTxn( } b := txn.NewBatch() + + encodeIsDraining, err := s.shouldEncodeIsDraining(ctx, txn) + if err != nil { + return err + } for _, row := range idsToAllocate(target, regions, onlineInstances) { - value, err := s.rowCodec.encodeAvailableValue() + value, err := s.rowCodec.encodeAvailableValue(encodeIsDraining) if err != nil { return errors.Wrapf(err, "failed to encode row for instance id %d", row.instanceID) } diff --git a/pkg/sql/sqlinstance/instancestorage/instancestorage_internal_test.go b/pkg/sql/sqlinstance/instancestorage/instancestorage_internal_test.go index 504024f8ae13..abc35e360b0a 100644 --- a/pkg/sql/sqlinstance/instancestorage/instancestorage_internal_test.go +++ b/pkg/sql/sqlinstance/instancestorage/instancestorage_internal_test.go @@ -95,6 +95,8 @@ func TestGetAvailableInstanceIDForRegion(t *testing.T) { sessionExpiry, roachpb.Locality{}, roachpb.Version{}, + /* encodeIsDraining */ true, + /* isDraining */ false, )) } @@ -344,6 +346,8 @@ func TestReclaimAndGenerateInstanceRows(t *testing.T) { sessionExpiry, roachpb.Locality{}, roachpb.Version{}, + /* encodeIsDraining */ true, + /* isDraining */ false, )) } for _, i := range []int{2, 3} { @@ -476,6 +480,9 @@ func claim( require.NoError(t, err) require.NoError(t, slStorage.Insert(ctx, sessionID, sessionExpiration)) require.NoError(t, storage.CreateInstanceDataForTest( - ctx, region, instanceID, rpcAddr, sqlAddr, sessionID, sessionExpiration, roachpb.Locality{}, roachpb.Version{}, + ctx, region, instanceID, rpcAddr, sqlAddr, sessionID, + sessionExpiration, roachpb.Locality{}, roachpb.Version{}, + /* encodeIsDraining */ true, + /* isDraining */ false, )) } diff --git a/pkg/sql/sqlinstance/instancestorage/instancestorage_test.go 
b/pkg/sql/sqlinstance/instancestorage/instancestorage_test.go index 36e61f022903..1edc17807297 100644 --- a/pkg/sql/sqlinstance/instancestorage/instancestorage_test.go +++ b/pkg/sql/sqlinstance/instancestorage/instancestorage_test.go @@ -15,7 +15,6 @@ import ( gosql "database/sql" "fmt" "math/rand" - "sort" "sync" "testing" "time" @@ -145,15 +144,16 @@ func TestStorage(t *testing.T) { equalInstance := func(t *testing.T, expect sqlinstance.InstanceInfo, actual sqlinstance.InstanceInfo) { require.Equal(t, expect.InstanceID, actual.InstanceID) - require.Equal(t, actual.SessionID, actual.SessionID) - require.Equal(t, actual.InstanceRPCAddr, actual.InstanceRPCAddr) - require.Equal(t, actual.InstanceSQLAddr, actual.InstanceSQLAddr) - require.Equal(t, actual.Locality, actual.Locality) - require.Equal(t, actual.BinaryVersion, actual.BinaryVersion) + require.Equal(t, expect.SessionID, actual.SessionID) + require.Equal(t, expect.InstanceRPCAddr, actual.InstanceRPCAddr) + require.Equal(t, expect.InstanceSQLAddr, actual.InstanceSQLAddr) + require.Equal(t, expect.Locality, actual.Locality) + require.Equal(t, expect.BinaryVersion, actual.BinaryVersion) + require.Equal(t, expect.IsDraining, actual.IsDraining) } isAvailable := func(t *testing.T, instance sqlinstance.InstanceInfo, id base.SQLInstanceID) { - require.Equal(t, sqlinstance.InstanceInfo{InstanceID: id}, instance) + require.Equal(t, sqlinstance.InstanceInfo{InstanceID: id, Region: enum.One}, instance) } var initialInstances []sqlinstance.InstanceInfo @@ -169,7 +169,7 @@ func TestStorage(t *testing.T) { // Verify all instances are returned by GetAllInstancesDataForTest. { instances, err := storage.GetAllInstancesDataForTest(ctx) - sortInstances(instances) + instancestorage.SortInstances(instances) require.NoError(t, err) require.Equal(t, preallocatedCount, len(instances)) for _, i := range []int{0, 1, 2} { @@ -188,7 +188,7 @@ func TestStorage(t *testing.T) { // Verify all instances are returned by GetAllInstancesDataForTest. 
{ instances, err := storage.GetAllInstancesDataForTest(ctx) - sortInstances(instances) + instancestorage.SortInstances(instances) require.NoError(t, err) require.Equal(t, preallocatedCount, len(instances)) for i := range instances { @@ -207,7 +207,7 @@ func TestStorage(t *testing.T) { instances, err := storage.GetAllInstancesDataForTest(ctx) require.NoError(t, err) require.Equal(t, preallocatedCount, len(instances)) - sortInstances(instances) + instancestorage.SortInstances(instances) for i, instance := range instances { if i == 0 { @@ -231,7 +231,7 @@ func TestStorage(t *testing.T) { instances, err := storage.GetAllInstancesDataForTest(ctx) require.NoError(t, err) - sortInstances(instances) + instancestorage.SortInstances(instances) require.Equal(t, len(initialInstances), len(instances)) for index, instance := range instances { @@ -598,7 +598,7 @@ func TestReclaimLoop(t *testing.T) { if err != nil { return err } - sortInstances(instances) + instancestorage.SortInstances(instances) if len(instances) == 0 { return errors.New("instances have not been generated yet") } @@ -645,6 +645,8 @@ func TestReclaimLoop(t *testing.T) { sessionExpiry, localities[i], binaryVersions[i], + /* encodeIsDraining */ true, + /* isDraining */ false, )) } @@ -663,7 +665,7 @@ func TestReclaimLoop(t *testing.T) { if err != nil { return err } - sortInstances(instances) + instancestorage.SortInstances(instances) if len(instances) == preallocatedCount { return errors.New("new instances have not been generated yet") } @@ -689,9 +691,3 @@ func TestReclaimLoop(t *testing.T) { } } } - -func sortInstances(instances []sqlinstance.InstanceInfo) { - sort.SliceStable(instances, func(idx1, idx2 int) bool { - return instances[idx1].InstanceID < instances[idx2].InstanceID - }) -} diff --git a/pkg/sql/sqlinstance/instancestorage/row_codec.go b/pkg/sql/sqlinstance/instancestorage/row_codec.go index 2b3be0bece7e..bc5c1aeeb4b1 100644 --- a/pkg/sql/sqlinstance/instancestorage/row_codec.go +++ b/pkg/sql/sqlinstance/instancestorage/row_codec.go @@ -58,7 +58,7 @@ type rowCodec struct { type valueColumnIdx int -const numValueColumns = 5 +const numValueColumns = 6 const ( addrColumnIdx valueColumnIdx = iota @@ -66,6 +66,7 @@ const ( localityColumnIdx sqlAddrColumnIdx binaryVersionColumnIdx + isDrainingColumnIdx // Ensure we have the right number of value columns. _ uint = iota - numValueColumns @@ -78,6 +79,7 @@ var valueColumnNames = [numValueColumns]string{ localityColumnIdx: "locality", sqlAddrColumnIdx: "sql_addr", binaryVersionColumnIdx: "binary_version", + isDrainingColumnIdx: "is_draining", } // rbrKeyCodec is used by the regional by row compatible sql_instances index format. 
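An aside on the constant block above: it guards numValueColumns in both directions at compile time. The trailing blank constant goes negative, and fails to compile, if numValueColumns overstates the number of declared column indexes (a negative untyped constant cannot be converted to uint); the [numValueColumns]string literal for valueColumnNames fails if it understates them, because an indexed element would fall outside the array. A standalone illustration with hypothetical names:

package colguard

const numCols = 2

const (
	colA = iota // 0
	colB        // 1

	// iota is now 2, equal to numCols, so this guard is uint(0). If numCols
	// were raised to 3 without adding a column index, the constant would be
	// -1 and the file would not compile.
	_ uint = iota - numCols
)

// Sizing the array by numCols is the guard in the other direction: if numCols
// were lowered to 1, the colB index would be out of bounds at compile time.
var colNames = [numCols]string{
	colA: "a",
	colB: "b",
}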
@@ -200,7 +202,7 @@ func (d *rowCodec) decodeRow(key roachpb.Key, value *roachpb.Value) (instancerow return r, nil } - r.rpcAddr, r.sqlAddr, r.sessionID, r.locality, r.binaryVersion, r.timestamp, err = d.decodeValue(*value) + r.rpcAddr, r.sqlAddr, r.sessionID, r.locality, r.binaryVersion, r.isDraining, r.timestamp, err = d.decodeValue(*value) if err != nil { return instancerow{}, errors.Wrapf(err, "failed to decode value for: %v", key) } @@ -215,9 +217,11 @@ func (d *rowCodec) encodeValue( sessionID sqlliveness.SessionID, locality roachpb.Locality, binaryVersion roachpb.Version, + encodeIsDraining bool, + isDraining bool, ) (*roachpb.Value, error) { var valueBuf []byte - columnsToEncode := [numValueColumns]func() tree.Datum{ + columnsToEncode := []func() tree.Datum{ addrColumnIdx: func() tree.Datum { if rpcAddr == "" { return tree.DNull @@ -248,6 +252,13 @@ func (d *rowCodec) encodeValue( return tree.NewDString(clusterversion.StringForPersistence(binaryVersion)) }, } + + if encodeIsDraining { + columnsToEncode = append(columnsToEncode, func() tree.Datum { + return tree.MakeDBool(tree.DBool(isDraining)) + }) + } + for i, f := range columnsToEncode { var err error var prev descpb.ColumnID @@ -265,8 +276,9 @@ func (d *rowCodec) encodeValue( return v, nil } -func (d *rowCodec) encodeAvailableValue() (*roachpb.Value, error) { - value, err := d.encodeValue("", "", sqlliveness.SessionID([]byte{}), roachpb.Locality{}, roachpb.Version{}) +func (d *rowCodec) encodeAvailableValue(encodeIsDraining bool) (*roachpb.Value, error) { + value, err := d.encodeValue("", "", sqlliveness.SessionID([]byte{}), + roachpb.Locality{}, roachpb.Version{}, encodeIsDraining, false) if err != nil { return nil, errors.Wrap(err, "failed to encode available sql_instances value") } @@ -282,17 +294,18 @@ func (d *rowCodec) decodeValue( sessionID sqlliveness.SessionID, locality roachpb.Locality, binaryVersion roachpb.Version, + isDraining bool, timestamp hlc.Timestamp, _ error, ) { // The rest of the columns are stored as a single family. bytes, err := value.GetTuple() if err != nil { - return "", "", "", roachpb.Locality{}, roachpb.Version{}, hlc.Timestamp{}, err + return "", "", "", roachpb.Locality{}, roachpb.Version{}, false, hlc.Timestamp{}, err } datums, err := d.decoder.Decode(&tree.DatumAlloc{}, bytes) if err != nil { - return "", "", "", roachpb.Locality{}, roachpb.Version{}, hlc.Timestamp{}, err + return "", "", "", roachpb.Locality{}, roachpb.Version{}, false, hlc.Timestamp{}, err } for i, f := range [numValueColumns]func(datum tree.Datum) error{ addrColumnIdx: func(datum tree.Datum) error { @@ -345,6 +358,14 @@ func (d *rowCodec) decodeValue( } return nil }, + isDrainingColumnIdx: func(datum tree.Datum) error { + if datum == tree.DNull { + isDraining = false + } else { + isDraining = bool(tree.MustBeDBool(datum)) + } + return nil + }, } { ord := d.valueColumnOrdinals[i] // Deal with the fact that new columns may not yet have been added. 
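The decode hunk above, paired with the encodeIsDraining plumbing earlier in the file, is the mixed-version strategy in miniature: writers append the new is_draining datum only once the version gate is active, and readers treat a missing or NULL datum as false so rows written by older nodes still decode. A compact sketch of the pattern, with simplified stand-in types rather than the real rowCodec API:

package codecsketch

// datum stands in for tree.Datum; nil plays the role of DNull.
type datum any

const isDrainingIdx = 5 // ordinal of the new column in the value tuple

// encodeRow appends the is_draining column only when the cluster version gate
// says every node can decode the longer tuple.
func encodeRow(cols []datum, gateActive, isDraining bool) []datum {
	out := append([]datum{}, cols...)
	if gateActive {
		out = append(out, isDraining)
	}
	return out
}

// decodeIsDraining reads the column defensively: rows written by older nodes
// are shorter, and an explicit NULL also decodes as false.
func decodeIsDraining(row []datum) bool {
	if len(row) <= isDrainingIdx || row[isDrainingIdx] == nil {
		return false
	}
	b, ok := row[isDrainingIdx].(bool)
	return ok && b
}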
@@ -353,8 +374,8 @@ func (d *rowCodec) decodeValue( datum = datums[ord] } if err := f(datum); err != nil { - return "", "", "", roachpb.Locality{}, roachpb.Version{}, hlc.Timestamp{}, err + return "", "", "", roachpb.Locality{}, roachpb.Version{}, false, hlc.Timestamp{}, err } } - return rpcAddr, sqlAddr, sessionID, locality, binaryVersion, value.Timestamp, nil + return rpcAddr, sqlAddr, sessionID, locality, binaryVersion, isDraining, value.Timestamp, nil } diff --git a/pkg/sql/sqlinstance/instancestorage/test_helpers.go b/pkg/sql/sqlinstance/instancestorage/test_helpers.go index ee9e8a1bcace..84c3ca5d3999 100644 --- a/pkg/sql/sqlinstance/instancestorage/test_helpers.go +++ b/pkg/sql/sqlinstance/instancestorage/test_helpers.go @@ -14,6 +14,7 @@ package instancestorage import ( "context" + "sort" "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/kv" @@ -87,6 +88,8 @@ func (s *Storage) CreateInstanceDataForTest( sessionExpiration hlc.Timestamp, locality roachpb.Locality, binaryVersion roachpb.Version, + encodeIsDraining bool, + isDraining bool, ) error { ctx = multitenant.WithTenantCostControlExemption(ctx) return s.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { @@ -98,7 +101,10 @@ func (s *Storage) CreateInstanceDataForTest( } key := s.rowCodec.encodeKey(region, instanceID) - value, err := s.rowCodec.encodeValue(rpcAddr, sqlAddr, sessionID, locality, binaryVersion) + + value, err := s.rowCodec.encodeValue(rpcAddr, sqlAddr, + sessionID, locality, binaryVersion, + true /* encodeIsDraining */, isDraining) if err != nil { return err } @@ -122,7 +128,7 @@ func (s *Storage) GetInstanceDataForTest( if row.Value == nil { return sqlinstance.InstanceInfo{}, sqlinstance.NonExistentInstanceError } - rpcAddr, sqlAddr, sessionID, locality, binaryVersion, _, err := s.rowCodec.decodeValue(*row.Value) + rpcAddr, sqlAddr, sessionID, locality, binaryVersion, isDraining, _, err := s.rowCodec.decodeValue(*row.Value) if err != nil { return sqlinstance.InstanceInfo{}, errors.Wrapf(err, "could not decode data for instance %d", instanceID) } @@ -133,6 +139,7 @@ func (s *Storage) GetInstanceDataForTest( SessionID: sessionID, Locality: locality, BinaryVersion: binaryVersion, + IsDraining: isDraining, } return instanceInfo, nil } @@ -153,3 +160,10 @@ func (s *Storage) GetAllInstancesDataForTest( } return makeInstanceInfos(rows), nil } + +// SortInstances sorts instances by their id. 
+func SortInstances(instances []sqlinstance.InstanceInfo) { + sort.Slice(instances, func(idx1, idx2 int) bool { + return instances[idx1].InstanceID < instances[idx2].InstanceID + }) +} diff --git a/pkg/sql/sqlinstance/sqlinstance.go b/pkg/sql/sqlinstance/sqlinstance.go index 6eaa4ccd495e..0e7ce88b93db 100644 --- a/pkg/sql/sqlinstance/sqlinstance.go +++ b/pkg/sql/sqlinstance/sqlinstance.go @@ -37,6 +37,7 @@ type InstanceInfo struct { SessionID sqlliveness.SessionID Locality roachpb.Locality BinaryVersion roachpb.Version + IsDraining bool } func (ii InstanceInfo) GetInstanceID() base.SQLInstanceID { diff --git a/pkg/sql/sqlitelogictest/tests/fakedist-disk/generated_test.go b/pkg/sql/sqlitelogictest/tests/fakedist-disk/generated_test.go index 13e4b4497199..f652ccd85bcd 100644 --- a/pkg/sql/sqlitelogictest/tests/fakedist-disk/generated_test.go +++ b/pkg/sql/sqlitelogictest/tests/fakedist-disk/generated_test.go @@ -32,7 +32,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/randutil" ) -const configIdx = 6 +const configIdx = 7 var sqliteLogicTestDir string diff --git a/pkg/sql/sqlitelogictest/tests/fakedist-vec-off/generated_test.go b/pkg/sql/sqlitelogictest/tests/fakedist-vec-off/generated_test.go index d47ec358e625..bfe8b1b41096 100644 --- a/pkg/sql/sqlitelogictest/tests/fakedist-vec-off/generated_test.go +++ b/pkg/sql/sqlitelogictest/tests/fakedist-vec-off/generated_test.go @@ -32,7 +32,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/randutil" ) -const configIdx = 5 +const configIdx = 6 var sqliteLogicTestDir string diff --git a/pkg/sql/sqlitelogictest/tests/fakedist/generated_test.go b/pkg/sql/sqlitelogictest/tests/fakedist/generated_test.go index 1fedfbff42a2..bd9c8b01f575 100644 --- a/pkg/sql/sqlitelogictest/tests/fakedist/generated_test.go +++ b/pkg/sql/sqlitelogictest/tests/fakedist/generated_test.go @@ -32,7 +32,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/randutil" ) -const configIdx = 4 +const configIdx = 5 var sqliteLogicTestDir string diff --git a/pkg/sql/sqlitelogictest/tests/local-mixed-24.1/generated_test.go b/pkg/sql/sqlitelogictest/tests/local-mixed-24.1/generated_test.go index 4b6fcd6fce42..d8bfdf5bb742 100644 --- a/pkg/sql/sqlitelogictest/tests/local-mixed-24.1/generated_test.go +++ b/pkg/sql/sqlitelogictest/tests/local-mixed-24.1/generated_test.go @@ -32,7 +32,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/randutil" ) -const configIdx = 18 +const configIdx = 19 var sqliteLogicTestDir string diff --git a/pkg/sql/sqlitelogictest/tests/local-mixed-24.2/generated_test.go b/pkg/sql/sqlitelogictest/tests/local-mixed-24.2/generated_test.go index c521eb02ff32..d1c2997f462d 100644 --- a/pkg/sql/sqlitelogictest/tests/local-mixed-24.2/generated_test.go +++ b/pkg/sql/sqlitelogictest/tests/local-mixed-24.2/generated_test.go @@ -32,7 +32,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/randutil" ) -const configIdx = 19 +const configIdx = 20 var sqliteLogicTestDir string diff --git a/pkg/sql/sqlitelogictest/tests/local-repeatable-read/BUILD.bazel b/pkg/sql/sqlitelogictest/tests/local-repeatable-read/BUILD.bazel new file mode 100644 index 000000000000..be245adeb672 --- /dev/null +++ b/pkg/sql/sqlitelogictest/tests/local-repeatable-read/BUILD.bazel @@ -0,0 +1,28 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_test") + +go_test( + name = "local-repeatable-read_test", + size = "enormous", + srcs = ["generated_test.go"], + data = [ + "//c-deps:libgeos", # keep + "@com_github_cockroachdb_sqllogictest//:testfiles", # keep + ], + exec_properties = {"test.Pool": "default"}, + shard_count = 48, + tags = ["cpu:1"], + deps = [ + "//pkg/base", + "//pkg/build/bazel", + "//pkg/security/securityassets", + "//pkg/security/securitytest", + "//pkg/server", + "//pkg/sql/logictest", + "//pkg/sql/sqlitelogictest", + "//pkg/testutils/serverutils", + "//pkg/testutils/skip", + "//pkg/testutils/testcluster", + "//pkg/util/leaktest", + "//pkg/util/randutil", + ], +) diff --git a/pkg/sql/sqlitelogictest/tests/local-repeatable-read/generated_test.go b/pkg/sql/sqlitelogictest/tests/local-repeatable-read/generated_test.go new file mode 100644 index 000000000000..70a1243ac963 --- /dev/null +++ b/pkg/sql/sqlitelogictest/tests/local-repeatable-read/generated_test.go @@ -0,0 +1,1439 @@ +// Copyright 2022 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +// Code generated by generate-logictest, DO NOT EDIT. + +package testlocal_repeatable_read + +import ( + "flag" + "os" + "path/filepath" + "testing" + + "github.com/cockroachdb/cockroach/pkg/base" + "github.com/cockroachdb/cockroach/pkg/build/bazel" + "github.com/cockroachdb/cockroach/pkg/security/securityassets" + "github.com/cockroachdb/cockroach/pkg/security/securitytest" + "github.com/cockroachdb/cockroach/pkg/server" + "github.com/cockroachdb/cockroach/pkg/sql/logictest" + "github.com/cockroachdb/cockroach/pkg/sql/sqlitelogictest" + "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" + "github.com/cockroachdb/cockroach/pkg/testutils/skip" + "github.com/cockroachdb/cockroach/pkg/testutils/testcluster" + "github.com/cockroachdb/cockroach/pkg/util/leaktest" + "github.com/cockroachdb/cockroach/pkg/util/randutil" +) + +const configIdx = 4 + +var sqliteLogicTestDir string + +func init() { +} + +func TestMain(m *testing.M) { + flag.Parse() + if *logictest.Bigtest { + if bazel.BuiltWithBazel() { + var err error + sqliteLogicTestDir, err = bazel.Runfile("external/com_github_cockroachdb_sqllogictest") + if err != nil { + panic(err) + } + } else { + var err error + sqliteLogicTestDir, err = sqlitelogictest.FindLocalLogicTestClone() + if err != nil { + panic(err) + } + } + } + securityassets.SetLoader(securitytest.EmbeddedAssets) + randutil.SeedForTests() + serverutils.InitTestServerFactory(server.TestServerFactory) + serverutils.InitTestClusterFactory(testcluster.TestClusterFactory) + + defer serverutils.TestingSetDefaultTenantSelectionOverride( + base.TestIsForStuffThatShouldWorkWithSecondaryTenantsButDoesntYet(76378), + )() + + os.Exit(m.Run()) +} + +func runSqliteLogicTest(t *testing.T, file string) { + skip.UnderDeadlock(t, "times out and/or hangs") + if !*logictest.Bigtest { + skip.IgnoreLint(t, "-bigtest flag must be specified to run this test") + } + // SQLite logic tests can be very memory intensive, so we give them a larger + // limit than other logic tests get. Also some of the 'delete' files become + // extremely slow when MVCC range tombstones are enabled for point deletes, + // so we disable that.
+ serverArgs := logictest.TestServerArgs{ + MaxSQLMemoryLimit: 512 << 20, // 512 MiB + DisableUseMVCCRangeTombstonesForPointDeletes: true, + // Some sqlite tests with very low bytes limit value are too slow, so + // ensure 3 KiB lower bound. + BatchBytesLimitLowerBound: 3 << 10, // 3 KiB + } + logictest.RunLogicTest(t, serverArgs, configIdx, filepath.Join(sqliteLogicTestDir, file)) +} + +func TestSqlLiteLogic_testindexbetween1slt_good_0_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/between/1/slt_good_0.test") +} + +func TestSqlLiteLogic_testindexbetween10slt_good_0_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/between/10/slt_good_0.test") +} + +func TestSqlLiteLogic_testindexbetween10slt_good_1_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/between/10/slt_good_1.test") +} + +func TestSqlLiteLogic_testindexbetween10slt_good_2_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/between/10/slt_good_2.test") +} + +func TestSqlLiteLogic_testindexbetween10slt_good_3_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/between/10/slt_good_3.test") +} + +func TestSqlLiteLogic_testindexbetween10slt_good_4_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/between/10/slt_good_4.test") +} + +func TestSqlLiteLogic_testindexbetween10slt_good_5_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/between/10/slt_good_5.test") +} + +func TestSqlLiteLogic_testindexbetween100slt_good_0_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/between/100/slt_good_0.test") +} + +func TestSqlLiteLogic_testindexbetween100slt_good_1_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/between/100/slt_good_1.test") +} + +func TestSqlLiteLogic_testindexbetween100slt_good_2_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/between/100/slt_good_2.test") +} + +func TestSqlLiteLogic_testindexbetween100slt_good_3_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/between/100/slt_good_3.test") +} + +func TestSqlLiteLogic_testindexbetween100slt_good_4_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/between/100/slt_good_4.test") +} + +func TestSqlLiteLogic_testindexbetween1000slt_good_0_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/between/1000/slt_good_0.test") +} + +func TestSqlLiteLogic_testindexcommute10slt_good_0_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/commute/10/slt_good_0.test") +} + +func TestSqlLiteLogic_testindexcommute10slt_good_1_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/commute/10/slt_good_1.test") +} + +func TestSqlLiteLogic_testindexcommute10slt_good_10_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/commute/10/slt_good_10.test") +} + +func TestSqlLiteLogic_testindexcommute10slt_good_11_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/commute/10/slt_good_11.test") +} + +func 
TestSqlLiteLogic_testindexcommute10slt_good_12_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/commute/10/slt_good_12.test") +} + +func TestSqlLiteLogic_testindexcommute10slt_good_13_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/commute/10/slt_good_13.test") +} + +func TestSqlLiteLogic_testindexcommute10slt_good_14_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/commute/10/slt_good_14.test") +} + +func TestSqlLiteLogic_testindexcommute10slt_good_15_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/commute/10/slt_good_15.test") +} + +func TestSqlLiteLogic_testindexcommute10slt_good_16_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/commute/10/slt_good_16.test") +} + +func TestSqlLiteLogic_testindexcommute10slt_good_17_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/commute/10/slt_good_17.test") +} + +func TestSqlLiteLogic_testindexcommute10slt_good_18_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/commute/10/slt_good_18.test") +} + +func TestSqlLiteLogic_testindexcommute10slt_good_19_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/commute/10/slt_good_19.test") +} + +func TestSqlLiteLogic_testindexcommute10slt_good_2_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/commute/10/slt_good_2.test") +} + +func TestSqlLiteLogic_testindexcommute10slt_good_20_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/commute/10/slt_good_20.test") +} + +func TestSqlLiteLogic_testindexcommute10slt_good_21_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/commute/10/slt_good_21.test") +} + +func TestSqlLiteLogic_testindexcommute10slt_good_22_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/commute/10/slt_good_22.test") +} + +func TestSqlLiteLogic_testindexcommute10slt_good_23_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/commute/10/slt_good_23.test") +} + +func TestSqlLiteLogic_testindexcommute10slt_good_24_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/commute/10/slt_good_24.test") +} + +func TestSqlLiteLogic_testindexcommute10slt_good_25_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/commute/10/slt_good_25.test") +} + +func TestSqlLiteLogic_testindexcommute10slt_good_26_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/commute/10/slt_good_26.test") +} + +func TestSqlLiteLogic_testindexcommute10slt_good_27_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/commute/10/slt_good_27.test") +} + +func TestSqlLiteLogic_testindexcommute10slt_good_28_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/commute/10/slt_good_28.test") +} + +func TestSqlLiteLogic_testindexcommute10slt_good_29_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/commute/10/slt_good_29.test") +} + +func TestSqlLiteLogic_testindexcommute10slt_good_3_test( + t 
*testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/commute/10/slt_good_3.test") +} + +func TestSqlLiteLogic_testindexcommute10slt_good_30_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/commute/10/slt_good_30.test") +} + +func TestSqlLiteLogic_testindexcommute10slt_good_31_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/commute/10/slt_good_31.test") +} + +func TestSqlLiteLogic_testindexcommute10slt_good_32_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/commute/10/slt_good_32.test") +} + +func TestSqlLiteLogic_testindexcommute10slt_good_33_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/commute/10/slt_good_33.test") +} + +func TestSqlLiteLogic_testindexcommute10slt_good_34_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/commute/10/slt_good_34.test") +} + +func TestSqlLiteLogic_testindexcommute10slt_good_4_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/commute/10/slt_good_4.test") +} + +func TestSqlLiteLogic_testindexcommute10slt_good_5_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/commute/10/slt_good_5.test") +} + +func TestSqlLiteLogic_testindexcommute10slt_good_6_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/commute/10/slt_good_6.test") +} + +func TestSqlLiteLogic_testindexcommute10slt_good_7_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/commute/10/slt_good_7.test") +} + +func TestSqlLiteLogic_testindexcommute10slt_good_8_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/commute/10/slt_good_8.test") +} + +func TestSqlLiteLogic_testindexcommute10slt_good_9_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/commute/10/slt_good_9.test") +} + +func TestSqlLiteLogic_testindexcommute100slt_good_0_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/commute/100/slt_good_0.test") +} + +func TestSqlLiteLogic_testindexcommute100slt_good_1_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/commute/100/slt_good_1.test") +} + +func TestSqlLiteLogic_testindexcommute100slt_good_10_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/commute/100/slt_good_10.test") +} + +func TestSqlLiteLogic_testindexcommute100slt_good_11_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/commute/100/slt_good_11.test") +} + +func TestSqlLiteLogic_testindexcommute100slt_good_12_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/commute/100/slt_good_12.test") +} + +func TestSqlLiteLogic_testindexcommute100slt_good_2_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/commute/100/slt_good_2.test") +} + +func TestSqlLiteLogic_testindexcommute100slt_good_3_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/commute/100/slt_good_3.test") +} + +func TestSqlLiteLogic_testindexcommute100slt_good_4_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + 
runSqliteLogicTest(t, "/test/index/commute/100/slt_good_4.test") +} + +func TestSqlLiteLogic_testindexcommute100slt_good_5_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/commute/100/slt_good_5.test") +} + +func TestSqlLiteLogic_testindexcommute100slt_good_6_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/commute/100/slt_good_6.test") +} + +func TestSqlLiteLogic_testindexcommute100slt_good_7_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/commute/100/slt_good_7.test") +} + +func TestSqlLiteLogic_testindexcommute100slt_good_8_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/commute/100/slt_good_8.test") +} + +func TestSqlLiteLogic_testindexcommute100slt_good_9_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/commute/100/slt_good_9.test") +} + +func TestSqlLiteLogic_testindexcommute1000slt_good_0_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/commute/1000/slt_good_0.test") +} + +func TestSqlLiteLogic_testindexcommute1000slt_good_1_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/commute/1000/slt_good_1.test") +} + +func TestSqlLiteLogic_testindexcommute1000slt_good_2_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/commute/1000/slt_good_2.test") +} + +func TestSqlLiteLogic_testindexcommute1000slt_good_3_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/commute/1000/slt_good_3.test") +} + +func TestSqlLiteLogic_testindexdelete1slt_good_0_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/delete/1/slt_good_0.test") +} + +func TestSqlLiteLogic_testindexdelete10slt_good_0_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/delete/10/slt_good_0.test") +} + +func TestSqlLiteLogic_testindexdelete10slt_good_1_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/delete/10/slt_good_1.test") +} + +func TestSqlLiteLogic_testindexdelete10slt_good_2_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/delete/10/slt_good_2.test") +} + +func TestSqlLiteLogic_testindexdelete10slt_good_3_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/delete/10/slt_good_3.test") +} + +func TestSqlLiteLogic_testindexdelete10slt_good_4_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/delete/10/slt_good_4.test") +} + +func TestSqlLiteLogic_testindexdelete10slt_good_5_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/delete/10/slt_good_5.test") +} + +func TestSqlLiteLogic_testindexdelete100slt_good_0_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/delete/100/slt_good_0.test") +} + +func TestSqlLiteLogic_testindexdelete100slt_good_1_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/delete/100/slt_good_1.test") +} + +func TestSqlLiteLogic_testindexdelete100slt_good_2_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/delete/100/slt_good_2.test") +} + +func 
TestSqlLiteLogic_testindexdelete100slt_good_3_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/delete/100/slt_good_3.test") +} + +func TestSqlLiteLogic_testindexdelete1000slt_good_0_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/delete/1000/slt_good_0.test") +} + +func TestSqlLiteLogic_testindexdelete1000slt_good_1_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/delete/1000/slt_good_1.test") +} + +func TestSqlLiteLogic_testindexdelete10000slt_good_0_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/delete/10000/slt_good_0.test") +} + +func TestSqlLiteLogic_testindexin10slt_good_0_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/in/10/slt_good_0.test") +} + +func TestSqlLiteLogic_testindexin10slt_good_1_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/in/10/slt_good_1.test") +} + +func TestSqlLiteLogic_testindexin10slt_good_2_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/in/10/slt_good_2.test") +} + +func TestSqlLiteLogic_testindexin10slt_good_3_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/in/10/slt_good_3.test") +} + +func TestSqlLiteLogic_testindexin10slt_good_4_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/in/10/slt_good_4.test") +} + +func TestSqlLiteLogic_testindexin10slt_good_5_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/in/10/slt_good_5.test") +} + +func TestSqlLiteLogic_testindexin100slt_good_0_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/in/100/slt_good_0.test") +} + +func TestSqlLiteLogic_testindexin100slt_good_1_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/in/100/slt_good_1.test") +} + +func TestSqlLiteLogic_testindexin100slt_good_2_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/in/100/slt_good_2.test") +} + +func TestSqlLiteLogic_testindexin100slt_good_3_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/in/100/slt_good_3.test") +} + +func TestSqlLiteLogic_testindexin100slt_good_4_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/in/100/slt_good_4.test") +} + +func TestSqlLiteLogic_testindexin1000slt_good_0_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/in/1000/slt_good_0.test") +} + +func TestSqlLiteLogic_testindexin1000slt_good_1_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/in/1000/slt_good_1.test") +} + +func TestSqlLiteLogic_testindexorderby10slt_good_0_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby/10/slt_good_0.test") +} + +func TestSqlLiteLogic_testindexorderby10slt_good_1_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby/10/slt_good_1.test") +} + +func TestSqlLiteLogic_testindexorderby10slt_good_10_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby/10/slt_good_10.test") +} + +func 
TestSqlLiteLogic_testindexorderby10slt_good_11_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby/10/slt_good_11.test") +} + +func TestSqlLiteLogic_testindexorderby10slt_good_12_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby/10/slt_good_12.test") +} + +func TestSqlLiteLogic_testindexorderby10slt_good_13_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby/10/slt_good_13.test") +} + +func TestSqlLiteLogic_testindexorderby10slt_good_14_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby/10/slt_good_14.test") +} + +func TestSqlLiteLogic_testindexorderby10slt_good_15_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby/10/slt_good_15.test") +} + +func TestSqlLiteLogic_testindexorderby10slt_good_16_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby/10/slt_good_16.test") +} + +func TestSqlLiteLogic_testindexorderby10slt_good_17_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby/10/slt_good_17.test") +} + +func TestSqlLiteLogic_testindexorderby10slt_good_18_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby/10/slt_good_18.test") +} + +func TestSqlLiteLogic_testindexorderby10slt_good_19_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby/10/slt_good_19.test") +} + +func TestSqlLiteLogic_testindexorderby10slt_good_2_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby/10/slt_good_2.test") +} + +func TestSqlLiteLogic_testindexorderby10slt_good_20_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby/10/slt_good_20.test") +} + +func TestSqlLiteLogic_testindexorderby10slt_good_21_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby/10/slt_good_21.test") +} + +func TestSqlLiteLogic_testindexorderby10slt_good_22_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby/10/slt_good_22.test") +} + +func TestSqlLiteLogic_testindexorderby10slt_good_23_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby/10/slt_good_23.test") +} + +func TestSqlLiteLogic_testindexorderby10slt_good_24_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby/10/slt_good_24.test") +} + +func TestSqlLiteLogic_testindexorderby10slt_good_25_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby/10/slt_good_25.test") +} + +func TestSqlLiteLogic_testindexorderby10slt_good_3_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby/10/slt_good_3.test") +} + +func TestSqlLiteLogic_testindexorderby10slt_good_4_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby/10/slt_good_4.test") +} + +func TestSqlLiteLogic_testindexorderby10slt_good_5_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby/10/slt_good_5.test") +} + +func TestSqlLiteLogic_testindexorderby10slt_good_6_test( + t 
*testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby/10/slt_good_6.test") +} + +func TestSqlLiteLogic_testindexorderby10slt_good_7_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby/10/slt_good_7.test") +} + +func TestSqlLiteLogic_testindexorderby10slt_good_8_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby/10/slt_good_8.test") +} + +func TestSqlLiteLogic_testindexorderby10slt_good_9_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby/10/slt_good_9.test") +} + +func TestSqlLiteLogic_testindexorderby100slt_good_0_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby/100/slt_good_0.test") +} + +func TestSqlLiteLogic_testindexorderby100slt_good_1_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby/100/slt_good_1.test") +} + +func TestSqlLiteLogic_testindexorderby100slt_good_2_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby/100/slt_good_2.test") +} + +func TestSqlLiteLogic_testindexorderby100slt_good_3_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby/100/slt_good_3.test") +} + +func TestSqlLiteLogic_testindexorderby1000slt_good_0_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby/1000/slt_good_0.test") +} + +func TestSqlLiteLogic_testindexorderby_nosort10slt_good_0_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby_nosort/10/slt_good_0.test") +} + +func TestSqlLiteLogic_testindexorderby_nosort10slt_good_1_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby_nosort/10/slt_good_1.test") +} + +func TestSqlLiteLogic_testindexorderby_nosort10slt_good_10_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby_nosort/10/slt_good_10.test") +} + +func TestSqlLiteLogic_testindexorderby_nosort10slt_good_11_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby_nosort/10/slt_good_11.test") +} + +func TestSqlLiteLogic_testindexorderby_nosort10slt_good_12_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby_nosort/10/slt_good_12.test") +} + +func TestSqlLiteLogic_testindexorderby_nosort10slt_good_13_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby_nosort/10/slt_good_13.test") +} + +func TestSqlLiteLogic_testindexorderby_nosort10slt_good_14_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby_nosort/10/slt_good_14.test") +} + +func TestSqlLiteLogic_testindexorderby_nosort10slt_good_15_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby_nosort/10/slt_good_15.test") +} + +func TestSqlLiteLogic_testindexorderby_nosort10slt_good_16_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby_nosort/10/slt_good_16.test") +} + +func TestSqlLiteLogic_testindexorderby_nosort10slt_good_17_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, 
"/test/index/orderby_nosort/10/slt_good_17.test") +} + +func TestSqlLiteLogic_testindexorderby_nosort10slt_good_18_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby_nosort/10/slt_good_18.test") +} + +func TestSqlLiteLogic_testindexorderby_nosort10slt_good_19_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby_nosort/10/slt_good_19.test") +} + +func TestSqlLiteLogic_testindexorderby_nosort10slt_good_2_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby_nosort/10/slt_good_2.test") +} + +func TestSqlLiteLogic_testindexorderby_nosort10slt_good_20_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby_nosort/10/slt_good_20.test") +} + +func TestSqlLiteLogic_testindexorderby_nosort10slt_good_21_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby_nosort/10/slt_good_21.test") +} + +func TestSqlLiteLogic_testindexorderby_nosort10slt_good_22_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby_nosort/10/slt_good_22.test") +} + +func TestSqlLiteLogic_testindexorderby_nosort10slt_good_23_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby_nosort/10/slt_good_23.test") +} + +func TestSqlLiteLogic_testindexorderby_nosort10slt_good_24_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby_nosort/10/slt_good_24.test") +} + +func TestSqlLiteLogic_testindexorderby_nosort10slt_good_25_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby_nosort/10/slt_good_25.test") +} + +func TestSqlLiteLogic_testindexorderby_nosort10slt_good_26_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby_nosort/10/slt_good_26.test") +} + +func TestSqlLiteLogic_testindexorderby_nosort10slt_good_27_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby_nosort/10/slt_good_27.test") +} + +func TestSqlLiteLogic_testindexorderby_nosort10slt_good_28_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby_nosort/10/slt_good_28.test") +} + +func TestSqlLiteLogic_testindexorderby_nosort10slt_good_29_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby_nosort/10/slt_good_29.test") +} + +func TestSqlLiteLogic_testindexorderby_nosort10slt_good_3_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby_nosort/10/slt_good_3.test") +} + +func TestSqlLiteLogic_testindexorderby_nosort10slt_good_30_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby_nosort/10/slt_good_30.test") +} + +func TestSqlLiteLogic_testindexorderby_nosort10slt_good_31_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby_nosort/10/slt_good_31.test") +} + +func TestSqlLiteLogic_testindexorderby_nosort10slt_good_32_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby_nosort/10/slt_good_32.test") +} + +func TestSqlLiteLogic_testindexorderby_nosort10slt_good_33_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + 
runSqliteLogicTest(t, "/test/index/orderby_nosort/10/slt_good_33.test") +} + +func TestSqlLiteLogic_testindexorderby_nosort10slt_good_34_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby_nosort/10/slt_good_34.test") +} + +func TestSqlLiteLogic_testindexorderby_nosort10slt_good_35_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby_nosort/10/slt_good_35.test") +} + +func TestSqlLiteLogic_testindexorderby_nosort10slt_good_36_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby_nosort/10/slt_good_36.test") +} + +func TestSqlLiteLogic_testindexorderby_nosort10slt_good_37_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby_nosort/10/slt_good_37.test") +} + +func TestSqlLiteLogic_testindexorderby_nosort10slt_good_38_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby_nosort/10/slt_good_38.test") +} + +func TestSqlLiteLogic_testindexorderby_nosort10slt_good_39_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby_nosort/10/slt_good_39.test") +} + +func TestSqlLiteLogic_testindexorderby_nosort10slt_good_4_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby_nosort/10/slt_good_4.test") +} + +func TestSqlLiteLogic_testindexorderby_nosort10slt_good_5_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby_nosort/10/slt_good_5.test") +} + +func TestSqlLiteLogic_testindexorderby_nosort10slt_good_6_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby_nosort/10/slt_good_6.test") +} + +func TestSqlLiteLogic_testindexorderby_nosort10slt_good_7_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby_nosort/10/slt_good_7.test") +} + +func TestSqlLiteLogic_testindexorderby_nosort10slt_good_8_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby_nosort/10/slt_good_8.test") +} + +func TestSqlLiteLogic_testindexorderby_nosort10slt_good_9_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby_nosort/10/slt_good_9.test") +} + +func TestSqlLiteLogic_testindexorderby_nosort100slt_good_0_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby_nosort/100/slt_good_0.test") +} + +func TestSqlLiteLogic_testindexorderby_nosort100slt_good_1_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby_nosort/100/slt_good_1.test") +} + +func TestSqlLiteLogic_testindexorderby_nosort100slt_good_2_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby_nosort/100/slt_good_2.test") +} + +func TestSqlLiteLogic_testindexorderby_nosort100slt_good_3_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby_nosort/100/slt_good_3.test") +} + +func TestSqlLiteLogic_testindexorderby_nosort100slt_good_4_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby_nosort/100/slt_good_4.test") +} + +func TestSqlLiteLogic_testindexorderby_nosort100slt_good_5_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + 
runSqliteLogicTest(t, "/test/index/orderby_nosort/100/slt_good_5.test") +} + +func TestSqlLiteLogic_testindexorderby_nosort100slt_good_6_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby_nosort/100/slt_good_6.test") +} + +func TestSqlLiteLogic_testindexorderby_nosort1000slt_good_0_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby_nosort/1000/slt_good_0.test") +} + +func TestSqlLiteLogic_testindexorderby_nosort1000slt_good_1_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/orderby_nosort/1000/slt_good_1.test") +} + +func TestSqlLiteLogic_testindexview10slt_good_0_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/view/10/slt_good_0.test") +} + +func TestSqlLiteLogic_testindexview10slt_good_1_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/view/10/slt_good_1.test") +} + +func TestSqlLiteLogic_testindexview10slt_good_2_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/view/10/slt_good_2.test") +} + +func TestSqlLiteLogic_testindexview10slt_good_3_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/view/10/slt_good_3.test") +} + +func TestSqlLiteLogic_testindexview10slt_good_4_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/view/10/slt_good_4.test") +} + +func TestSqlLiteLogic_testindexview10slt_good_5_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/view/10/slt_good_5.test") +} + +func TestSqlLiteLogic_testindexview10slt_good_6_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/view/10/slt_good_6.test") +} + +func TestSqlLiteLogic_testindexview10slt_good_7_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/view/10/slt_good_7.test") +} + +func TestSqlLiteLogic_testindexview100slt_good_0_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/view/100/slt_good_0.test") +} + +func TestSqlLiteLogic_testindexview100slt_good_1_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/view/100/slt_good_1.test") +} + +func TestSqlLiteLogic_testindexview100slt_good_2_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/view/100/slt_good_2.test") +} + +func TestSqlLiteLogic_testindexview100slt_good_3_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/view/100/slt_good_3.test") +} + +func TestSqlLiteLogic_testindexview100slt_good_4_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/view/100/slt_good_4.test") +} + +func TestSqlLiteLogic_testindexview100slt_good_5_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/view/100/slt_good_5.test") +} + +func TestSqlLiteLogic_testindexview1000slt_good_0_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/view/1000/slt_good_0.test") +} + +func TestSqlLiteLogic_testindexview10000slt_good_0_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/index/view/10000/slt_good_0.test") +} + +func TestSqlLiteLogic_testselect1_test( 
+ t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/select1.test") +} + +func TestSqlLiteLogic_testselect2_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/select2.test") +} + +func TestSqlLiteLogic_testselect3_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/select3.test") +} + +func TestSqlLiteLogic_testselect4_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/select4.test") +} + +func TestSqlLiteLogic_testselect5_test( + t *testing.T, +) { + defer leaktest.AfterTest(t)() + runSqliteLogicTest(t, "/test/select5.test") +} diff --git a/pkg/sql/sqlliveness/slinstance/BUILD.bazel b/pkg/sql/sqlliveness/slinstance/BUILD.bazel index 1098aa450150..59fb16c4ff5c 100644 --- a/pkg/sql/sqlliveness/slinstance/BUILD.bazel +++ b/pkg/sql/sqlliveness/slinstance/BUILD.bazel @@ -6,6 +6,7 @@ go_library( importpath = "github.com/cockroachdb/cockroach/pkg/sql/sqlliveness/slinstance", visibility = ["//visibility:public"], deps = [ + "//pkg/kv/kvpb", "//pkg/settings/cluster", "//pkg/sql/enum", "//pkg/sql/sqlliveness", diff --git a/pkg/sql/sqlliveness/slinstance/slinstance.go b/pkg/sql/sqlliveness/slinstance/slinstance.go index 34a4efcaee96..31cb012b3136 100644 --- a/pkg/sql/sqlliveness/slinstance/slinstance.go +++ b/pkg/sql/sqlliveness/slinstance/slinstance.go @@ -19,6 +19,7 @@ import ( "sync" "time" + "github.com/cockroachdb/cockroach/pkg/kv/kvpb" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/enum" "github.com/cockroachdb/cockroach/pkg/sql/sqlliveness" @@ -199,14 +200,6 @@ func (l *Instance) createSession(ctx context.Context) (*session, error) { if l.currentRegion != nil { region = l.currentRegion } - id, err := slstorage.MakeSessionID(region, uuid.MakeV4()) - if err != nil { - return nil, err - } - - s := &session{ - id: id, - } opts := retry.Options{ InitialBackoff: 10 * time.Millisecond, @@ -214,7 +207,17 @@ func (l *Instance) createSession(ctx context.Context) (*session, error) { Multiplier: 1.5, } everySecond := log.Every(time.Second) + var err error + s := &session{} for i, r := 0, retry.StartWithCtx(ctx, opts); r.Next(); { + // Allocate a new session ID initially or if we hit + // an ambiguous result error. + if len(s.id) == 0 { + s.id, err = slstorage.MakeSessionID(region, uuid.MakeV4()) + if err != nil { + return nil, err + } + } // If we fail to insert the session, then reset the start time // and expiration, since otherwise there is a danger of inserting // an expired session. @@ -236,6 +239,14 @@ func (l *Instance) createSession(ctx context.Context) (*session, error) { if grpcutil.IsAuthError(err) { break } + // Previous insert was ambiguous, so select a new session ID, + // since there may be a row that exists. 
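The comment just above, together with the errors.HasType check that follows, is the heart of this fix: a session insert that ends in an ambiguous result may or may not have written a row, so the old ID must never be reused. A minimal standalone sketch of that retry shape, assuming hypothetical insert and ID helpers rather than the real slstorage API:

package main

import (
	"errors"
	"fmt"
	"math/rand"
)

var errAmbiguous = errors.New("ambiguous result")

// makeInsertFn returns a stand-in for the storage write; it fails
// ambiguously on the first call so the retry loop must rotate the ID.
func makeInsertFn() func(id string) error {
	calls := 0
	return func(id string) error {
		calls++
		if calls == 1 {
			return errAmbiguous
		}
		return nil
	}
}

func createSession(insert func(string) error) (string, error) {
	id := ""
	for attempt := 0; attempt < 5; attempt++ {
		if id == "" {
			// Mint a fresh ID initially, or after an ambiguous attempt.
			id = fmt.Sprintf("session-%04x", rand.Intn(1<<16))
		}
		err := insert(id)
		if err == nil {
			return id, nil
		}
		if errors.Is(err, errAmbiguous) {
			// The write may have landed under the old ID; drop it so the
			// next iteration allocates a new one.
			id = ""
		}
	}
	return "", errors.New("retries exhausted")
}

func main() {
	id, err := createSession(makeInsertFn())
	fmt.Println(id, err) // a fresh, second-generation ID and a nil error
}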
+ if errors.HasType(err, (*kvpb.AmbiguousResultError)(nil)) { + log.Infof(ctx, + "failed to create a session due to an ambiguous result error: %s", + s.ID().String()) + s.id = "" + } continue } break diff --git a/pkg/sql/sqlliveness/slinstance/slinstance_test.go b/pkg/sql/sqlliveness/slinstance/slinstance_test.go index 9aec12cc43a7..0960d42f221d 100644 --- a/pkg/sql/sqlliveness/slinstance/slinstance_test.go +++ b/pkg/sql/sqlliveness/slinstance/slinstance_test.go @@ -60,6 +60,7 @@ func TestSQLInstance(t *testing.T) { numRetries int initialTimestamp hlc.Timestamp nextTimestamp hlc.Timestamp + lastSessionID sqlliveness.SessionID } fakeStorage.SetInjectedFailure(func(sid sqlliveness.SessionID, expiration hlc.Timestamp) error { failureMu.Lock() @@ -68,17 +69,21 @@ func TestSQLInstance(t *testing.T) { if failureMu.numRetries == 1 { failureMu.initialTimestamp = expiration return kvpb.NewReplicaUnavailableError(errors.Newf("fake injected error"), &roachpb.RangeDescriptor{}, roachpb.ReplicaDescriptor{}) + } else if failureMu.numRetries == 2 { + failureMu.lastSessionID = sid + return kvpb.NewAmbiguousResultError(errors.Newf("fake injected error")) } failureMu.nextTimestamp = expiration return nil }) sqlInstance.Start(ctx, nil) - // We expect two attempts to insert, since we inject a replica unavailable - // error on the first attempt. + // We expect three attempts to insert, since we inject a replica unavailable + // error on the first attempt. On the second attempt we will inject an ambiguous + // result error. The third and final attempt will be successful. testutils.SucceedsSoon(t, func() error { failureMu.Lock() defer failureMu.Unlock() - if failureMu.numRetries < 2 { + if failureMu.numRetries < 3 { return errors.AssertionFailedf("unexpected number of retries on session insertion, "+ "expected at least 3, got %d", failureMu.numRetries) } @@ -86,6 +91,14 @@ func TestSQLInstance(t *testing.T) { return errors.AssertionFailedf("timestamp should move forward on each retry, "+ "got %s. Previous timestamp was: %s", failureMu.nextTimestamp, failureMu.initialTimestamp) } + session, err := sqlInstance.Session(ctx) + if err != nil { + return err + } + if session.ID() == failureMu.lastSessionID || len(failureMu.lastSessionID) == 0 { + return errors.AssertionFailedf("new session ID should have been assigned after an ambiguous"+ + " result error.
Current: %s Previous: %s", session.ID(), failureMu.lastSessionID) + } return nil }) diff --git a/pkg/sql/sqlstats/BUILD.bazel b/pkg/sql/sqlstats/BUILD.bazel index f79e09f8ddb1..15b92c2fc5f7 100644 --- a/pkg/sql/sqlstats/BUILD.bazel +++ b/pkg/sql/sqlstats/BUILD.bazel @@ -21,7 +21,6 @@ go_library( "//pkg/sql/execstats", "//pkg/sql/sem/tree", "//pkg/sql/sessiondata", - "//pkg/sql/sqlstats/insights", "//pkg/sql/sqlstats/persistedsqlstats/sqlstatsutil", "//pkg/util/log", "//pkg/util/metric", diff --git a/pkg/sql/sqlstats/insights/BUILD.bazel b/pkg/sql/sqlstats/insights/BUILD.bazel index fe161b45623b..c1cb1b123dbd 100644 --- a/pkg/sql/sqlstats/insights/BUILD.bazel +++ b/pkg/sql/sqlstats/insights/BUILD.bazel @@ -13,6 +13,7 @@ go_library( "provider.go", "registry.go", "store.go", + "test_utils.go", ], embed = [":insights_go_proto"], importpath = "github.com/cockroachdb/cockroach/pkg/sql/sqlstats/insights", diff --git a/pkg/sql/sqlstats/insights/ingester.go b/pkg/sql/sqlstats/insights/ingester.go index 64c342c10e18..1462b637f901 100644 --- a/pkg/sql/sqlstats/insights/ingester.go +++ b/pkg/sql/sqlstats/insights/ingester.go @@ -40,7 +40,8 @@ type ConcurrentBufferIngester struct { registry *lockingRegistry clearRegistry uint32 - closeCh chan struct{} + closeCh chan struct{} + testingKnobs *TestingKnobs } type eventBufChPayload struct { @@ -145,6 +146,8 @@ func (i *ConcurrentBufferIngester) ingest(events *eventBuffer) { i.registry.ObserveStatement(e.sessionID, e.statement) } else if e.transaction != nil { i.registry.ObserveTransaction(e.sessionID, e.transaction) + } else if e.sessionID != (clusterunique.ID{}) { + i.registry.clearSession(e.sessionID) } events[idx] = event{} } @@ -156,6 +159,12 @@ func (i *ConcurrentBufferIngester) ObserveStatement( if !i.registry.enabled() { return } + + if i.testingKnobs != nil && i.testingKnobs.InsightsWriterStmtInterceptor != nil { + i.testingKnobs.InsightsWriterStmtInterceptor(sessionID, statement) + return + } + i.guard.AtomicWrite(func(writerIdx int64) { i.guard.eventBuffer[writerIdx] = event{ sessionID: sessionID, @@ -170,6 +179,12 @@ func (i *ConcurrentBufferIngester) ObserveTransaction( if !i.registry.enabled() { return } + + if i.testingKnobs != nil && i.testingKnobs.InsightsWriterTxnInterceptor != nil { + i.testingKnobs.InsightsWriterTxnInterceptor(sessionID, transaction) + return + } + i.guard.AtomicWrite(func(writerIdx int64) { i.guard.eventBuffer[writerIdx] = event{ sessionID: sessionID, @@ -178,6 +193,16 @@ func (i *ConcurrentBufferIngester) ObserveTransaction( }) } +// ClearSession sends a signal to the underlying registry to clear any cached +// data associated with the given sessionID. This is an async operation. 
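The ClearSession method documented just above (its implementation follows) does not clear anything synchronously: it enqueues an event that carries only a session ID, and the ingest loop shown earlier treats such a payload-less event as a clear signal. A small sketch of that dispatch, with plain strings standing in for clusterunique.ID and the real concurrent buffer machinery:

package main

import "fmt"

type event struct {
	sessionID string
	statement *string
	txn       *string
}

type registry struct{ statements map[string][]string }

func (r *registry) observeStatement(sid, stmt string) {
	r.statements[sid] = append(r.statements[sid], stmt)
}

// clearSession mirrors the registry-side cleanup: the session's buffered
// statements are dropped in one shot.
func (r *registry) clearSession(sid string) { delete(r.statements, sid) }

// ingest dispatches on the event's contents; an event with neither a
// statement nor a transaction is interpreted as a clear-session signal.
func ingest(r *registry, events []event) {
	for _, e := range events {
		switch {
		case e.statement != nil:
			r.observeStatement(e.sessionID, *e.statement)
		case e.txn != nil:
			// transaction handling elided
		case e.sessionID != "":
			r.clearSession(e.sessionID)
		}
	}
}

func main() {
	r := &registry{statements: map[string][]string{}}
	stmt := "SELECT 1"
	ingest(r, []event{
		{sessionID: "a", statement: &stmt}, // buffered observation
		{sessionID: "a"},                   // sentinel: clear session "a"
	})
	fmt.Println(len(r.statements)) // 0
}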
+func (i *ConcurrentBufferIngester) ClearSession(sessionID clusterunique.ID) { + i.guard.AtomicWrite(func(writerIdx int64) { + i.guard.eventBuffer[writerIdx] = event{ + sessionID: sessionID, + } + }) +} + func newConcurrentBufferIngester(registry *lockingRegistry) *ConcurrentBufferIngester { i := &ConcurrentBufferIngester{ // A channel size of 1 is sufficient to avoid unnecessarily diff --git a/pkg/sql/sqlstats/insights/ingester_test.go b/pkg/sql/sqlstats/insights/ingester_test.go index ec36f28b4480..6db09d08ad52 100644 --- a/pkg/sql/sqlstats/insights/ingester_test.go +++ b/pkg/sql/sqlstats/insights/ingester_test.go @@ -79,7 +79,7 @@ func TestIngester(t *testing.T) { newRegistry(st, &fakeDetector{ stubEnabled: true, stubIsSlow: true, - }, store), + }, store, nil), ) ingester.Start(ctx, stopper) @@ -134,7 +134,7 @@ func TestIngester_Clear(t *testing.T) { newRegistry(settings, &fakeDetector{ stubEnabled: true, stubIsSlow: true, - }, store)) + }, store, nil)) // Fill the ingester's buffer with some data. This sets us up to // call Clear() with guaranteed data in the buffer, so we can assert @@ -181,7 +181,7 @@ func TestIngester_Disabled(t *testing.T) { // the underlying registry is currently disabled. st := cluster.MakeTestingClusterSettings() - ingester := newConcurrentBufferIngester(newRegistry(st, &fakeDetector{}, newStore(st))) + ingester := newConcurrentBufferIngester(newRegistry(st, &fakeDetector{}, newStore(st), nil)) ingester.ObserveStatement(clusterunique.ID{}, &Statement{}) ingester.ObserveTransaction(clusterunique.ID{}, &Transaction{}) require.Equal(t, event{}, ingester.guard.eventBuffer[0]) @@ -200,7 +200,7 @@ func TestIngester_DoesNotBlockWhenReceivingManyObservationsAfterShutdown(t *test defer stopper.Stop(ctx) st := cluster.MakeTestingClusterSettings() - registry := newRegistry(st, &fakeDetector{stubEnabled: true}, newStore(st)) + registry := newRegistry(st, &fakeDetector{stubEnabled: true}, newStore(st), nil) ingester := newConcurrentBufferIngester(registry) ingester.Start(ctx, stopper) @@ -259,7 +259,7 @@ func TestIngesterBlockedForceSync(t *testing.T) { defer stopper.Stop(ctx) st := cluster.MakeTestingClusterSettings() - registry := newRegistry(st, &fakeDetector{stubEnabled: true}, newStore(st)) + registry := newRegistry(st, &fakeDetector{stubEnabled: true}, newStore(st), nil) ingester := newConcurrentBufferIngester(registry) // We queue up a bunch of sync operations because it's unclear how diff --git a/pkg/sql/sqlstats/insights/insights.go b/pkg/sql/sqlstats/insights/insights.go index 9a92e3ff10bc..4840950223f2 100644 --- a/pkg/sql/sqlstats/insights/insights.go +++ b/pkg/sql/sqlstats/insights/insights.go @@ -138,7 +138,7 @@ type PercentileValues struct { } // New builds a new Provider. 
-func New(st *cluster.Settings, metrics Metrics) *Provider { +func New(st *cluster.Settings, metrics Metrics, knobs *TestingKnobs) *Provider { store := newStore(st) anomalyDetector := newAnomalyDetector(st, metrics) @@ -148,7 +148,7 @@ func New(st *cluster.Settings, metrics Metrics) *Provider { newRegistry(st, &compositeDetector{detectors: []detector{ &latencyThresholdDetector{st: st}, anomalyDetector, - }}, store), + }}, store, knobs), ), anomalyDetector: anomalyDetector, } diff --git a/pkg/sql/sqlstats/insights/insights_test.go b/pkg/sql/sqlstats/insights/insights_test.go index f9dfbe1d8f9d..55c322d63273 100644 --- a/pkg/sql/sqlstats/insights/insights_test.go +++ b/pkg/sql/sqlstats/insights/insights_test.go @@ -45,7 +45,7 @@ func BenchmarkInsights(b *testing.B) { // down, guiding us as we tune buffer sizes, etc. for _, numSessions := range []int{1, 10, 100, 1000, 10000} { b.Run(fmt.Sprintf("numSessions=%d", numSessions), func(b *testing.B) { - provider := insights.New(settings, insights.NewMetrics()) + provider := insights.New(settings, insights.NewMetrics(), nil) provider.Start(ctx, stopper) // Spread the b.N work across the simulated SQL sessions, so that we diff --git a/pkg/sql/sqlstats/insights/integration/BUILD.bazel b/pkg/sql/sqlstats/insights/integration/BUILD.bazel index 7bc45adf7dca..e8abbcbb0118 100644 --- a/pkg/sql/sqlstats/insights/integration/BUILD.bazel +++ b/pkg/sql/sqlstats/insights/integration/BUILD.bazel @@ -12,6 +12,7 @@ go_test( "//pkg/settings/cluster", "//pkg/sql", "//pkg/sql/appstatspb", + "//pkg/sql/clusterunique", "//pkg/sql/contention", "//pkg/sql/sessiondata", "//pkg/sql/sqlstats/insights", diff --git a/pkg/sql/sqlstats/insights/integration/insights_test.go b/pkg/sql/sqlstats/insights/integration/insights_test.go index 4cdccd39433a..80af6205d173 100644 --- a/pkg/sql/sqlstats/insights/integration/insights_test.go +++ b/pkg/sql/sqlstats/insights/integration/insights_test.go @@ -28,6 +28,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/appstatspb" + "github.com/cockroachdb/cockroach/pkg/sql/clusterunique" "github.com/cockroachdb/cockroach/pkg/sql/contention" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/cockroach/pkg/sql/sqlstats/insights" @@ -957,3 +958,42 @@ func TestInsightsIndexRecommendationIntegration(t *testing.T) { return nil }, 1*time.Second) } + +// TestInsightsClearsPerSessionMemory ensures that memory allocated +// for a session is freed when that session is closed. +func TestInsightsClearsPerSessionMemory(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + ctx := context.Background() + sessionClosedCh := make(chan struct{}) + clearedSessionID := clusterunique.ID{} + ts := serverutils.StartServerOnly(t, base.TestServerArgs{ + Knobs: base.TestingKnobs{ + Insights: &insights.TestingKnobs{ + OnSessionClear: func(sessionID clusterunique.ID) { + defer close(sessionClosedCh) + clearedSessionID = sessionID + }, + }, + }, + }) + defer ts.Stopper().Stop(ctx) + s := ts.ApplicationLayer() + conn1 := sqlutils.MakeSQLRunner(s.SQLConn(t)) + conn2 := sqlutils.MakeSQLRunner(s.SQLConn(t)) + + var sessionID1 string + conn1.QueryRow(t, "SHOW session_id").Scan(&sessionID1) + + // Start a transaction and cancel the session - ensure that the memory is freed. 
+ conn1.Exec(t, "BEGIN") + for i := 0; i < 5; i++ { + conn1.Exec(t, "SELECT 1") + } + + conn2.Exec(t, "CANCEL SESSION $1", sessionID1) + + <-sessionClosedCh + require.Equal(t, clearedSessionID.String(), sessionID1) +} diff --git a/pkg/sql/sqlstats/insights/pool.go b/pkg/sql/sqlstats/insights/pool.go index 91093654891c..ef2c21285303 100644 --- a/pkg/sql/sqlstats/insights/pool.go +++ b/pkg/sql/sqlstats/insights/pool.go @@ -16,6 +16,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/clusterunique" ) +// TODO (xinhaoz): Remove this pool (#128199). The insights object +// can use the existing statementBuf pool for the statements slice. var insightPool = sync.Pool{ New: func() interface{} { return new(Insight) @@ -32,6 +34,9 @@ func makeInsight(sessionID clusterunique.ID, transaction *Transaction) *Insight } func releaseInsight(insight *Insight) { + for i := range insight.Statements { + insight.Statements[i] = nil + } insight.Statements = insight.Statements[:0] *insight = Insight{Statements: insight.Statements} insightPool.Put(insight) diff --git a/pkg/sql/sqlstats/insights/registry.go b/pkg/sql/sqlstats/insights/registry.go index 8ee12d7ecace..e972ab979080 100644 --- a/pkg/sql/sqlstats/insights/registry.go +++ b/pkg/sql/sqlstats/insights/registry.go @@ -23,10 +23,11 @@ import ( // statement execution to determine which statements are outliers and // writes insights into the provided sink. type lockingRegistry struct { - statements map[clusterunique.ID]*statementBuf - detector detector - causes *causes - store *LockingStore + statements map[clusterunique.ID]*statementBuf + detector detector + causes *causes + store *LockingStore + testingKnobs *TestingKnobs } func (r *lockingRegistry) Clear() { @@ -185,6 +186,18 @@ func (r *lockingRegistry) ObserveTransaction(sessionID clusterunique.ID, transac r.store.addInsight(insight) } +// clearSession removes the session from the registry and releases the +// associated statement buffer. 
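The two hunks above chase the same goal of releasing per-session memory promptly: releaseInsight now nils out slice entries before truncating (a truncated slice otherwise pins its elements against the GC), and the clearSession body that follows removes the session's buffer from the map and releases it. A self-contained sketch of that pool discipline, with simplified types in place of the real statementBuf:

package main

import (
	"fmt"
	"sync"
)

type Statement struct{ SQL string }

type statementBuf []*Statement

var bufPool = sync.Pool{
	New: func() interface{} { b := make(statementBuf, 0, 4); return &b },
}

// release nils out each entry so the GC can reclaim the statements even
// while the truncated backing array sits in the pool.
func (b *statementBuf) release() {
	for i := range *b {
		(*b)[i] = nil
	}
	*b = (*b)[:0]
	bufPool.Put(b)
}

type registry struct{ statements map[string]*statementBuf }

// clearSession drops the session's buffer from the map and returns it to
// the pool, mirroring the shape of the change above.
func (r *registry) clearSession(sessionID string) {
	if b, ok := r.statements[sessionID]; ok {
		delete(r.statements, sessionID)
		b.release()
	}
}

func main() {
	r := &registry{statements: map[string]*statementBuf{}}
	b := bufPool.Get().(*statementBuf)
	*b = append(*b, &Statement{SQL: "SELECT 1"})
	r.statements["a"] = b
	r.clearSession("a")
	fmt.Println(len(r.statements)) // 0
}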
+func (r *lockingRegistry) clearSession(sessionID clusterunique.ID) { + if b, ok := r.statements[sessionID]; ok { + delete(r.statements, sessionID) + b.release() + if r.testingKnobs != nil && r.testingKnobs.OnSessionClear != nil { + r.testingKnobs.OnSessionClear(sessionID) + } + } +} + // TODO(todd): // // Once we can handle sufficient throughput to live on the hot @@ -195,11 +208,14 @@ func (r *lockingRegistry) enabled() bool { return r.detector.enabled() } -func newRegistry(st *cluster.Settings, detector detector, store *LockingStore) *lockingRegistry { +func newRegistry( + st *cluster.Settings, detector detector, store *LockingStore, knobs *TestingKnobs, +) *lockingRegistry { return &lockingRegistry{ - statements: make(map[clusterunique.ID]*statementBuf), - detector: detector, - causes: &causes{st: st}, - store: store, + statements: make(map[clusterunique.ID]*statementBuf), + detector: detector, + causes: &causes{st: st}, + store: store, + testingKnobs: knobs, } } diff --git a/pkg/sql/sqlstats/insights/registry_test.go b/pkg/sql/sqlstats/insights/registry_test.go index 62b8b4287140..cb5252d673f5 100644 --- a/pkg/sql/sqlstats/insights/registry_test.go +++ b/pkg/sql/sqlstats/insights/registry_test.go @@ -58,7 +58,7 @@ func TestRegistry(t *testing.T) { st := cluster.MakeTestingClusterSettings() LatencyThreshold.Override(ctx, &st.SV, 1*time.Second) store := newStore(st) - registry := newRegistry(st, &latencyThresholdDetector{st: st}, store) + registry := newRegistry(st, &latencyThresholdDetector{st: st}, store, nil) registry.ObserveStatement(session.ID, statement) registry.ObserveTransaction(session.ID, transaction) @@ -96,7 +96,7 @@ func TestRegistry(t *testing.T) { st := cluster.MakeTestingClusterSettings() LatencyThreshold.Override(ctx, &st.SV, 1*time.Second) store := newStore(st) - registry := newRegistry(st, &latencyThresholdDetector{st: st}, store) + registry := newRegistry(st, &latencyThresholdDetector{st: st}, store, nil) registry.ObserveStatement(session.ID, statement) // Transaction status is set during transaction stats recorded based on // if the transaction committed. 
We'll inject the failure here to align @@ -138,7 +138,7 @@ func TestRegistry(t *testing.T) { st := cluster.MakeTestingClusterSettings() LatencyThreshold.Override(ctx, &st.SV, 0) store := newStore(st) - registry := newRegistry(st, &latencyThresholdDetector{st: st}, store) + registry := newRegistry(st, &latencyThresholdDetector{st: st}, store, nil) registry.ObserveStatement(session.ID, statement) registry.ObserveTransaction(session.ID, transaction) @@ -162,7 +162,7 @@ func TestRegistry(t *testing.T) { LatencyInSeconds: 0.5, } store := newStore(st) - registry := newRegistry(st, &latencyThresholdDetector{st: st}, store) + registry := newRegistry(st, &latencyThresholdDetector{st: st}, store, nil) registry.ObserveStatement(session.ID, statement2) registry.ObserveTransaction(session.ID, transaction) @@ -195,7 +195,7 @@ func TestRegistry(t *testing.T) { st := cluster.MakeTestingClusterSettings() LatencyThreshold.Override(ctx, &st.SV, 1*time.Second) store := newStore(st) - registry := newRegistry(st, &latencyThresholdDetector{st: st}, store) + registry := newRegistry(st, &latencyThresholdDetector{st: st}, store, nil) registry.ObserveStatement(session.ID, statement) registry.ObserveStatement(otherSession.ID, otherStatement) registry.ObserveTransaction(session.ID, transaction) @@ -246,7 +246,7 @@ func TestRegistry(t *testing.T) { st := cluster.MakeTestingClusterSettings() LatencyThreshold.Override(ctx, &st.SV, 1*time.Second) store := newStore(st) - registry := newRegistry(st, &latencyThresholdDetector{st: st}, store) + registry := newRegistry(st, &latencyThresholdDetector{st: st}, store, nil) registry.ObserveStatement(session.ID, statement) registry.ObserveStatement(session.ID, siblingStatement) registry.ObserveTransaction(session.ID, transaction) @@ -276,7 +276,7 @@ func TestRegistry(t *testing.T) { t.Run("txn with no stmts", func(t *testing.T) { transaction := &Transaction{ID: uuid.MakeV4()} st := cluster.MakeTestingClusterSettings() - registry := newRegistry(st, &latencyThresholdDetector{st: st}, newStore(st)) + registry := newRegistry(st, &latencyThresholdDetector{st: st}, newStore(st), nil) require.NotPanics(t, func() { registry.ObserveTransaction(session.ID, transaction) }) }) @@ -284,7 +284,7 @@ func TestRegistry(t *testing.T) { transaction := &Transaction{ID: uuid.MakeV4()} st := cluster.MakeTestingClusterSettings() store := newStore(st) - registry := newRegistry(st, &latencyThresholdDetector{st: st}, store) + registry := newRegistry(st, &latencyThresholdDetector{st: st}, store, nil) contentionDuration := 10 * time.Second statement := &Statement{ Status: Statement_Completed, @@ -349,7 +349,7 @@ func TestRegistry(t *testing.T) { st := cluster.MakeTestingClusterSettings() LatencyThreshold.Override(ctx, &st.SV, 1*time.Second) store := newStore(st) - registry := newRegistry(st, &latencyThresholdDetector{st: st}, store) + registry := newRegistry(st, &latencyThresholdDetector{st: st}, store, nil) registry.ObserveStatement(session.ID, statementNotIgnored) registry.ObserveStatement(session.ID, statementIgnoredSet) registry.ObserveStatement(session.ID, statementIgnoredExplain) @@ -389,7 +389,7 @@ func TestInsightsRegistry_Clear(t *testing.T) { st := cluster.MakeTestingClusterSettings() LatencyThreshold.Override(ctx, &st.SV, 1*time.Second) store := newStore(st) - registry := newRegistry(st, &latencyThresholdDetector{st: st}, store) + registry := newRegistry(st, &latencyThresholdDetector{st: st}, store, nil) // Create some test data. 
sessionA := Session{ID: clusterunique.IDFromBytes([]byte("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"))} sessionB := Session{ID: clusterunique.IDFromBytes([]byte("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"))} @@ -403,9 +403,46 @@ func TestInsightsRegistry_Clear(t *testing.T) { registry.ObserveStatement(sessionA.ID, statement) registry.ObserveStatement(sessionB.ID, statement) expLenStmts := 2 + // No need to acquire the lock here, as the registry is not attached to anything. require.Len(t, registry.statements, expLenStmts) // Now clear the cache, assert it's cleared. registry.Clear() require.Empty(t, registry.statements) }) } + +func TestInsightsRegistry_ClearSession(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + // Initialize the registry. + st := cluster.MakeTestingClusterSettings() + store := newStore(st) + registry := newRegistry(st, &latencyThresholdDetector{st: st}, store, nil) + + // Create some test data. + sessionA := Session{ID: clusterunique.IDFromBytes([]byte("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"))} + sessionB := Session{ID: clusterunique.IDFromBytes([]byte("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"))} + statement := &Statement{ + Status: Statement_Completed, + ID: clusterunique.IDFromBytes([]byte("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb")), + FingerprintID: appstatspb.StmtFingerprintID(100), + LatencyInSeconds: 2, + } + + // Record the test data, assert it's cached. + registry.ObserveStatement(sessionA.ID, statement) + registry.ObserveStatement(sessionB.ID, statement) + // No need to acquire the lock here, as the registry is not attached to anything. + require.Len(t, registry.statements, 2) + + // Clear sessionA's entry from the cache, assert it's removed. + registry.clearSession(sessionA.ID) + + // sessionA should be removed, sessionB should still be present. + b, ok := registry.statements[sessionA.ID] + require.False(t, ok) + require.Nil(t, b) + require.Len(t, registry.statements, 1) + require.NotEmpty(t, registry.statements[sessionB.ID]) +} diff --git a/pkg/sql/sqlstats/insights/test_utils.go b/pkg/sql/sqlstats/insights/test_utils.go new file mode 100644 index 000000000000..734d2fb9def2 --- /dev/null +++ b/pkg/sql/sqlstats/insights/test_utils.go @@ -0,0 +1,32 @@ +// Copyright 2024 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package insights + +import "github.com/cockroachdb/cockroach/pkg/sql/clusterunique" + +// TestingKnobs provides hooks and knobs for unit tests. +type TestingKnobs struct { + // OnSessionClear is a callback that is triggered when the locking + // registry clears a session entry. + OnSessionClear func(sessionID clusterunique.ID) + + // InsightsWriterTxnInterceptor is a callback that's triggered when a txn insight + // is observed by the ingester. The callback is called instead of writing the + // insight to the buffer. + InsightsWriterTxnInterceptor func(sessionID clusterunique.ID, transaction *Transaction) + + // InsightsWriterStmtInterceptor is a callback that's triggered when a stmt insight + // is observed. The callback is called instead of writing the insight to the buffer. + InsightsWriterStmtInterceptor func(sessionID clusterunique.ID, statement *Statement) +} + +// ModuleTestingKnobs implements base.ModuleTestingKnobs interface.
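The interceptor knobs defined just above follow the pattern the ingester hunks earlier in this diff implement: when a callback is set, it is invoked instead of the production write, so tests can count or inspect observations without draining a buffer. A minimal sketch, with simplified types in place of clusterunique.ID and the real ConcurrentBufferIngester:

package main

import "fmt"

type Statement struct{ SQL string }

type TestingKnobs struct {
	// StmtInterceptor, when set, replaces the buffered write path.
	StmtInterceptor func(sessionID string, stmt *Statement)
}

type Ingester struct {
	knobs  *TestingKnobs
	buffer []*Statement
}

func (i *Ingester) ObserveStatement(sessionID string, stmt *Statement) {
	if i.knobs != nil && i.knobs.StmtInterceptor != nil {
		// Test path: hand the observation to the knob and skip the buffer.
		i.knobs.StmtInterceptor(sessionID, stmt)
		return
	}
	i.buffer = append(i.buffer, stmt)
}

func main() {
	seen := 0
	ing := &Ingester{knobs: &TestingKnobs{
		StmtInterceptor: func(string, *Statement) { seen++ },
	}}
	ing.ObserveStatement("s1", &Statement{SQL: "SELECT 1"})
	fmt.Println(seen, len(ing.buffer)) // 1 0
}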
+func (*TestingKnobs) ModuleTestingKnobs() {} diff --git a/pkg/sql/sqlstats/sslocal/BUILD.bazel b/pkg/sql/sqlstats/sslocal/BUILD.bazel index 5030b695b68e..cf9813d4236e 100644 --- a/pkg/sql/sqlstats/sslocal/BUILD.bazel +++ b/pkg/sql/sqlstats/sslocal/BUILD.bazel @@ -19,6 +19,7 @@ go_library( "//pkg/settings", "//pkg/settings/cluster", "//pkg/sql/appstatspb", + "//pkg/sql/clusterunique", "//pkg/sql/execstats", "//pkg/sql/pgwire/pgerror", "//pkg/sql/sessionphase", diff --git a/pkg/sql/sqlstats/sslocal/sql_stats_test.go b/pkg/sql/sqlstats/sslocal/sql_stats_test.go index 2cc3ab648e87..54df0395baf8 100644 --- a/pkg/sql/sqlstats/sslocal/sql_stats_test.go +++ b/pkg/sql/sqlstats/sslocal/sql_stats_test.go @@ -448,7 +448,7 @@ func TestExplicitTxnFingerprintAccounting(t *testing.T) { Settings: st, }) - insightsProvider := insights.New(st, insights.NewMetrics()) + insightsProvider := insights.New(st, insights.NewMetrics(), nil) sqlStats := sslocal.New( st, sqlstats.MaxMemSQLStatsStmtFingerprints, @@ -576,7 +576,7 @@ func TestAssociatingStmtStatsWithTxnFingerprint(t *testing.T) { require.NoError(t, err) // Construct the SQL Stats machinery. - insightsProvider := insights.New(st, insights.NewMetrics()) + insightsProvider := insights.New(st, insights.NewMetrics(), nil) sqlStats := sslocal.New( st, sqlstats.MaxMemSQLStatsStmtFingerprints, @@ -1725,7 +1725,7 @@ func TestSQLStats_ConsumeStats(t *testing.T) { Name: "test", Settings: st, }) - insightsProvider := insights.New(st, insights.NewMetrics()) + insightsProvider := insights.New(st, insights.NewMetrics(), nil) sqlStats := sslocal.New( st, diff --git a/pkg/sql/sqlstats/sslocal/sslocal_stats_collector.go b/pkg/sql/sqlstats/sslocal/sslocal_stats_collector.go index af88d6faa2e8..04525523a4b4 100644 --- a/pkg/sql/sqlstats/sslocal/sslocal_stats_collector.go +++ b/pkg/sql/sqlstats/sslocal/sslocal_stats_collector.go @@ -16,6 +16,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/appstatspb" + "github.com/cockroachdb/cockroach/pkg/sql/clusterunique" "github.com/cockroachdb/cockroach/pkg/sql/execstats" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/sessionphase" @@ -26,7 +27,8 @@ import ( ) // StatsCollector is used to collect statistics for transactions and -// statements for the entire lifetime of a session. +// statements for the entire lifetime of a session. It must be closed +// with Close() when the session is done. type StatsCollector struct { // currentTransactionStatementStats contains the current transaction's statement @@ -139,8 +141,10 @@ func (s *StatsCollector) Reset(appStats sqlstats.ApplicationStats, phaseTime *se s.stmtFingerprintID = 0 } -// Free frees any local memory used by the stats collector. -func (s *StatsCollector) Free(ctx context.Context) { +// Close frees any local memory used by the stats collector and +// any memory allocated by underlying sql stats systems for the session +// that owns this stats collector. +func (s *StatsCollector) Close(ctx context.Context, sessionID clusterunique.ID) { // For stats collectors for executors with outer transactions, // the currentTransactionStatementStats is the flush target. 
// We should make sure we're never freeing the flush target, @@ -148,6 +152,9 @@ func (s *StatsCollector) Free(ctx context.Context) { if s.currentTransactionStatementStats != s.flushTarget { s.currentTransactionStatementStats.Free(ctx) } + if s.insightsWriter != nil { + s.insightsWriter.ClearSession(sessionID) + } } // StartTransaction sets up the StatsCollector for a new transaction. @@ -272,9 +279,7 @@ func (s *StatsCollector) ObserveStatement( ErrorCode: errorCode, ErrorMsg: errorMsg, } - if s.knobs != nil && s.knobs.InsightsWriterStmtInterceptor != nil { - s.knobs.InsightsWriterStmtInterceptor(value.SessionID, &insight) - } else if s.insightsWriter != nil { + if s.insightsWriter != nil { s.insightsWriter.ObserveStatement(value.SessionID, &insight) } } @@ -331,9 +336,7 @@ func (s *StatsCollector) ObserveTransaction( LastErrorMsg: errorMsg, Status: status, } - if s.knobs != nil && s.knobs.InsightsWriterTxnInterceptor != nil { - s.knobs.InsightsWriterTxnInterceptor(ctx, value.SessionID, &insight) - } else if s.insightsWriter != nil { + if s.insightsWriter != nil { s.insightsWriter.ObserveTransaction(value.SessionID, &insight) } } diff --git a/pkg/sql/sqlstats/ssmemstorage/ss_mem_writer_test.go b/pkg/sql/sqlstats/ssmemstorage/ss_mem_writer_test.go index ed3f755bea3e..d16eca72f092 100644 --- a/pkg/sql/sqlstats/ssmemstorage/ss_mem_writer_test.go +++ b/pkg/sql/sqlstats/ssmemstorage/ss_mem_writer_test.go @@ -38,7 +38,7 @@ func TestRecordStatement(t *testing.T) { sqlstats.TxnStatsEnable.Override(ctx, &settings.SV, false) // Initialize knobs & mem container. numStmtInsights := 0 - knobs := &sqlstats.TestingKnobs{ + knobs := &insights.TestingKnobs{ InsightsWriterStmtInterceptor: func(sessionID clusterunique.ID, statement *insights.Statement) { numStmtInsights++ }, @@ -47,8 +47,8 @@ func TestRecordStatement(t *testing.T) { nil, /* uniqueServerCount */ testMonitor(ctx, "test-mon", settings), "test-app", - knobs, - insights.New(settings, insights.NewMetrics()).Anomalies(), + nil, + insights.New(settings, insights.NewMetrics(), knobs).Anomalies(), ) // Record a statement, ensure no insights are generated. statsKey := appstatspb.StatementStatisticsKey{ @@ -72,8 +72,8 @@ func TestRecordTransaction(t *testing.T) { sqlstats.TxnStatsEnable.Override(ctx, &settings.SV, false) // Initialize knobs & mem container. numTxnInsights := 0 - knobs := &sqlstats.TestingKnobs{ - InsightsWriterTxnInterceptor: func(ctx context.Context, sessionID clusterunique.ID, transaction *insights.Transaction) { + knobs := &insights.TestingKnobs{ + InsightsWriterTxnInterceptor: func(sessionID clusterunique.ID, transaction *insights.Transaction) { numTxnInsights++ }, } @@ -81,8 +81,8 @@ func TestRecordTransaction(t *testing.T) { nil, /* uniqueServerCount */ testMonitor(ctx, "test-mon", settings), "test-app", - knobs, - insights.New(settings, insights.NewMetrics()).Anomalies(), + nil, + insights.New(settings, insights.NewMetrics(), knobs).Anomalies(), ) // Record a transaction, ensure no insights are generated. 
require.NoError(t, memContainer.RecordTransaction(ctx, appstatspb.TransactionFingerprintID(123), sqlstats.RecordedTxnStats{})) diff --git a/pkg/sql/sqlstats/test_utils.go b/pkg/sql/sqlstats/test_utils.go index b5c23eddab74..7607cbc62d9f 100644 --- a/pkg/sql/sqlstats/test_utils.go +++ b/pkg/sql/sqlstats/test_utils.go @@ -10,13 +10,7 @@ package sqlstats -import ( - "context" - "time" - - "github.com/cockroachdb/cockroach/pkg/sql/clusterunique" - "github.com/cockroachdb/cockroach/pkg/sql/sqlstats/insights" -) +import "time" // TestingKnobs provides hooks and knobs for unit tests. type TestingKnobs struct { @@ -28,16 +22,6 @@ type TestingKnobs struct { // finishes flushing. OnTxnStatsFlushFinished func() - // InsightsWriterTxnInterceptor is a callback that's triggered when a txn insight - // is observed when recording txn stats. The callback is called instead of the legitimate - // insights.Writer. - InsightsWriterTxnInterceptor func(ctx context.Context, sessionID clusterunique.ID, transaction *insights.Transaction) - - // InsightsWriterStmtInterceptor is a callback that's triggered when a stmt insight - // is observed when recording stmt stats. The callback is called instead of the legitimate - // insights.Writer. - InsightsWriterStmtInterceptor func(sessionID clusterunique.ID, statement *insights.Statement) - // OnCleanupStartForShard is a callback that is triggered when background // cleanup job starts to delete data from a shard from the system table. OnCleanupStartForShard func(shardIdx int, existingCountInShard, shardLimit int64) diff --git a/pkg/sql/sqltelemetry/show.go b/pkg/sql/sqltelemetry/show.go index 0d02e47b898d..8ff540e80c7a 100644 --- a/pkg/sql/sqltelemetry/show.go +++ b/pkg/sql/sqltelemetry/show.go @@ -64,6 +64,8 @@ const ( CreateExternalConnection // ExternalConnection represents the SHOW EXTERNAL CONNECTION command. ExternalConnection + // LogicalReplicationJobs represents the SHOW LOGICAL REPLICATION JOBS command. + LogicalReplicationJobs ) var showTelemetryNameMap = map[ShowTelemetryType]string{ @@ -88,6 +90,7 @@ var showTelemetryNameMap = map[ShowTelemetryType]string{ SuperRegions: "super_regions", CreateExternalConnection: "create_external_connection", ExternalConnection: "external_connection", + LogicalReplicationJobs: "logical_replication_jobs", } func (s ShowTelemetryType) String() string { diff --git a/pkg/sql/stats/BUILD.bazel b/pkg/sql/stats/BUILD.bazel index af993f2643a9..45c475a89b48 100644 --- a/pkg/sql/stats/BUILD.bazel +++ b/pkg/sql/stats/BUILD.bazel @@ -11,6 +11,7 @@ go_library( "histogram.go", "json.go", "merge.go", + "most_common_values.go", "new_stat.go", "quantile.go", "row_sampling.go", @@ -49,6 +50,7 @@ go_library( "//pkg/sql/sqlerrors", "//pkg/sql/types", "//pkg/util/cache", + "//pkg/util/container/heap", "//pkg/util/encoding", "//pkg/util/errorutil", "//pkg/util/hlc", diff --git a/pkg/sql/stats/automatic_stats.go b/pkg/sql/stats/automatic_stats.go index 2b61d2dedbe5..3a79bfb9c3cf 100644 --- a/pkg/sql/stats/automatic_stats.go +++ b/pkg/sql/stats/automatic_stats.go @@ -43,6 +43,17 @@ var AutomaticStatisticsClusterMode = settings.RegisterBoolSetting( true, settings.WithPublic) +// AutomaticPartialStatisticsClusterMode controls the cluster setting for +// enabling automatic table partial statistics collection. If automatic full +// table statistics are disabled for a table, then automatic partial statistics +// will also be disabled. 
+var AutomaticPartialStatisticsClusterMode = settings.RegisterBoolSetting( + settings.ApplicationLevel, + catpb.AutoPartialStatsEnabledSettingName, + "automatic partial statistics collection mode", + false, + settings.WithPublic) + // UseStatisticsOnSystemTables controls the cluster setting for enabling // statistics usage by the optimizer for planning queries involving system // tables. @@ -98,6 +109,19 @@ var AutomaticStatisticsFractionStaleRows = settings.RegisterFloatSetting( settings.WithPublic, ) +// AutomaticPartialStatisticsFractionStaleRows controls the cluster setting for +// the target fraction of rows in a table that should be stale before partial +// statistics on that table are refreshed, in addition to the constant value +// AutomaticPartialStatisticsMinStaleRows. +var AutomaticPartialStatisticsFractionStaleRows = settings.RegisterFloatSetting( + settings.ApplicationLevel, + catpb.AutoPartialStatsFractionStaleSettingName, + "target fraction of stale rows per table that will trigger a partial statistics refresh", + 0.05, + settings.NonNegativeFloat, + settings.WithPublic, +) + // AutomaticStatisticsMinStaleRows controls the cluster setting for the target // number of rows that should be updated before a table is refreshed, in // addition to the fraction AutomaticStatisticsFractionStaleRows. @@ -110,6 +134,18 @@ var AutomaticStatisticsMinStaleRows = settings.RegisterIntSetting( settings.WithPublic, ) +// AutomaticPartialStatisticsMinStaleRows controls the cluster setting for the +// target number of rows that should be updated before a table is refreshed, in +// addition to the fraction AutomaticPartialStatisticsFractionStaleRows. +var AutomaticPartialStatisticsMinStaleRows = settings.RegisterIntSetting( + settings.ApplicationLevel, + catpb.AutoPartialStatsMinStaleSettingName, + "target minimum number of stale rows per table that will trigger a partial statistics refresh", + 100, + settings.NonNegativeInt, + settings.WithPublic, +) + // statsGarbageCollectionInterval controls the interval between running an // internal query to delete stats for dropped tables. var statsGarbageCollectionInterval = settings.RegisterDurationSetting( @@ -171,7 +207,10 @@ const ( // // The Refresher is designed to schedule a CREATE STATISTICS refresh job after // approximately X% of total rows have been updated/inserted/deleted in a given -// table. Currently, X is hardcoded to be 20%. +// table. It also schedules partial stats refresh jobs (CREATE STATISTICS ... +// USING EXTREMES) after approximately Y% of total rows have been affected, +// where Y < X. X is 20% and Y is 5% by default. These values can be configured +// with their respective cluster and table settings. // // The decision to refresh is based on a percentage rather than a fixed number // of rows because if a table is huge and rarely updated, we don't want to @@ -179,15 +218,22 @@ const ( // updated, we want to update stats more often. // // To avoid contention on row update counters, we use a statistical approach. -// For example, suppose we want to refresh stats after 20% of rows are updated -// and there are currently 1M rows in the table. If a user updates 10 rows, -// we use random number generation to refresh stats with probability +// For example, suppose we want to refresh full stats after 20% of rows are +// updated, and we want to refresh partial stats after 5% of rows are updated.
+// If there are currently 1M rows in the table and a user updates 10 rows, +// we use random number generation to refresh full stats with probability // 10/(1M * 0.2) = 0.00005. The general formula is: // // # rows updated/inserted/deleted // p = -------------------------------------------------------------------- // (# rows in table) * (target fraction of rows updated before refresh) // +// If we decide not to refresh full stats based on the above probability, we +// make a decision to refresh stats with a partial collection. This is done +// using the same formula as above, but with a smaller target fraction of rows +// changed before refresh. This ensures that we maintain stats on new values +// that are added to the table between full stats refreshes. +// // The existing statistics in the stats cache are used to get the number of // rows in the table. // @@ -284,6 +330,10 @@ type TableStatsTestingKnobs struct { // DisableInitialTableCollection, if set, indicates that the "initial table // collection" performed by the Refresher should be skipped. DisableInitialTableCollection bool + // DisableFullStatsRefresh, if set, indicates that the Refresher should not + // perform full statistics refreshes. Useful for testing the partial stats + // refresh logic. + DisableFullStatsRefresh bool } var _ base.ModuleTestingKnobs = &TableStatsTestingKnobs{} @@ -336,6 +386,20 @@ func (r *Refresher) autoStatsEnabled(desc catalog.TableDescriptor) bool { return enabledForTable == catpb.AutoStatsCollectionEnabled } +func (r *Refresher) autoPartialStatsEnabled(desc catalog.TableDescriptor) bool { + if desc == nil { + // If the descriptor could not be accessed, defer to the cluster setting. + return AutomaticPartialStatisticsClusterMode.Get(&r.st.SV) + } + enabledForTable := desc.AutoPartialStatsCollectionEnabled() + // The table-level setting of sql_stats_automatic_partial_collection_enabled + // takes precedence over the cluster setting. 
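To make the two-tier decision described in the Refresher comment above concrete, here is a minimal runnable sketch of the probabilistic check. The helper is hypothetical (the Refresher uses its own randGen and settings plumbing), and the 500-row full-stats minimum is an assumed default; the 20%/5% fractions and the 100-row partial minimum are the values this patch documents.

package main

import (
	"fmt"
	"math/rand"
)

// shouldRefresh fires with probability rowsAffected/targetRows, where
// targetRows = rowCount*fractionStale + minStaleRows, mirroring the
// Refresher formula quoted above.
func shouldRefresh(rng *rand.Rand, rowsAffected, minStaleRows int64, rowCount, fractionStale float64) bool {
	targetRows := int64(rowCount*fractionStale) + minStaleRows
	if targetRows <= 0 {
		return true
	}
	return rng.Int63n(targetRows) < rowsAffected
}

func main() {
	rng := rand.New(rand.NewSource(1))
	// 10 rows changed in a 1M-row table: full refresh with p = 10/200500,
	// and only when that misses, a partial refresh with p = 10/50100.
	switch {
	case shouldRefresh(rng, 10, 500, 1e6, 0.20):
		fmt.Println("full refresh")
	case shouldRefresh(rng, 10, 100, 1e6, 0.05):
		fmt.Println("partial refresh (USING EXTREMES)")
	default:
		fmt.Println("no refresh this round")
	}
}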
+ if enabledForTable == catpb.AutoPartialStatsCollectionNotSet { + return AutomaticPartialStatisticsClusterMode.Get(&r.st.SV) + } + return enabledForTable == catpb.AutoPartialStatsCollectionEnabled +} + func (r *Refresher) autoStatsEnabledForTableID( tableID descpb.ID, settingOverrides map[descpb.ID]catpb.AutoStatsSettings, ) bool { @@ -368,6 +432,16 @@ func (r *Refresher) autoStatsMinStaleRows(explicitSettings *catpb.AutoStatsSetti return AutomaticStatisticsMinStaleRows.Get(&r.st.SV) } +func (r *Refresher) autoPartialStatsMinStaleRows(explicitSettings *catpb.AutoStatsSettings) int64 { + if explicitSettings == nil { + return AutomaticPartialStatisticsMinStaleRows.Get(&r.st.SV) + } + if minStaleRows, ok := explicitSettings.AutoPartialStatsMinStaleRows(); ok { + return minStaleRows + } + return AutomaticPartialStatisticsMinStaleRows.Get(&r.st.SV) +} + func (r *Refresher) autoStatsFractionStaleRows(explicitSettings *catpb.AutoStatsSettings) float64 { if explicitSettings == nil { return AutomaticStatisticsFractionStaleRows.Get(&r.st.SV) @@ -378,6 +452,18 @@ func (r *Refresher) autoStatsFractionStaleRows(explicitSettings *catpb.AutoStats return AutomaticStatisticsFractionStaleRows.Get(&r.st.SV) } +func (r *Refresher) autoPartialStatsFractionStaleRows( + explicitSettings *catpb.AutoStatsSettings, +) float64 { + if explicitSettings == nil { + return AutomaticPartialStatisticsFractionStaleRows.Get(&r.st.SV) + } + if fractionStaleRows, ok := explicitSettings.AutoPartialStatsFractionStaleRows(); ok { + return fractionStaleRows + } + return AutomaticPartialStatisticsFractionStaleRows.Get(&r.st.SV) +} + func (r *Refresher) getTableDescriptor( ctx context.Context, tableID descpb.ID, ) (desc catalog.TableDescriptor) { @@ -530,7 +616,15 @@ func (r *Refresher) Start( explicitSettings = &settings } } - r.maybeRefreshStats(ctx, stopper, tableID, explicitSettings, rowsAffected, r.asOfTime) + r.maybeRefreshStats( + ctx, + stopper, + tableID, + explicitSettings, + rowsAffected, + r.asOfTime, + r.autoPartialStatsEnabled(desc), + ) select { case <-ctx.Done(): @@ -759,6 +853,7 @@ func (r *Refresher) maybeRefreshStats( explicitSettings *catpb.AutoStatsSettings, rowsAffected int64, asOf time.Duration, + maybeRefreshPartialStats bool, ) { tableStats, err := r.cache.getTableStatsFromCache(ctx, tableID, nil /* forecast */, nil /* udtCols */) if err != nil { @@ -768,7 +863,9 @@ func (r *Refresher) maybeRefreshStats( var rowCount float64 mustRefresh := false - if stat := mostRecentAutomaticStat(tableStats); stat != nil { + isPartial := false + stat := mostRecentAutomaticFullStat(tableStats) + if stat != nil { // Check if too much time has passed since the last refresh. // This check is in place to corral statistical outliers and avoid a // case where a significant portion of the data in a table has changed but @@ -791,8 +888,8 @@ func (r *Refresher) maybeRefreshStats( } rowCount = float64(stat.RowCount) } else { - // If there are no statistics available on this table, we must perform a - // refresh. + // If there are no full statistics available on this table, we must perform + // a refresh. mustRefresh = true } @@ -804,12 +901,32 @@ func (r *Refresher) maybeRefreshStats( if targetRows > 0 { randomTargetRows = r.randGen.randInt(targetRows) } - if !mustRefresh && rowsAffected < math.MaxInt32 && randomTargetRows >= rowsAffected { - // No refresh is happening this time. 
- return + if (!mustRefresh && rowsAffected < math.MaxInt32 && randomTargetRows >= rowsAffected) || + (r.knobs != nil && r.knobs.DisableFullStatsRefresh) { + // No full statistics refresh is happening this time. Let's try a partial + // stats refresh. + if !maybeRefreshPartialStats { + // No refresh is happening this time, full or partial + return + } + + randomTargetRows = int64(0) + partialStatsMinStaleRows := r.autoPartialStatsMinStaleRows(explicitSettings) + partialStatsFractionStaleRows := r.autoPartialStatsFractionStaleRows(explicitSettings) + targetRows = int64(rowCount*partialStatsFractionStaleRows) + partialStatsMinStaleRows + // randInt will panic if we pass it a value of 0. + if targetRows > 0 { + randomTargetRows = r.randGen.randInt(targetRows) + } + if randomTargetRows >= rowsAffected { + // No refresh is happening this time, full or partial + return + } + + isPartial = true } - if err := r.refreshStats(ctx, tableID, asOf); err != nil { + if err := r.refreshStats(ctx, tableID, asOf, isPartial); err != nil { if errors.Is(err, ConcurrentCreateStatsError) { // Another stats job was already running. Attempt to reschedule this // refresh. @@ -853,15 +970,25 @@ func (r *Refresher) maybeRefreshStats( } } -func (r *Refresher) refreshStats(ctx context.Context, tableID descpb.ID, asOf time.Duration) error { +func (r *Refresher) refreshStats( + ctx context.Context, tableID descpb.ID, asOf time.Duration, isPartial bool, +) error { + var usingExtremes string + autoStatsJobName := jobspb.AutoStatsName + if isPartial { + usingExtremes = " USING EXTREMES" + autoStatsJobName = jobspb.AutoPartialStatsName + } // Create statistics for all default column sets on the given table. stmt := fmt.Sprintf( - "CREATE STATISTICS %s FROM [%d] WITH OPTIONS THROTTLING %g AS OF SYSTEM TIME '-%s'", - jobspb.AutoStatsName, + "CREATE STATISTICS %s FROM [%d] WITH OPTIONS THROTTLING %g AS OF SYSTEM TIME '-%s'%s", + autoStatsJobName, tableID, AutomaticStatisticsMaxIdleTime.Get(&r.st.SV), asOf.String(), + usingExtremes, ) + log.Infof(ctx, "automatically executing %q", stmt) _ /* rows */, err := r.internalDB.Executor().Exec( ctx, @@ -872,9 +999,9 @@ func (r *Refresher) refreshStats(ctx context.Context, tableID descpb.ID, asOf ti return err } -// mostRecentAutomaticStat finds the most recent automatic statistic +// mostRecentAutomaticFullStat finds the most recent automatic statistic // (identified by the name AutoStatsName). -func mostRecentAutomaticStat(tableStats []*TableStatistic) *TableStatistic { +func mostRecentAutomaticFullStat(tableStats []*TableStatistic) *TableStatistic { // Stats are sorted with the most recent first. for _, stat := range tableStats { if stat.Name == jobspb.AutoStatsName { diff --git a/pkg/sql/stats/automatic_stats_test.go b/pkg/sql/stats/automatic_stats_test.go index 3180c08fd378..ee6921ff789f 100644 --- a/pkg/sql/stats/automatic_stats_test.go +++ b/pkg/sql/stats/automatic_stats_test.go @@ -58,6 +58,9 @@ func TestMaybeRefreshStats(t *testing.T) { AutomaticStatisticsClusterMode.Override(ctx, &st.SV, false) AutomaticStatisticsMinStaleRows.Override(ctx, &st.SV, 5) + AutomaticPartialStatisticsClusterMode.Override(ctx, &st.SV, false) + AutomaticPartialStatisticsMinStaleRows.Override(ctx, &st.SV, 5) + sqlRun := sqlutils.MakeSQLRunner(sqlDB) sqlRun.Exec(t, `CREATE DATABASE t; @@ -77,55 +80,81 @@ func TestMaybeRefreshStats(t *testing.T) { refresher := MakeRefresher(s.AmbientCtx(), st, internalDB, cache, time.Microsecond /* asOfTime */, nil /* knobs */) // There should not be any stats yet. 
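Stepping back to the refreshStats change above: the partial variant issues the same throttled CREATE STATISTICS statement as the full one, with only the job name swapped and USING EXTREMES appended. Filled in with illustrative placeholder values (table ID 53, throttling 0.9, a 30s AS OF delay — none of these come from the patch), the two variants come out as:

	// Placeholders only; the real values come from the table ID,
	// AutomaticStatisticsMaxIdleTime, and the refresher's asOf duration.
	full := fmt.Sprintf(
		"CREATE STATISTICS %s FROM [%d] WITH OPTIONS THROTTLING %g AS OF SYSTEM TIME '-%s'%s",
		jobspb.AutoStatsName, 53, 0.9, "30s", "",
	)
	// CREATE STATISTICS __auto__ FROM [53] WITH OPTIONS THROTTLING 0.9 AS OF SYSTEM TIME '-30s'
	partial := fmt.Sprintf(
		"CREATE STATISTICS %s FROM [%d] WITH OPTIONS THROTTLING %g AS OF SYSTEM TIME '-%s'%s",
		jobspb.AutoPartialStatsName, 53, 0.9, "30s", " USING EXTREMES",
	)
	// CREATE STATISTICS __auto_partial__ FROM [53] WITH OPTIONS THROTTLING 0.9 AS OF SYSTEM TIME '-30s' USING EXTREMES
	_, _ = full, partial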
- if err := checkStatsCount(ctx, cache, descA, 0 /* expected */); err != nil { + if err := checkStatsCount(ctx, cache, descA, 0 /* expectedFull */, 0 /* expectedPartial */); err != nil { t.Fatal(err) } - // There are no stats yet, so this must refresh the statistics on table t + // There are no stats yet, so this must refresh the full statistics on table t // even though rowsAffected=0. refresher.maybeRefreshStats( - ctx, s.AppStopper(), descA.GetID(), nil /* explicitSettings */, 0 /* rowsAffected */, time.Microsecond, /* asOf */ + ctx, s.AppStopper(), descA.GetID(), nil /* explicitSettings */, 0 /* rowsAffected */, time.Microsecond /* asOf */, true, /* maybeRefreshPartialStats */ ) - if err := checkStatsCount(ctx, cache, descA, 1 /* expected */); err != nil { + if err := checkStatsCount(ctx, cache, descA, 1 /* expectedFull */, 0 /* expectedPartial */); err != nil { t.Fatal(err) } + // Clear the stat cache to ensure that upcoming partial stat collections see + // the latest full statistic. + sqlRun.Exec(t, `SELECT crdb_internal.clear_table_stats_cache();`) + // Try to refresh again. With rowsAffected=0, the probability of a refresh // is 0, so refreshing will not succeed. refresher.maybeRefreshStats( - ctx, s.AppStopper(), descA.GetID(), nil /* explicitSettings */, 0 /* rowsAffected */, time.Microsecond, /* asOf */ + ctx, s.AppStopper(), descA.GetID(), nil /* explicitSettings */, 0 /* rowsAffected */, time.Microsecond /* asOf */, true, /* maybeRefreshPartialStats */ ) - if err := checkStatsCount(ctx, cache, descA, 1 /* expected */); err != nil { + if err := checkStatsCount(ctx, cache, descA, 1 /* expectedFull */, 0 /* expectedPartial */); err != nil { t.Fatal(err) } - // Setting minStaleRows for the table prevents refreshing from occurring. + // Setting minStaleRows for the table prevents a full stat refresh from + // occurring, but partial stats must be refreshed. minStaleRows := int64(100000000) explicitSettings := catpb.AutoStatsSettings{MinStaleRows: &minStaleRows} refresher.maybeRefreshStats( - ctx, s.AppStopper(), descA.GetID(), &explicitSettings, 10 /* rowsAffected */, time.Microsecond, /* asOf */ + ctx, s.AppStopper(), descA.GetID(), &explicitSettings, 10 /* rowsAffected */, time.Microsecond /* asOf */, true, /* maybeRefreshPartialStats */ + ) + if err := checkStatsCount(ctx, cache, descA, 1 /* expectedFull */, 1 /* expectedPartial */); err != nil { + t.Fatal(err) + } + + // Do the same for partialMinStaleRows to also prevent a partial refresh. + explicitSettings.PartialMinStaleRows = &minStaleRows + refresher.maybeRefreshStats( + ctx, s.AppStopper(), descA.GetID(), &explicitSettings, 10 /* rowsAffected */, time.Microsecond /* asOf */, true, /* maybeRefreshPartialStats */ ) - if err := checkStatsCount(ctx, cache, descA, 1 /* expected */); err != nil { + if err := checkStatsCount(ctx, cache, descA, 1 /* expectedFull */, 1 /* expectedPartial */); err != nil { t.Fatal(err) } - // Setting fractionStaleRows for the table can also prevent refreshing from - // occurring, though this is a not a typical value for this setting. + // Setting fractionStaleRows for the table can also prevent a full refresh + // from occurring, though this is not a typical value for this setting. + // Partial stats will still be refreshed.
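The explicit settings exercised in this test surface to users as table storage parameters (wired up further down in this patch, in tablestorageparam). A sketch of how they would be flipped: the sql_stats_automatic_partial_collection_enabled name is quoted in the Refresher comment above, while the cluster-setting name and the min/fraction parameter names are inferred from the existing full-stats equivalents and may differ.

	// Cluster-wide default (name inferred from catpb.AutoPartialStatsEnabledSettingName):
	sqlRun.Exec(t, `SET CLUSTER SETTING sql.stats.automatic_partial_collection.enabled = true`)
	// Per-table overrides, which take precedence over the cluster setting:
	sqlRun.Exec(t, `ALTER TABLE t.a SET ("sql_stats_automatic_partial_collection_enabled" = true)`)
	sqlRun.Exec(t, `ALTER TABLE t.a SET ("sql_stats_automatic_partial_collection_min_stale_rows" = 5)`)
	sqlRun.Exec(t, `ALTER TABLE t.a SET ("sql_stats_automatic_partial_collection_fraction_stale_rows" = 0.05)`)
	// RESET clears the override so the cluster setting applies again:
	sqlRun.Exec(t, `ALTER TABLE t.a RESET ("sql_stats_automatic_partial_collection_enabled")`)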
fractionStaleRows := float64(100000000) explicitSettings = catpb.AutoStatsSettings{FractionStaleRows: &fractionStaleRows} refresher.maybeRefreshStats( - ctx, s.AppStopper(), descA.GetID(), &explicitSettings, 10 /* rowsAffected */, time.Microsecond, /* asOf */ + ctx, s.AppStopper(), descA.GetID(), &explicitSettings, 10 /* rowsAffected */, time.Microsecond /* asOf */, true, /* maybeRefreshPartialStats */ + ) + if err := checkStatsCount(ctx, cache, descA, 1 /* expectedFull */, 2 /* expectedPartial */); err != nil { + t.Fatal(err) + } + + // Do the same for partialFractionStaleRows to also prevent a partial refresh. + explicitSettings.PartialFractionStaleRows = &fractionStaleRows + refresher.maybeRefreshStats( + ctx, s.AppStopper(), descA.GetID(), &explicitSettings, 10 /* rowsAffected */, time.Microsecond /* asOf */, true, /* maybeRefreshPartialStats */ ) - if err := checkStatsCount(ctx, cache, descA, 1 /* expected */); err != nil { + if err := checkStatsCount(ctx, cache, descA, 1 /* expectedFull */, 2 /* expectedPartial */); err != nil { t.Fatal(err) } - // With rowsAffected=10, refreshing should work. Since there are more rows + // With rowsAffected=10, a full refresh should work. Since there are more rows // updated than exist in the table, the probability of a refresh is 100%. + // Partial stats should not be refreshed since full stats are being refreshed, + // and stale partial stats should be cleared. refresher.maybeRefreshStats( - ctx, s.AppStopper(), descA.GetID(), nil /* explicitSettings */, 10 /* rowsAffected */, time.Microsecond, /* asOf */ + ctx, s.AppStopper(), descA.GetID(), nil /* explicitSettings */, 10 /* rowsAffected */, time.Microsecond /* asOf */, true, /* maybeRefreshPartialStats */ ) - if err := checkStatsCount(ctx, cache, descA, 2 /* expected */); err != nil { + if err := checkStatsCount(ctx, cache, descA, 2 /* expectedFull */, 0 /* expectedPartial */); err != nil { t.Fatal(err) } @@ -134,9 +163,9 @@ func TestMaybeRefreshStats(t *testing.T) { descRoleOptions := desctestutils.TestingGetPublicTableDescriptor(s.DB(), codec, "system", "role_options") refresher.maybeRefreshStats( - ctx, s.AppStopper(), descRoleOptions.GetID(), nil /* explicitSettings */, 10000 /* rowsAffected */, time.Microsecond, /* asOf */ + ctx, s.AppStopper(), descRoleOptions.GetID(), nil /* explicitSettings */, 10000 /* rowsAffected */, time.Microsecond /* asOf */, true, /* maybeRefreshPartialStats */ ) - if err := checkStatsCount(ctx, cache, descRoleOptions, 5 /* expected */); err != nil { + if err := checkStatsCount(ctx, cache, descRoleOptions, 5 /* expectedFull */, 0 /* expectedPartial */); err != nil { t.Fatal(err) } @@ -144,9 +173,9 @@ func TestMaybeRefreshStats(t *testing.T) { descLease := desctestutils.TestingGetPublicTableDescriptor(s.DB(), codec, "system", "lease") refresher.maybeRefreshStats( - ctx, s.AppStopper(), descLease.GetID(), nil /* explicitSettings */, 10000 /* rowsAffected */, time.Microsecond, /* asOf */ + ctx, s.AppStopper(), descLease.GetID(), nil /* explicitSettings */, 10000 /* rowsAffected */, time.Microsecond /* asOf */, true, /* maybeRefreshPartialStats */ ) - if err := checkStatsCount(ctx, cache, descLease, 0 /* expected */); err != nil { + if err := checkStatsCount(ctx, cache, descLease, 0 /* expectedFull */, 0 /* expectedPartial */); err != nil { t.Fatal(err) } @@ -154,9 +183,9 @@ func TestMaybeRefreshStats(t *testing.T) { descTableStats := desctestutils.TestingGetPublicTableDescriptor(s.DB(), codec, "system", "table_statistics") refresher.maybeRefreshStats( - ctx, 
s.AppStopper(), descTableStats.GetID(), nil /* explicitSettings */, 10000 /* rowsAffected */, time.Microsecond, /* asOf */ + ctx, s.AppStopper(), descTableStats.GetID(), nil /* explicitSettings */, 10000 /* rowsAffected */, time.Microsecond /* asOf */, true, /* maybeRefreshPartialStats */ ) - if err := checkStatsCount(ctx, cache, descTableStats, 0 /* expected */); err != nil { + if err := checkStatsCount(ctx, cache, descTableStats, 0 /* expectedFull */, 0 /* expectedPartial */); err != nil { t.Fatal(err) } @@ -165,7 +194,7 @@ func TestMaybeRefreshStats(t *testing.T) { // TODO(rytaft): Should not enqueue views to begin with. descVW := desctestutils.TestingGetPublicTableDescriptor(s.DB(), codec, "t", "vw") refresher.maybeRefreshStats( - ctx, s.AppStopper(), descVW.GetID(), nil /* explicitSettings */, 0 /* rowsAffected */, time.Microsecond, /* asOf */ + ctx, s.AppStopper(), descVW.GetID(), nil /* explicitSettings */, 0 /* rowsAffected */, time.Microsecond /* asOf */, true, /* maybeRefreshPartialStats */ ) select { case <-refresher.mutations: @@ -405,9 +434,9 @@ func TestAverageRefreshTime(t *testing.T) { if err != nil { return err } - stat := mostRecentAutomaticStat(stats) + stat := mostRecentAutomaticFullStat(stats) if stat == nil { - return fmt.Errorf("no recent automatic statistic found") + return fmt.Errorf("no recent automatic full statistic found") } if !lessThan && stat.CreatedAt.After(curTime.Add(-1*expectedAge)) { return fmt.Errorf("most recent stat is less than %s old. Created at: %s Current time: %s", @@ -479,7 +508,7 @@ func TestAverageRefreshTime(t *testing.T) { }); err != nil { t.Fatal(err) } - if err := checkStatsCount(ctx, cache, table, 10 /* expected */); err != nil { + if err := checkStatsCount(ctx, cache, table, 10 /* expectedFull */, 0 /* expectedPartial */); err != nil { t.Fatal(err) } @@ -511,7 +540,7 @@ func TestAverageRefreshTime(t *testing.T) { }); err != nil { t.Fatal(err) } - if err := checkStatsCount(ctx, cache, table, 20 /* expected */); err != nil { + if err := checkStatsCount(ctx, cache, table, 20 /* expectedFull */, 0 /* expectedPartial */); err != nil { t.Fatal(err) } @@ -534,9 +563,9 @@ func TestAverageRefreshTime(t *testing.T) { // the statistics on table t. With rowsAffected=0, the probability of refresh // is 0. refresher.maybeRefreshStats( - ctx, s.AppStopper(), table.GetID(), nil /* explicitSettings */, 0 /* rowsAffected */, time.Microsecond, /* asOf */ + ctx, s.AppStopper(), table.GetID(), nil /* explicitSettings */, 0 /* rowsAffected */, time.Microsecond /* asOf */, true, /* maybeRefreshPartialStats */ ) - if err := checkStatsCount(ctx, cache, table, 20 /* expected */); err != nil { + if err := checkStatsCount(ctx, cache, table, 20 /* expectedFull */, 0 /* expectedPartial */); err != nil { t.Fatal(err) } @@ -562,7 +591,7 @@ func TestAverageRefreshTime(t *testing.T) { }); err != nil { t.Fatal(err) } - if err := checkStatsCount(ctx, cache, table, 30 /* expected */); err != nil { + if err := checkStatsCount(ctx, cache, table, 30 /* expectedFull */, 0 /* expectedPartial */); err != nil { t.Fatal(err) } @@ -584,9 +613,9 @@ func TestAverageRefreshTime(t *testing.T) { // remain (5 from column k and 5 from column v), since the old stats on k // and v were deleted. 
refresher.maybeRefreshStats( - ctx, s.AppStopper(), table.GetID(), nil /* explicitSettings */, 0 /* rowsAffected */, time.Microsecond, /* asOf */ + ctx, s.AppStopper(), table.GetID(), nil /* explicitSettings */, 0 /* rowsAffected */, time.Microsecond /* asOf */, true, /* maybeRefreshPartialStats */ ) - if err := checkStatsCount(ctx, cache, table, 10 /* expected */); err != nil { + if err := checkStatsCount(ctx, cache, table, 10 /* expectedFull */, 0 /* expectedPartial */); err != nil { t.Fatal(err) } } @@ -733,7 +762,7 @@ func TestNoRetryOnFailure(t *testing.T) { // Try to refresh stats on a table that doesn't exist. r.maybeRefreshStats( ctx, s.AppStopper(), 100 /* tableID */, nil /* explicitSettings */, math.MaxInt32, - time.Microsecond, /* asOfTime */ + time.Microsecond /* asOfTime */, false, /* maybeRefreshPartialStats */ ) // Ensure that we will not try to refresh tableID 100 again. @@ -880,21 +909,33 @@ func TestAnalyzeSystemTables(t *testing.T) { } func checkStatsCount( - ctx context.Context, cache *TableStatisticsCache, table catalog.TableDescriptor, expected int, + ctx context.Context, + cache *TableStatisticsCache, + table catalog.TableDescriptor, + expectedFull int, + expectedPartial int, ) error { return testutils.SucceedsSoonError(func() error { + cache.InvalidateTableStats(ctx, table.GetID()) + stats, err := cache.GetTableStats(ctx, table) if err != nil { return err } - var count int + var fullStatCount int + var partialStatCount int for i := range stats { - if stats[i].Name != jobspb.ForecastStatsName { - count++ + if stats[i].IsPartial() { + partialStatCount++ + } else if !(stats[i].IsForecast() || stats[i].IsMerged()) { + fullStatCount++ } } - if count != expected { - return fmt.Errorf("expected %d stat(s) but found %d", expected, count) + if fullStatCount != expectedFull { + return fmt.Errorf("expected %d full stat(s) but found %d", expectedFull, fullStatCount) + } + if partialStatCount != expectedPartial { + return fmt.Errorf("expected %d partial stat(s) but found %d", expectedPartial, partialStatCount) } return nil }) diff --git a/pkg/sql/stats/create_stats_job_test.go b/pkg/sql/stats/create_stats_job_test.go index 33c8bcb99003..85e492d16f7e 100644 --- a/pkg/sql/stats/create_stats_job_test.go +++ b/pkg/sql/stats/create_stats_job_test.go @@ -222,6 +222,9 @@ func TestCreateStatisticsCanBeCancelled(t *testing.T) { require.ErrorContains(t, err, "pq: query execution canceled") } +// TestAtMostOneRunningCreateStats tests that auto stat jobs (full or partial) +// don't run when a full stats job is running. It also tests that manual stat +// jobs (full or partial) are always allowed to run. func TestAtMostOneRunningCreateStats(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) @@ -250,28 +253,52 @@ func TestAtMostOneRunningCreateStats(t *testing.T) { sqlDB.QueryRow(t, `SELECT 'd.t'::regclass::int`).Scan(&tID) setTableID(tID) - // Start a CREATE STATISTICS run and wait until it's done one scan. 
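Before the individual scenarios, the admission rules that this test and TestBackgroundAutoPartialStats below pin down, summarized from the test expectations rather than a separate spec:

	// running job                 new auto full   new auto partial   new manual (full or extremes)
	// full (manual or auto)       rejected        rejected           allowed
	// auto partial, same table    allowed         rejected           allowed
	// auto partial, other table   allowed         allowed            allowed
	//
	// Rejected jobs fail with "another CREATE STATISTICS job is already running".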
+ autoFullStatsRunShouldFail := func() { + _, err := conn.Exec(`CREATE STATISTICS __auto__ FROM d.t`) + expected := "another CREATE STATISTICS job is already running" + if !testutils.IsError(err, expected) { + t.Fatalf("expected '%s' error, but got %v", expected, err) + } + } + autoPartialStatsRunShouldFail := func() { + _, err := conn.Exec(`CREATE STATISTICS __auto_partial__ FROM d.t USING EXTREMES`) + expected := "another CREATE STATISTICS job is already running" + if !testutils.IsError(err, expected) { + t.Fatalf("expected '%s' error, but got %v", expected, err) + } + } + + // Start a full stat run and let it complete so that future partial stats can + // be collected allowRequest = make(chan struct{}) - errCh := make(chan error) + initialFullStatErrCh := make(chan error) + go func() { + _, err := conn.Exec(`CREATE STATISTICS full_statistic FROM d.t`) + initialFullStatErrCh <- err + }() + close(allowRequest) + if err := <-initialFullStatErrCh; err != nil { + t.Fatalf("create stats job should have completed: %s", err) + } + + // Start a manual full stat run and wait until it's done one scan. This will + // be the stat job that runs in the background as we test the behavior of new + // stat jobs. + allowRequest = make(chan struct{}) + runningManualFullStatErrCh := make(chan error) go func() { _, err := conn.Exec(`CREATE STATISTICS s1 FROM d.t`) - errCh <- err + runningManualFullStatErrCh <- err }() select { case allowRequest <- struct{}{}: - case err := <-errCh: + case err := <-runningManualFullStatErrCh: t.Fatal(err) } - autoStatsRunShouldFail := func() { - _, err := conn.Exec(`CREATE STATISTICS __auto__ FROM d.t`) - expected := "another CREATE STATISTICS job is already running" - if !testutils.IsError(err, expected) { - t.Fatalf("expected '%s' error, but got %v", expected, err) - } - } - // Attempt to start an automatic stats run. It should fail. - autoStatsRunShouldFail() + // Attempt to start automatic full and partial stats runs. Both should fail. + autoFullStatsRunShouldFail() + autoPartialStatsRunShouldFail() // PAUSE JOB does not block until the job is paused but only requests it. // Wait until the job is set to paused. @@ -297,33 +324,181 @@ func TestAtMostOneRunningCreateStats(t *testing.T) { t.Fatal(err) } - // Starting another automatic stats run should still fail. - autoStatsRunShouldFail() + // Starting automatic full and partial stats run should still fail. + autoFullStatsRunShouldFail() + autoPartialStatsRunShouldFail() - // Attempt to start a regular stats run. It should succeed. - errCh2 := make(chan error) + // Attempt to start manual full and partial stat runs. Both should succeed. + manualFullStatErrCh := make(chan error) go func() { _, err := conn.Exec(`CREATE STATISTICS s2 FROM d.t`) - errCh2 <- err + manualFullStatErrCh <- err + }() + manualPartialStatErrCh := make(chan error) + go func() { + _, err := conn.Exec(`CREATE STATISTICS ps1 FROM d.t USING EXTREMES`) + manualPartialStatErrCh <- err }() + select { case allowRequest <- struct{}{}: - case err := <-errCh: + case err := <-runningManualFullStatErrCh: + t.Fatal(err) + case err := <-manualFullStatErrCh: t.Fatal(err) - case err := <-errCh2: + case err := <-manualPartialStatErrCh: t.Fatal(err) } + + // Allow the running full stat job and the new full and partial stat jobs to complete. close(allowRequest) - // Verify that the second job completed successfully. - if err := <-errCh2; err != nil { + // Verify that the manual full and partial stat jobs completed successfully. 
+ if err := <-manualFullStatErrCh; err != nil { t.Fatalf("create stats job should have completed: %s", err) } + if err := <-manualPartialStatErrCh; err != nil { + t.Fatalf("create partial stats job should have completed: %s", err) + } - // Verify that the first job completed successfully. + // Verify that the running full stat job completed successfully. sqlDB.Exec(t, fmt.Sprintf("RESUME JOB %d", jobID)) jobutils.WaitForJobToSucceed(t, sqlDB, jobID) - <-errCh + <-runningManualFullStatErrCh +} + +// TestBackgroundAutoPartialStats tests that a running auto partial stats job +// doesn't prevent any new full or partial stat jobs from running, except for +// auto partial stat jobs on the same table. +func TestBackgroundAutoPartialStats(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + var allowRequest chan struct{} + + filter, setTableID := createStatsRequestFilter(&allowRequest) + var params base.TestClusterArgs + params.ServerArgs.Knobs.JobsTestingKnobs = jobs.NewTestingKnobsWithShortIntervals() + params.ServerArgs.Knobs.Store = &kvserver.StoreTestingKnobs{ + TestingRequestFilter: filter, + } + params.ServerArgs.DefaultTestTenant = base.TestIsForStuffThatShouldWorkWithSecondaryTenantsButDoesntYet(109379) + + ctx := context.Background() + const nodes = 1 + tc := testcluster.StartTestCluster(t, nodes, params) + defer tc.Stopper().Stop(ctx) + conn := tc.ApplicationLayer(0).SQLConn(t) + sqlDB := sqlutils.MakeSQLRunner(conn) + + sqlDB.Exec(t, `CREATE DATABASE d`) + sqlDB.Exec(t, `CREATE TABLE d.t1 (x INT PRIMARY KEY)`) + sqlDB.Exec(t, `CREATE TABLE d.t2 (x INT PRIMARY KEY)`) + sqlDB.Exec(t, `INSERT INTO d.t1 SELECT generate_series(1,1000)`) + sqlDB.Exec(t, `INSERT INTO d.t2 SELECT generate_series(1,1000)`) + var t1ID descpb.ID + sqlDB.QueryRow(t, `SELECT 'd.t1'::regclass::int`).Scan(&t1ID) + setTableID(t1ID) + + // Collect full stats on both tables so that future partial stats can be + // collected + allowRequest = make(chan struct{}) + close(allowRequest) + if _, err := conn.Exec(`CREATE STATISTICS full_statistic FROM d.t1`); err != nil { + t.Fatalf("create stats job should have completed: %s", err) + } + if _, err := conn.Exec(`CREATE STATISTICS full_statistic FROM d.t2`); err != nil { + t.Fatalf("create stats job should have completed: %s", err) + } + + // Start an auto partial stat run on t1 and wait until it's done one scan. + // This will be the stat job that runs in the background as we test the + // behavior of new stat jobs. + allowRequest = make(chan struct{}) + runningAutoPartialStatErrCh := make(chan error) + go func() { + _, err := conn.Exec(`CREATE STATISTICS __auto_partial__ FROM d.t1 USING EXTREMES`) + runningAutoPartialStatErrCh <- err + }() + select { + case allowRequest <- struct{}{}: + case err := <-runningAutoPartialStatErrCh: + t.Fatal(err) + } + + // Attempt to start a simultaneous auto full stat run. It should succeed. + autoFullStatErrCh := make(chan error) + go func() { + _, err := conn.Exec(`CREATE STATISTICS __auto__ FROM d.t1`) + autoFullStatErrCh <- err + }() + + select { + case allowRequest <- struct{}{}: + case err := <-runningAutoPartialStatErrCh: + t.Fatal(err) + case err := <-autoFullStatErrCh: + t.Fatal(err) + } + + // Allow both auto stat jobs to complete. + close(allowRequest) + + // Verify that both jobs completed successfully. 
+ if err := <-autoFullStatErrCh; err != nil { + t.Fatalf("create auto full stats job should have completed: %s", err) + } + if err := <-runningAutoPartialStatErrCh; err != nil { + t.Fatalf("create auto partial stats job should have completed: %s", err) + } + + // Start another auto partial stat run and wait until it's done one scan. + allowRequest = make(chan struct{}) + runningAutoPartialStatErrCh = make(chan error) + go func() { + _, err := conn.Exec(`CREATE STATISTICS __auto_partial__ FROM d.t1 USING EXTREMES`) + runningAutoPartialStatErrCh <- err + }() + select { + case allowRequest <- struct{}{}: + case err := <-runningAutoPartialStatErrCh: + t.Fatal(err) + } + + // Attempt to start a simultaneous auto partial stat run on the same table. + // It should fail. + _, err := conn.Exec(`CREATE STATISTICS __auto_partial__ FROM d.t1 USING EXTREMES`) + expected := "another CREATE STATISTICS job is already running" + if !testutils.IsError(err, expected) { + t.Fatalf("expected '%s' error, but got %v", expected, err) + } + + // Attempt to start a simultaneous auto partial stat run on a different table. + // It should succeed. + autoPartialStatErrCh := make(chan error) + go func() { + _, err := conn.Exec(`CREATE STATISTICS __auto_partial__ FROM d.t2 USING EXTREMES`) + autoPartialStatErrCh <- err + }() + + select { + case allowRequest <- struct{}{}: + case err = <-runningAutoPartialStatErrCh: + t.Fatal(err) + case err = <-autoPartialStatErrCh: + t.Fatal(err) + } + + // Allow both auto partial stat jobs to complete. + close(allowRequest) + + // Verify that both jobs completed successfully. + if err = <-autoPartialStatErrCh; err != nil { + t.Fatalf("create auto partial stats job should have completed: %s", err) + } + if err = <-runningAutoPartialStatErrCh; err != nil { + t.Fatalf("create auto partial stats job should have completed: %s", err) + } } func TestDeleteFailedJob(t *testing.T) { diff --git a/pkg/sql/stats/histogram.go b/pkg/sql/stats/histogram.go index 0f3cd39f2e05..3b72cc2a5d7e 100644 --- a/pkg/sql/stats/histogram.go +++ b/pkg/sql/stats/histogram.go @@ -27,6 +27,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/rowenc/valueside" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" + "github.com/cockroachdb/cockroach/pkg/util/container/heap" "github.com/cockroachdb/cockroach/pkg/util/encoding" "github.com/cockroachdb/errors" ) @@ -50,6 +51,26 @@ var HistogramClusterMode = settings.RegisterBoolSetting( true, settings.WithPublic) +// HistogramMCVsClusterMode controls the cluster setting for enabling +// inclusion of the most common values as buckets in the histogram. +var HistogramMCVsClusterMode = settings.RegisterBoolSetting( + settings.ApplicationLevel, + "sql.stats.histogram_buckets.include_most_common_values.enabled", + "whether to include most common values as histogram buckets", + true, + settings.WithPublic) + +// MaxFractionHistogramMCVs controls the cluster setting for the maximum +// fraction of buckets in a histogram to use for tracking most common values. +// This setting only matters if HistogramMCVsClusterMode is set to true. +var MaxFractionHistogramMCVs = settings.RegisterFloatSetting( + settings.ApplicationLevel, + "sql.stats.histogram_buckets.max_fraction_most_common_values", + "maximum fraction of histogram buckets to use for most common values", + 0.1, + settings.NonNegativeFloatWithMaximum(1), + settings.WithPublic) + // HistogramVersion identifies histogram versions. 
type HistogramVersion uint32 @@ -167,7 +188,7 @@ func EquiDepthHistogram( return HistogramData{}, nil, errors.Errorf("histogram requires distinctCount > 0") } - h, err := equiDepthHistogramWithoutAdjustment(ctx, compareCtx, samples, numRows, maxBuckets) + h, err := equiDepthHistogramWithoutAdjustment(ctx, compareCtx, samples, numRows, maxBuckets, st) if err != nil { return HistogramData{}, nil, err } @@ -232,13 +253,13 @@ func ConstructExtremesHistogram( var upperHist histogram var err error if len(lowerSamples) > 0 { - lowerHist, err = equiDepthHistogramWithoutAdjustment(ctx, compareCtx, lowerSamples, estNumRowsLower, maxBuckets/2) + lowerHist, err = equiDepthHistogramWithoutAdjustment(ctx, compareCtx, lowerSamples, estNumRowsLower, maxBuckets/2, st) if err != nil { return HistogramData{}, nil, err } } if len(upperSamples) > 0 { - upperHist, err = equiDepthHistogramWithoutAdjustment(ctx, compareCtx, upperSamples, estNumRowsUpper, maxBuckets/2) + upperHist, err = equiDepthHistogramWithoutAdjustment(ctx, compareCtx, upperSamples, estNumRowsUpper, maxBuckets/2, st) if err != nil { return HistogramData{}, nil, err } @@ -258,6 +279,7 @@ func equiDepthHistogramWithoutAdjustment( samples tree.Datums, numRows int64, maxBuckets int, + st *cluster.Settings, ) (histogram, error) { numSamples := len(samples) if maxBuckets < 2 { @@ -283,6 +305,22 @@ func equiDepthHistogramWithoutAdjustment( if maxBuckets > numSamples { numBuckets = numSamples } + + // Find the most common values in the set of samples. + // mcvs contains the indexes in samples of the last instance of each of the + // most common values (MCVs), in index order. + // j keeps track of the current MCV and advances as the MCVs are accounted for. + var mcvs []int + j := 0 + if HistogramMCVsClusterMode.Get(&st.SV) { + maxMCVs := getMaxMCVs(st, numBuckets) + var err error + mcvs, err = getMCVs(ctx, compareCtx, samples, maxMCVs) + if err != nil { + return histogram{}, err + } + } + h := histogram{buckets: make([]cat.HistogramBucket, 0, numBuckets)} lowerBound := samples[0] @@ -294,6 +332,18 @@ func equiDepthHistogramWithoutAdjustment( if i == 0 || numSamplesInBucket < 1 { numSamplesInBucket = 1 } + // Use a MCV as the upper bound if it would otherwise be lost in the bucket. + // As a result, the bucket may be smaller than the target for an equi-depth + // histogram, but this ensures we have accurate counts for the heavy hitters. + if j < len(mcvs) && mcvs[j] < i+numSamplesInBucket-1 { + numSamplesInBucket = mcvs[j] - i + 1 + j++ + // If this would have been the last bucket, we need to add one more bucket + // to accommodate the rest of the samples. + if b == numBuckets-1 { + numBuckets++ + } + } upper := samples[i+numSamplesInBucket-1] // numLess is the number of samples less than upper (in this bucket). numLess := 0 @@ -314,6 +364,11 @@ func equiDepthHistogramWithoutAdjustment( break } } + // If we happened to land on a heavy hitter, advance j to mark the MCV as + // accounted for. + if j < len(mcvs) && mcvs[j] == i+numSamplesInBucket-1 { + j++ + } // Estimate the number of rows equal to the upper bound and less than the // upper bound, as well as the number of distinct values less than the upper @@ -740,6 +795,76 @@ func (h *histogram) addOuterBuckets( } } +// getMaxMCVs returns the maximum number of most common values. 
+// Postgres uses a more complex formula to determine the number of MCVs, +// (see https://github.com/postgres/postgres/blob/REL_17_STABLE/src/backend/commands/analyze.c#L2934) +// but start simple for now with just a fraction of the buckets defined +// by MaxFractionHistogramMCVs. +func getMaxMCVs(st *cluster.Settings, maxBuckets int) int { + maxFraction := MaxFractionHistogramMCVs.Get(&st.SV) + return int(float64(maxBuckets) * maxFraction) +} + +// getMCVs returns the indexes in samples of the last instance of each of the +// most common values, in index order. For example, if samples contains +// [ a, a, a, b, c, c ], and maxMCVs is 2, getMCVs returns [ 2, 5 ]. +func getMCVs( + ctx context.Context, compareCtx tree.CompareContext, samples tree.Datums, maxMCVs int, +) ([]int, error) { + if len(samples) == 0 { + return nil, errors.AssertionFailedf("empty samples passed to getMCVs") + } + + // Use a heap to find the most common values. + h := make(MCVHeap, 0, maxMCVs+1) + heap.Init[MCV](&h) + count := 1 + distinctValues := 0 + for i := 1; i < len(samples); i++ { + if c, err := samples[i].Compare(ctx, compareCtx, samples[i-1]); err != nil { + return nil, err + } else if c < 0 { + return nil, errors.AssertionFailedf("%+v", "samples not sorted") + } else if c > 0 { + heap.Push[MCV](&h, MCV{ + idx: i - 1, + count: count, + }) + if len(h) > maxMCVs { + heap.Pop[MCV](&h) + } + count = 1 + distinctValues++ + } else { + count++ + } + } + // Add the last value. + heap.Push[MCV](&h, MCV{ + idx: len(samples) - 1, + count: count, + }) + if len(h) > maxMCVs { + heap.Pop[MCV](&h) + } + distinctValues++ + + // Only keep the values that are actually common. If the frequency of any + // value is less than or equal to the average sample frequency, remove it. + expectedCount := len(samples) / distinctValues + for len(h) > 0 && h[0].count <= expectedCount { + heap.Pop[MCV](&h) + } + + // Return just the indexes in increasing order. + mcvs := make([]int, 0, len(h)) + for i := range h { + mcvs = append(mcvs, h[i].idx) + } + sort.Ints(mcvs) + return mcvs, nil +} + // toHistogramData converts a histogram to a HistogramData protobuf with the // given type. func (h histogram) toHistogramData( diff --git a/pkg/sql/stats/histogram_test.go b/pkg/sql/stats/histogram_test.go index c9b4d301808e..e8447172590c 100644 --- a/pkg/sql/stats/histogram_test.go +++ b/pkg/sql/stats/histogram_test.go @@ -38,11 +38,12 @@ type expBucket struct { func TestEquiDepthHistogram(t *testing.T) { testCases := []struct { - samples []int64 - numRows int64 - distinctCount int64 - maxBuckets int - buckets []expBucket + samples []int64 + numRows int64 + distinctCount int64 + maxBuckets int + maxFractionMCVs float64 + buckets []expBucket }{ { samples: []int64{1, 2, 4, 5, 5, 9}, @@ -228,6 +229,122 @@ func TestEquiDepthHistogram(t *testing.T) { maxBuckets: 2, buckets: []expBucket{}, }, + { + samples: []int64{ + 1, 2, 2, 3, 4, 5, 6, 7, 8, 8, 9, 10, + }, + numRows: 12, + distinctCount: 10, + maxBuckets: 3, + buckets: []expBucket{ + { + // Bucket contains 1. + upper: 1, numEq: 1, numLess: 0, distinctLess: 0, + }, + { + // Bucket contains 2, 2, 3, 4, 5. + upper: 5, numEq: 1, numLess: 4, distinctLess: 3, + }, + { + // Bucket contains 6, 7, 8, 8, 9, 10. + upper: 10, numEq: 1, numLess: 5, distinctLess: 4, + }, + }, + }, + { + // Same test as the previous one, but using 67% of buckets as MCVs. As a + // result, the bucket boundaries are shifted and we have an additional + // bucket output. 
+ samples: []int64{ + 1, 2, 2, 3, 4, 5, 6, 7, 8, 8, 9, 10, + }, + numRows: 12, + distinctCount: 10, + maxBuckets: 3, + maxFractionMCVs: 0.67, + buckets: []expBucket{ + { + // Bucket contains 1. + upper: 1, numEq: 1, numLess: 0, distinctLess: 0, + }, + { + // Bucket contains 2, 2. + upper: 2, numEq: 2, numLess: 0, distinctLess: 0, + }, + { + // Bucket contains 3, 4, 5, 6, 7, 8, 8. + upper: 8, numEq: 2, numLess: 6, distinctLess: 5, + }, + { + // Bucket contains 9, 10. + upper: 10, numEq: 1, numLess: 1, distinctLess: 1, + }, + }, + }, + { + // With 5 buckets, no MCVs. + samples: []int64{ + 1, 2, 2, 3, 4, 5, 6, 7, 8, 8, 9, 10, + }, + numRows: 12, + distinctCount: 10, + maxBuckets: 5, + buckets: []expBucket{ + { + // Bucket contains 1. + upper: 1, numEq: 1, numLess: 0, distinctLess: 0, + }, + { + // Bucket contains 2, 2. + upper: 2, numEq: 2, numLess: 0, distinctLess: 0, + }, + { + // Bucket contains 3, 4, 5, + upper: 5, numEq: 1, numLess: 2, distinctLess: 2, + }, + { + // Bucket contains 6, 7, 8, 8. + upper: 8, numEq: 2, numLess: 2, distinctLess: 2, + }, + { + // Bucket contains 9, 10. + upper: 10, numEq: 1, numLess: 1, distinctLess: 1, + }, + }, + }, + { + // When we add MCVs, the output doesn't change since the MCVs already + // align with bucket boundaries. + samples: []int64{ + 1, 2, 2, 3, 4, 5, 6, 7, 8, 8, 9, 10, + }, + numRows: 12, + distinctCount: 10, + maxBuckets: 5, + maxFractionMCVs: 0.4, + buckets: []expBucket{ + { + // Bucket contains 1. + upper: 1, numEq: 1, numLess: 0, distinctLess: 0, + }, + { + // Bucket contains 2, 2. + upper: 2, numEq: 2, numLess: 0, distinctLess: 0, + }, + { + // Bucket contains 3, 4, 5, + upper: 5, numEq: 1, numLess: 2, distinctLess: 2, + }, + { + // Bucket contains 6, 7, 8, 8. + upper: 8, numEq: 2, numLess: 2, distinctLess: 2, + }, + { + // Bucket contains 9, 10. + upper: 10, numEq: 1, numLess: 1, distinctLess: 1, + }, + }, + }, } ctx := context.Background() @@ -245,6 +362,8 @@ func TestEquiDepthHistogram(t *testing.T) { samples[i] = tree.NewDInt(tree.DInt(val)) } + MaxFractionHistogramMCVs.Override(ctx, &st.SV, tc.maxFractionMCVs) + h, _, err := EquiDepthHistogram( ctx, evalCtx, types.Int, samples, tc.numRows, tc.distinctCount, tc.maxBuckets, st, ) @@ -932,6 +1051,79 @@ func TestAdjustCounts(t *testing.T) { }) } +func TestGetMCVs(t *testing.T) { + testCases := []struct { + samples []int64 + maxMCVs int + expected []int + }{ + { + samples: []int64{1, 1, 2, 4, 5, 5, 9, 9}, + maxMCVs: 2, + expected: []int{1, 7}, + }, + { + // Only one value is common. + samples: []int64{1, 2, 4, 5, 5, 9}, + maxMCVs: 2, + expected: []int{4}, + }, + { + // No value is more common than any other. + samples: []int64{1, 2, 4, 5, 9}, + maxMCVs: 2, + expected: []int{}, + }, + { + samples: []int64{1, 1, 2, 4, 5, 5, 5, 9, 9}, + maxMCVs: 2, + expected: []int{6, 8}, + }, + { + samples: []int64{1, 1, 2, 4, 5, 5, 9, 9, 9}, + maxMCVs: 1, + expected: []int{8}, + }, + { + samples: []int64{1, 1, 1, 2, 4, 5, 5, 9, 9}, + maxMCVs: 1, + expected: []int{2}, + }, + { + // Only 3 values are common. 
+ samples: []int64{1, 1, 2, 4, 5, 5, 9, 9, 9}, + maxMCVs: 4, + expected: []int{1, 5, 8}, + }, + { + samples: []int64{1, 2, 3, 3}, + maxMCVs: 0, + expected: []int{}, + }, + } + + ctx := context.Background() + st := cluster.MakeTestingClusterSettings() + evalCtx := eval.NewTestingEvalContext(st) + + for i, tc := range testCases { + t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + samples := make(tree.Datums, len(tc.samples)) + for i := range samples { + samples[i] = tree.NewDInt(tree.DInt(tc.samples[i])) + } + + mcvs, err := getMCVs(ctx, evalCtx, samples, tc.maxMCVs) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(mcvs, tc.expected) { + t.Errorf("actual mcvs (%v) != expected mcvs (%v)", mcvs, tc.expected) + } + }) + } +} + func makeEnums(t *testing.T) tree.Datums { t.Helper() enumMembers := []string{"a", "b", "c", "d", "e"} @@ -1019,7 +1211,7 @@ func validateHistogramBuckets(t *testing.T, expected []expBucket, h HistogramDat } exp := expected[i] if int64(*val.(*tree.DInt)) != int64(exp.upper) { - t.Errorf("bucket %d: incorrect boundary %d, expected %d", i, val, exp.upper) + t.Errorf("bucket %d: incorrect boundary %d, expected %d", i, *val.(*tree.DInt), exp.upper) } if b.NumEq != exp.numEq { t.Errorf("bucket %d: incorrect EqRows %d, expected %d", i, b.NumEq, exp.numEq) diff --git a/pkg/sql/stats/most_common_values.go b/pkg/sql/stats/most_common_values.go new file mode 100644 index 000000000000..40fb29aa2b1d --- /dev/null +++ b/pkg/sql/stats/most_common_values.go @@ -0,0 +1,41 @@ +// Copyright 2024 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package stats + +// MCV contains an index into a slice of samples, and the count of samples equal +// to the sample at idx. +type MCV struct { + idx int + count int +} + +// An MCVHeap is used to track the most common values in a slice of samples. +type MCVHeap []MCV + +func (h MCVHeap) Len() int { return len(h) } +func (h MCVHeap) Less(i, j int) bool { + return h[i].count < h[j].count +} +func (h MCVHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] } + +func (h *MCVHeap) Push(x MCV) { + // Push and Pop use pointer receivers because they modify the slice's length, + // not just its contents. + *h = append(*h, x) +} + +func (h *MCVHeap) Pop() MCV { + old := *h + n := len(old) + x := old[n-1] + *h = old[0 : n-1] + return x +} diff --git a/pkg/sql/stats/quantile.go b/pkg/sql/stats/quantile.go index abc4e6137f23..4ddb7fe686a5 100644 --- a/pkg/sql/stats/quantile.go +++ b/pkg/sql/stats/quantile.go @@ -358,10 +358,11 @@ func isValidCount(x float64) bool { // toQuantileValue converts from a datum to a float suitable for use in a quantile // function. It differs from eval.PerformCast in a few ways: -// 1. It supports conversions that are not legal casts (e.g. DATE to FLOAT). -// 2. It errors on NaN and infinite values because they will break our model. -// fromQuantileValue is the inverse of this function, and together they should -// support round-trip conversions. +// 1. It supports conversions that are not legal casts (e.g. DATE to FLOAT). +// 2. It errors on NaN and infinite values because they will break our model. +// fromQuantileValue is the inverse of this function, and together they should +// support round-trip conversions. 
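A distilled view of how getMCVs uses the MCVHeap introduced above (generic heap API as used in this patch): because Less orders by ascending count, the heap root is always the least common candidate, so a push followed by a conditional pop keeps exactly the K most common values seen so far. Here counts is a hypothetical precomputed []MCV; getMCVs builds the entries on the fly while scanning the sorted samples.

	h := make(MCVHeap, 0, k+1)
	heap.Init[MCV](&h)
	for _, v := range counts {
		heap.Push[MCV](&h, v)
		if len(h) > k {
			heap.Pop[MCV](&h) // evict the current minimum count
		}
	}

One pass this way costs O(n log K) over the sorted samples instead of sorting every distinct value by frequency.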
+// // TODO(michae2): Add support for DECIMAL, TIME, TIMETZ, and INTERVAL. func toQuantileValue(d tree.Datum) (float64, error) { switch v := d.(type) { @@ -380,14 +381,20 @@ func toQuantileValue(d tree.Datum) (float64, error) { // converting back. return float64(v.PGEpochDays()), nil case *tree.DTimestamp: - if v.Equal(pgdate.TimeInfinity) || v.Equal(pgdate.TimeNegativeInfinity) { - return 0, tree.ErrFloatOutOfRange + if v.Equal(pgdate.TimeInfinity) { + return pgdate.TimeInfinitySec, nil + } + if v.Equal(pgdate.TimeNegativeInfinity) { + return pgdate.TimeNegativeInfinitySec, nil } return float64(v.Unix()) + float64(v.Nanosecond())*1e-9, nil case *tree.DTimestampTZ: // TIMESTAMPTZ doesn't store a timezone, so this is the same as TIMESTAMP. - if v.Equal(pgdate.TimeInfinity) || v.Equal(pgdate.TimeNegativeInfinity) { - return 0, tree.ErrFloatOutOfRange + if v.Equal(pgdate.TimeInfinity) { + return pgdate.TimeInfinitySec, nil + } + if v.Equal(pgdate.TimeNegativeInfinity) { + return pgdate.TimeNegativeInfinitySec, nil } return float64(v.Unix()) + float64(v.Nanosecond())*1e-9, nil default: @@ -395,17 +402,6 @@ func toQuantileValue(d tree.Datum) (float64, error) { } } -var ( - // quantileMinTimestamp is an alternative minimum finite DTimestamp value to - // avoid the problems around TimeNegativeInfinity, see #41564. - quantileMinTimestamp = tree.MinSupportedTime.Add(time.Second) - quantileMinTimestampSec = float64(quantileMinTimestamp.Unix()) - // quantileMaxTimestamp is an alternative maximum finite DTimestamp value to - // avoid the problems around TimeInfinity, see #41564. - quantileMaxTimestamp = tree.MaxSupportedTime.Add(-1 * time.Second).Truncate(time.Second) - quantileMaxTimestampSec = float64(quantileMaxTimestamp.Unix()) -) - // fromQuantileValue converts from a quantile value back to a datum suitable for // use in a histogram. It is the inverse of toQuantileValue. It differs from // eval.PerformCast in a few ways: @@ -471,11 +467,11 @@ func fromQuantileValue(colType *types.T, val float64) (tree.Datum, error) { case types.TimestampFamily, types.TimestampTZFamily: sec, frac := math.Modf(val) var t time.Time - // Clamp to (our alternative finite) DTimestamp bounds. - if sec <= quantileMinTimestampSec { - t = quantileMinTimestamp - } else if sec >= quantileMaxTimestampSec { - t = quantileMaxTimestamp + // Clamp to DTimestamp bounds. + if sec <= pgdate.TimeNegativeInfinitySec { + t = pgdate.TimeNegativeInfinity + } else if sec >= pgdate.TimeInfinitySec { + t = pgdate.TimeInfinity } else { t = timeutil.Unix(int64(sec), int64(frac*1e9)) } diff --git a/pkg/sql/stats/quantile_test.go b/pkg/sql/stats/quantile_test.go index 7fef6712431b..9d342cf3cd20 100644 --- a/pkg/sql/stats/quantile_test.go +++ b/pkg/sql/stats/quantile_test.go @@ -198,9 +198,9 @@ func randBounds(colType *types.T, rng *rand.Rand, num int) tree.Datums { case types.TimestampFamily, types.TimestampTZFamily: roundTo := tree.TimeFamilyPrecisionToRoundDuration(colType.Precision()) var lo, hi int - if quantileMaxTimestampSec < math.MaxInt/2 { - lo = int(quantileMinTimestampSec) - hi = int(quantileMaxTimestampSec) + if pgdate.TimeInfinitySec < math.MaxInt/2 { + lo = int(pgdate.TimeNegativeInfinitySec) + hi = int(pgdate.TimeInfinitySec) } else { // Make sure we won't overflow in randInts (i.e. make sure that // hi - lo + 1 <= math.MaxInt which requires -2 for hi). 
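The net effect of the timestamp changes in this file is that the ±infinity sentinels now round-trip instead of erroring. A sketch of the new behavior (names from timeutil/pgdate as used above; this is illustrative, not a test from the patch):

	v, err := toQuantileValue(&tree.DTimestamp{Time: pgdate.TimeInfinity})
	if err != nil {
		panic(err) // before this change: tree.ErrFloatOutOfRange
	}
	// v == pgdate.TimeInfinitySec
	d, err := fromQuantileValue(types.Timestamp, v)
	if err != nil {
		panic(err)
	}
	// d.(*tree.DTimestamp).Time.Equal(pgdate.TimeInfinity) holds, and any value
	// at or beyond the sentinels clamps to them, so ±math.MaxFloat64 also map
	// to the infinities (as TestQuantileValueRoundTripOverflow below verifies).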
@@ -798,23 +798,13 @@ func TestQuantileValueRoundTrip(t *testing.T) { }, { typ: types.Timestamp, - dat: &tree.DTimestamp{Time: quantileMinTimestamp}, - val: quantileMinTimestampSec, - }, - { - typ: types.Timestamp, - dat: &tree.DTimestamp{Time: quantileMaxTimestamp}, - val: quantileMaxTimestampSec, + dat: &tree.DTimestamp{Time: pgdate.TimeInfinity}, + val: pgdate.TimeInfinitySec, }, { typ: types.Timestamp, dat: &tree.DTimestamp{Time: pgdate.TimeNegativeInfinity}, - err: true, - }, - { - typ: types.Timestamp, - dat: &tree.DTimestamp{Time: pgdate.TimeInfinity}, - err: true, + val: pgdate.TimeNegativeInfinitySec, }, { typ: types.TimestampTZ, @@ -823,23 +813,13 @@ func TestQuantileValueRoundTrip(t *testing.T) { }, { typ: types.TimestampTZ, - dat: &tree.DTimestampTZ{Time: quantileMinTimestamp}, - val: quantileMinTimestampSec, - }, - { - typ: types.TimestampTZ, - dat: &tree.DTimestampTZ{Time: quantileMaxTimestamp}, - val: quantileMaxTimestampSec, + dat: &tree.DTimestampTZ{Time: pgdate.TimeInfinity}, + val: pgdate.TimeInfinitySec, }, { typ: types.TimestampTZ, dat: &tree.DTimestampTZ{Time: pgdate.TimeNegativeInfinity}, - err: true, - }, - { - typ: types.TimestampTZ, - dat: &tree.DTimestampTZ{Time: pgdate.TimeInfinity}, - err: true, + val: pgdate.TimeNegativeInfinitySec, }, } ctx := context.Background() @@ -1074,50 +1054,50 @@ func TestQuantileValueRoundTripOverflow(t *testing.T) { { typ: types.Timestamp, val: float64(pgdate.TimeNegativeInfinity.Unix()), - dat: &tree.DTimestamp{Time: quantileMinTimestamp}, - res: quantileMinTimestampSec, + dat: &tree.DTimestamp{Time: pgdate.TimeNegativeInfinity}, + res: pgdate.TimeNegativeInfinitySec, }, { typ: types.Timestamp, val: float64(pgdate.TimeInfinity.Unix()), - dat: &tree.DTimestamp{Time: quantileMaxTimestamp}, - res: quantileMaxTimestampSec, + dat: &tree.DTimestamp{Time: pgdate.TimeInfinity}, + res: pgdate.TimeInfinitySec, }, { typ: types.Timestamp, val: -math.MaxFloat64, - dat: &tree.DTimestamp{Time: quantileMinTimestamp}, - res: quantileMinTimestampSec, + dat: &tree.DTimestamp{Time: pgdate.TimeNegativeInfinity}, + res: pgdate.TimeNegativeInfinitySec, }, { typ: types.Timestamp, val: math.MaxFloat64, - dat: &tree.DTimestamp{Time: quantileMaxTimestamp}, - res: quantileMaxTimestampSec, + dat: &tree.DTimestamp{Time: pgdate.TimeInfinity}, + res: pgdate.TimeInfinitySec, }, { typ: types.TimestampTZ, val: float64(pgdate.TimeNegativeInfinity.Unix()), - dat: &tree.DTimestampTZ{Time: quantileMinTimestamp}, - res: quantileMinTimestampSec, + dat: &tree.DTimestampTZ{Time: pgdate.TimeNegativeInfinity}, + res: pgdate.TimeNegativeInfinitySec, }, { typ: types.TimestampTZ, val: float64(pgdate.TimeInfinity.Unix()), - dat: &tree.DTimestampTZ{Time: quantileMaxTimestamp}, - res: quantileMaxTimestampSec, + dat: &tree.DTimestampTZ{Time: pgdate.TimeInfinity}, + res: pgdate.TimeInfinitySec, }, { typ: types.TimestampTZ, val: -math.MaxFloat64, - dat: &tree.DTimestampTZ{Time: quantileMinTimestamp}, - res: quantileMinTimestampSec, + dat: &tree.DTimestampTZ{Time: pgdate.TimeNegativeInfinity}, + res: pgdate.TimeNegativeInfinitySec, }, { typ: types.TimestampTZ, val: math.MaxFloat64, - dat: &tree.DTimestampTZ{Time: quantileMaxTimestamp}, - res: quantileMaxTimestampSec, + dat: &tree.DTimestampTZ{Time: pgdate.TimeInfinity}, + res: pgdate.TimeInfinitySec, }, } ctx := context.Background() diff --git a/pkg/sql/storageparam/tablestorageparam/table_storage_param.go b/pkg/sql/storageparam/tablestorageparam/table_storage_param.go index ddf3d129224b..9af17568de11 100644 --- 
a/pkg/sql/storageparam/tablestorageparam/table_storage_param.go +++ b/pkg/sql/storageparam/tablestorageparam/table_storage_param.go @@ -512,6 +512,28 @@ var tableParams = map[string]tableParam{ }), onReset: autoStatsTableSettingResetFunc, }, + catpb.AutoPartialStatsEnabledTableSettingName: { + onSet: autoStatsEnabledSettingFunc, + onReset: autoStatsTableSettingResetFunc, + }, + catpb.AutoPartialStatsMinStaleTableSettingName: { + onSet: autoStatsMinStaleRowsSettingFunc(func(intVal int64) error { + if intVal < 0 { + return errors.Newf("cannot be set to a negative value: %d", intVal) + } + return nil + }), + onReset: autoStatsTableSettingResetFunc, + }, + catpb.AutoPartialStatsFractionStaleTableSettingName: { + onSet: autoStatsFractionStaleRowsSettingFunc(func(floatVal float64) error { + if floatVal < 0 { + return errors.Newf("cannot set to a negative value: %f", floatVal) + } + return nil + }), + onReset: autoStatsTableSettingResetFunc, + }, `sql_stats_forecasts_enabled`: { onSet: func( ctx context.Context, po *Setter, semaCtx *tree.SemaContext, evalCtx *eval.Context, key string, datum tree.Datum, @@ -652,8 +674,16 @@ func autoStatsEnabledSettingFunc( if po.TableDesc.AutoStatsSettings == nil { po.TableDesc.AutoStatsSettings = &catpb.AutoStatsSettings{} } - po.TableDesc.AutoStatsSettings.Enabled = &boolVal - return nil + + switch key { + case catpb.AutoStatsEnabledTableSettingName: + po.TableDesc.AutoStatsSettings.Enabled = &boolVal + return nil + case catpb.AutoPartialStatsEnabledTableSettingName: + po.TableDesc.AutoStatsSettings.PartialEnabled = &boolVal + return nil + } + return errors.AssertionFailedf("unable to set table setting %s", key) } func autoStatsMinStaleRowsSettingFunc( @@ -670,8 +700,16 @@ func autoStatsMinStaleRowsSettingFunc( if err = validateFunc(intVal); err != nil { return errors.Wrapf(err, "invalid integer value for %s", key) } - po.TableDesc.AutoStatsSettings.MinStaleRows = &intVal - return nil + + switch key { + case catpb.AutoStatsMinStaleTableSettingName: + po.TableDesc.AutoStatsSettings.MinStaleRows = &intVal + return nil + case catpb.AutoPartialStatsMinStaleTableSettingName: + po.TableDesc.AutoStatsSettings.PartialMinStaleRows = &intVal + return nil + } + return errors.AssertionFailedf("unable to set table setting %s", key) } } @@ -690,8 +728,16 @@ func autoStatsFractionStaleRowsSettingFunc( if err = validateFunc(floatVal); err != nil { return errors.Wrapf(err, "invalid float value for %s", key) } - po.TableDesc.AutoStatsSettings.FractionStaleRows = &floatVal - return nil + + switch key { + case catpb.AutoStatsFractionStaleTableSettingName: + po.TableDesc.AutoStatsSettings.FractionStaleRows = &floatVal + return nil + case catpb.AutoPartialStatsFractionStaleTableSettingName: + po.TableDesc.AutoStatsSettings.PartialFractionStaleRows = &floatVal + return nil + } + return errors.AssertionFailedf("unable to set table setting %s", key) } } @@ -712,8 +758,17 @@ func autoStatsTableSettingResetFunc( case catpb.AutoStatsFractionStaleTableSettingName: autoStatsSettings.FractionStaleRows = nil return nil + case catpb.AutoPartialStatsEnabledTableSettingName: + autoStatsSettings.PartialEnabled = nil + return nil + case catpb.AutoPartialStatsMinStaleTableSettingName: + autoStatsSettings.PartialMinStaleRows = nil + return nil + case catpb.AutoPartialStatsFractionStaleTableSettingName: + autoStatsSettings.PartialFractionStaleRows = nil + return nil } - return errors.Newf("unable to reset table setting %s", key) + return errors.AssertionFailedf("unable to reset table setting %s", 
key) } // Set implements the Setter interface. diff --git a/pkg/sql/tablemetadatacache/BUILD.bazel b/pkg/sql/tablemetadatacache/BUILD.bazel new file mode 100644 index 000000000000..279b2de02b24 --- /dev/null +++ b/pkg/sql/tablemetadatacache/BUILD.bazel @@ -0,0 +1,46 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "tablemetadatacache", + srcs = ["update_table_metadata_cache_job.go"], + importpath = "github.com/cockroachdb/cockroach/pkg/sql/tablemetadatacache", + visibility = ["//visibility:public"], + deps = [ + "//pkg/jobs", + "//pkg/jobs/jobspb", + "//pkg/settings/cluster", + "//pkg/sql", + "//pkg/sql/isql", + "//pkg/util/log", + "//pkg/util/metric", + "//pkg/util/timeutil", + "@com_github_cockroachdb_errors//:errors", + "@com_github_prometheus_client_model//go", + ], +) + +go_test( + name = "tablemetadatacache_test", + srcs = [ + "main_test.go", + "update_table_metadata_cache_job_test.go", + ], + deps = [ + ":tablemetadatacache", + "//pkg/base", + "//pkg/jobs", + "//pkg/jobs/jobspb", + "//pkg/security/securityassets", + "//pkg/security/securitytest", + "//pkg/server", + "//pkg/server/serverpb", + "//pkg/testutils", + "//pkg/testutils/serverutils", + "//pkg/testutils/sqlutils", + "//pkg/testutils/testcluster", + "//pkg/util/leaktest", + "//pkg/util/log", + "@com_github_cockroachdb_errors//:errors", + "@com_github_stretchr_testify//require", + ], +) diff --git a/pkg/sql/tablemetadatacache/main_test.go b/pkg/sql/tablemetadatacache/main_test.go new file mode 100644 index 000000000000..e2945624b558 --- /dev/null +++ b/pkg/sql/tablemetadatacache/main_test.go @@ -0,0 +1,29 @@ +// Copyright 2024 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package tablemetadatacache_test + +import ( + "os" + "testing" + + "github.com/cockroachdb/cockroach/pkg/security/securityassets" + "github.com/cockroachdb/cockroach/pkg/security/securitytest" + "github.com/cockroachdb/cockroach/pkg/server" + "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" + "github.com/cockroachdb/cockroach/pkg/testutils/testcluster" +) + +func TestMain(m *testing.M) { + securityassets.SetLoader(securitytest.EmbeddedAssets) + serverutils.InitTestServerFactory(server.TestServerFactory) + serverutils.InitTestClusterFactory(testcluster.TestClusterFactory) + os.Exit(m.Run()) +} diff --git a/pkg/sql/tablemetadatacache/update_table_metadata_cache_job.go b/pkg/sql/tablemetadatacache/update_table_metadata_cache_job.go new file mode 100644 index 000000000000..6fc9cd22cb22 --- /dev/null +++ b/pkg/sql/tablemetadatacache/update_table_metadata_cache_job.go @@ -0,0 +1,137 @@ +// Copyright 2024 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package tablemetadatacache + +import ( + "context" + "fmt" + + "github.com/cockroachdb/cockroach/pkg/jobs" + "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" + "github.com/cockroachdb/cockroach/pkg/settings/cluster" + "github.com/cockroachdb/cockroach/pkg/sql" + "github.com/cockroachdb/cockroach/pkg/sql/isql" + "github.com/cockroachdb/cockroach/pkg/util/log" + "github.com/cockroachdb/cockroach/pkg/util/metric" + "github.com/cockroachdb/cockroach/pkg/util/timeutil" + "github.com/cockroachdb/errors" + io_prometheus_client "github.com/prometheus/client_model/go" +) + +type tableMetadataUpdateJobResumer struct { + job *jobs.Job +} + +var _ jobs.Resumer = (*tableMetadataUpdateJobResumer)(nil) + +// Resume is part of the jobs.Resumer interface. +func (j *tableMetadataUpdateJobResumer) Resume(ctx context.Context, execCtxI interface{}) error { + log.Infof(ctx, "starting table metadata update job") + j.job.MarkIdle(true) + + execCtx := execCtxI.(sql.JobExecContext) + metrics := execCtx.ExecCfg().JobRegistry.MetricsStruct(). + JobSpecificMetrics[jobspb.TypeUpdateTableMetadataCache].(TableMetadataUpdateJobMetrics) + + // We must reset the job's num runs to 0 so that it doesn't get + // delayed by the job system's exponential backoff strategy. + if err := j.job.NoTxn().Update(ctx, func(txn isql.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater) error { + if md.RunStats != nil && md.RunStats.NumRuns > 0 { + ju.UpdateRunStats(0, md.RunStats.LastRun) + } + return nil + }); err != nil { + log.Errorf(ctx, "%s", err.Error()) + } + + // Channel used to signal the job should run. + signalCh := execCtx.ExecCfg().SQLStatusServer.GetUpdateTableMetadataCacheSignal() + + for { + select { + case <-signalCh: + log.Infof(ctx, "running table metadata update job") + metrics.NumRuns.Inc(1) + j.updateLastRunTime(ctx) + + // TODO(xinhaoz): implement the actual table metadata update logic. + + case <-ctx.Done(): + return ctx.Err() + } + } +} + +// updateLastRunTime updates the last_run_time field in the job's progress +// details and writes the job progress as a JSON string to the running status. +func (j *tableMetadataUpdateJobResumer) updateLastRunTime(ctx context.Context) { + if err := j.job.NoTxn().Update(ctx, func(txn isql.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater) error { + lrt := timeutil.Now() + ju.UpdateProgress(&jobspb.Progress{ + RunningStatus: fmt.Sprintf("last metadata update at %s", lrt), + Details: &jobspb.Progress_TableMetadataCache{ + TableMetadataCache: &jobspb.UpdateTableMetadataCacheProgress{LastRunTime: lrt}, + }, + }) + return nil + }); err != nil { + log.Errorf(ctx, "%s", err.Error()) + } +} + +// OnFailOrCancel implements jobs.Resumer. +func (j *tableMetadataUpdateJobResumer) OnFailOrCancel( + ctx context.Context, execCtx interface{}, jobErr error, +) error { + if jobs.HasErrJobCanceled(jobErr) { + err := errors.NewAssertionErrorWithWrappedErrf( + jobErr, "update table metadata cache job is not cancelable", + ) + log.Errorf(ctx, "%v", err) + } + return nil +} + +// CollectProfile implements jobs.Resumer. 
+func (j *tableMetadataUpdateJobResumer) CollectProfile( + ctx context.Context, execCtx interface{}, +) error { + return nil +} + +type TableMetadataUpdateJobMetrics struct { + NumRuns *metric.Counter +} + +func (m TableMetadataUpdateJobMetrics) MetricStruct() {} + +func newTableMetadataUpdateJobMetrics() metric.Struct { + return TableMetadataUpdateJobMetrics{ + NumRuns: metric.NewCounter(metric.Metadata{ + Name: "tablemetadatacache.update_job.runs", + Help: "The total number of runs of the update table metadata job.", + Measurement: "Executions", + Unit: metric.Unit_COUNT, + MetricType: io_prometheus_client.MetricType_COUNTER, + }), + } +} + +func init() { + jobs.RegisterConstructor( + jobspb.TypeUpdateTableMetadataCache, + func(job *jobs.Job, settings *cluster.Settings) jobs.Resumer { + return &tableMetadataUpdateJobResumer{job: job} + }, + jobs.DisablesTenantCostControl, + jobs.WithJobMetrics(newTableMetadataUpdateJobMetrics()), + ) +} diff --git a/pkg/sql/tablemetadatacache/update_table_metadata_cache_job_test.go b/pkg/sql/tablemetadatacache/update_table_metadata_cache_job_test.go new file mode 100644 index 000000000000..902f921aab92 --- /dev/null +++ b/pkg/sql/tablemetadatacache/update_table_metadata_cache_job_test.go @@ -0,0 +1,87 @@ +// Copyright 2024 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package tablemetadatacache_test + +import ( + "context" + "strings" + "testing" + + "github.com/cockroachdb/cockroach/pkg/base" + "github.com/cockroachdb/cockroach/pkg/jobs" + "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" + "github.com/cockroachdb/cockroach/pkg/server/serverpb" + "github.com/cockroachdb/cockroach/pkg/sql/tablemetadatacache" + "github.com/cockroachdb/cockroach/pkg/testutils" + "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" + "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" + "github.com/cockroachdb/cockroach/pkg/util/leaktest" + "github.com/cockroachdb/cockroach/pkg/util/log" + "github.com/cockroachdb/errors" + "github.com/stretchr/testify/require" +) + +// TestUpdateTableMetadataCacheJobRunsOnRPCTrigger tests that +// signalling the update table metadata cache job via the status +// server triggers the job to run. +func TestUpdateTableMetadataCacheJobRunsOnRPCTrigger(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + ctx := context.Background() + tc := serverutils.StartCluster(t, 3, base.TestClusterArgs{}) + defer tc.Stopper().Stop(context.Background()) + + conn := sqlutils.MakeSQLRunner(tc.ServerConn(0)) + + // Get the node id that claimed the update job. We'll issue the + // RPC to a node that doesn't own the job to test that the RPC can + // propagate the request to the correct node. 
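Both assertions in the test body that follows are wrapped in testutils.SucceedsSoon, which retries a closure until it stops returning an error. A stripped-down sketch of that polling idiom, with hypothetical timeout and backoff constants (the real helper integrates with *testing.T and has its own retry options):

    package main

    import (
        "fmt"
        "time"
    )

    // succeedsSoon polls fn with exponential backoff until it returns nil
    // or the timeout elapses; the last error explains the final failure.
    func succeedsSoon(timeout time.Duration, fn func() error) error {
        deadline := time.Now().Add(timeout)
        wait := 5 * time.Millisecond
        for {
            err := fn()
            if err == nil {
                return nil
            }
            if time.Now().After(deadline) {
                return fmt.Errorf("condition never met: %w", err)
            }
            time.Sleep(wait)
            if wait < time.Second {
                wait *= 2
            }
        }
    }

    func main() {
        start := time.Now()
        err := succeedsSoon(5*time.Second, func() error {
            if time.Since(start) < 20*time.Millisecond {
                return fmt.Errorf("job hasn't run yet")
            }
            return nil
        })
        fmt.Println(err) // <nil>
    }

Polling like this keeps the test free of fixed sleeps: the closure re-reads cluster state on every attempt, so it passes as soon as the job has actually been claimed and run.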
+ var nodeID int + testutils.SucceedsSoon(t, func() error { + row := conn.Query(t, ` +SELECT claim_instance_id FROM system.jobs +WHERE id = $1 AND claim_instance_id IS NOT NULL`, jobs.UpdateTableMetadataCacheJobID) + if !row.Next() { + return errors.New("no node has claimed the job") + } + require.NoError(t, row.Scan(&nodeID)) + + rpcGatewayNode := (nodeID + 1) % 3 + _, err := tc.Server(rpcGatewayNode).GetStatusClient(t).UpdateTableMetadataCache(ctx, + &serverpb.UpdateTableMetadataCacheRequest{Local: false}) + if err != nil { + return err + } + // The job shouldn't be busy. + return nil + }) + + metrics := tc.Server(0).JobRegistry().(*jobs.Registry).MetricsStruct(). + JobSpecificMetrics[jobspb.TypeUpdateTableMetadataCache].(tablemetadatacache.TableMetadataUpdateJobMetrics) + testutils.SucceedsSoon(t, func() error { + if metrics.NumRuns.Count() != 1 { + return errors.New("job hasn't run yet") + } + row := conn.Query(t, + `SELECT running_status FROM crdb_internal.jobs WHERE job_id = $1 AND running_status IS NOT NULL`, + jobs.UpdateTableMetadataCacheJobID) + if !row.Next() { + return errors.New("last_run_time not updated") + } + var runningStatus string + require.NoError(t, row.Scan(&runningStatus)) + if !strings.Contains(runningStatus, "last metadata update at") { + return errors.New("last run time not updated") + } + return nil + }) +} diff --git a/pkg/sql/tablewriter_upsert_opt.go b/pkg/sql/tablewriter_upsert_opt.go index 2fb979360ac0..10ee266d9111 100644 --- a/pkg/sql/tablewriter_upsert_opt.go +++ b/pkg/sql/tablewriter_upsert_opt.go @@ -267,7 +267,7 @@ func (tu *optTableUpserter) updateConflictingRow( // via GenerateInsertRow(). // - for the fetched part, we assume that the data in the table is // correct already. - if err := enforceLocalColumnConstraints(updateValues, tu.updateCols); err != nil { + if err := enforceNotNullConstraints(updateValues, tu.updateCols); err != nil { return err } diff --git a/pkg/sql/telemetry_logging_test.go b/pkg/sql/telemetry_logging_test.go index d7a2cfa19af9..19d0421cda47 100644 --- a/pkg/sql/telemetry_logging_test.go +++ b/pkg/sql/telemetry_logging_test.go @@ -888,7 +888,7 @@ func TestTelemetryLoggingInternalConsoleEnabled(t *testing.T) { } if found != tc.logInternalConsole { - t.Errorf(tc.errorMessage) + t.Error(tc.errorMessage) } } } diff --git a/pkg/sql/tenant_accessors.go b/pkg/sql/tenant_accessors.go index e0eacca9631f..447c46714451 100644 --- a/pkg/sql/tenant_accessors.go +++ b/pkg/sql/tenant_accessors.go @@ -14,11 +14,9 @@ import ( "context" "github.com/cockroachdb/cockroach/pkg/keys" - "github.com/cockroachdb/cockroach/pkg/kv/kvpb" "github.com/cockroachdb/cockroach/pkg/multitenant/mtinfo" "github.com/cockroachdb/cockroach/pkg/multitenant/mtinfopb" "github.com/cockroachdb/cockroach/pkg/roachpb" - "github.com/cockroachdb/cockroach/pkg/settings" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" @@ -188,99 +186,3 @@ func GetExtendedTenantInfo( return res, nil } - -var defaultTenantConfigTemplate = settings.RegisterStringSetting( - settings.ApplicationLevel, - "sql.create_tenant.default_template", - "tenant to use as configuration template when LIKE is not specified in CREATE VIRTUAL CLUSTER", - // We use the empty string so that no template is used by default - // (i.e. empty proto, no setting overrides). 
- "", - settings.WithName("sql.create_virtual_cluster.default_template"), - settings.WithReportable(true), -) - -// GetTenantTemplate loads the tenant template corresponding to the -// provided origin tenant. If info is nil, likeTenantID is zero and -// likeTenantName is empty, the default template is returned. -func GetTenantTemplate( - ctx context.Context, - settings *cluster.Settings, - txn isql.Txn, - info *mtinfopb.TenantInfo, - likeTenantID uint64, - likeTenantName string, -) (res *mtinfopb.TenantInfoWithUsage, err error) { - if info != nil && (likeTenantID != 0 || likeTenantName != "") { - // Sanity check - return nil, errors.AssertionFailedf("programming error: cannot pass both default info struct and tenant reference") - } - if info == nil { - if likeTenantID == 0 && likeTenantName == "" { - // No LIKE at all. Do we have something in the cluster setting? - tmplName := defaultTenantConfigTemplate.Get(&settings.SV) - if tmplName == "" { - // No template at all - just use an empty protobuf. - return &mtinfopb.TenantInfoWithUsage{}, nil - } - // Use the template specified in the setting. - info, err = GetTenantRecordByName(ctx, settings, txn, roachpb.TenantName(tmplName)) - if err != nil { - return nil, errors.Wrapf(err, "retrieving default tenant configuration template %q", tmplName) - } - } else { - if likeTenantID != 0 && likeTenantName != "" { - return nil, errors.AssertionFailedf("programming error: conflicting input tenant spec from caller") - } - // No pre-loaded info, but we have a LIKE clause. Is it by-ID or by-Name? - if likeTenantID != 0 { - // By-ID. - tid, err := roachpb.MakeTenantID(likeTenantID) - if err != nil { - return nil, errors.Wrap(err, "invalid LIKE tenant ID") - } - info, err = GetTenantRecordByID(ctx, txn, tid, settings) - if err != nil { - return nil, errors.Wrap(err, "retrieving LIKE tenant record") - } - } else { - // By-name. - info, err = GetTenantRecordByName(ctx, settings, txn, roachpb.TenantName(likeTenantName)) - if err != nil { - return nil, errors.Wrap(err, "retrieving LIKE tenant record") - } - } - } - } - - // For now, prevent use of the record for the system tenant. The - // user may have the mistaken assumption that "LIKE system" would - // create a tenant with all the special cases of the system tenant, - // and we do not guarantee that for now. - if roachpb.MustMakeTenantID(info.ID).IsSystem() { - return nil, errors.WithHint( - pgerror.New(pgcode.WrongObjectType, "using the system tenant as config template"), - "Create another secondary tenant as template, grant it extra capabilities, and then use that as config template.") - } - - // Now we have our info field. Expand it. - tmplInfo, err := GetExtendedTenantInfo(ctx, txn, info) - if err != nil { - return nil, errors.Wrap(err, "retrieving tenant template details") - } - - // Clear out the fields we can't reuse in a fresh tenant record. 
- tmplInfo.ID = 0 - tmplInfo.Name = "" - tmplInfo.DataState = mtinfopb.DataStateReady - tmplInfo.ServiceMode = mtinfopb.ServiceModeNone - tmplInfo.DroppedName = "" - tmplInfo.DeprecatedID = 0 - tmplInfo.DeprecatedDataState = 0 - tmplInfo.PhysicalReplicationConsumerJobID = 0 - if tmplInfo.Usage != nil { - tmplInfo.Usage.Consumption = kvpb.TenantConsumption{} - } - - return tmplInfo, nil -} diff --git a/pkg/sql/tenant_creation.go b/pkg/sql/tenant_creation.go index 9bfa6303bf90..2c32739e478d 100644 --- a/pkg/sql/tenant_creation.go +++ b/pkg/sql/tenant_creation.go @@ -21,6 +21,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/config/zonepb" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvpb" "github.com/cockroachdb/cockroach/pkg/multitenant/mtinfopb" "github.com/cockroachdb/cockroach/pkg/roachpb" @@ -67,9 +68,7 @@ func (p *planner) CreateTenant( return tid, pgerror.Newf(pgcode.ProgramLimitExceeded, "tenant ID %d out of range", *ctcfg.ID) } - configTemplate := mtinfopb.TenantInfoWithUsage{} - - return p.createTenantInternal(ctx, ctcfg, &configTemplate) + return p.createTenantInternal(ctx, ctcfg) } type createTenantConfig struct { @@ -80,7 +79,7 @@ type createTenantConfig struct { } func (p *planner) createTenantInternal( - ctx context.Context, ctcfg createTenantConfig, configTemplate *mtinfopb.TenantInfoWithUsage, + ctx context.Context, ctcfg createTenantConfig, ) (tid roachpb.TenantID, err error) { if p.EvalContext().TxnReadOnly { return tid, readOnlyError("create_tenant()") @@ -109,11 +108,7 @@ func (p *planner) createTenantInternal( serviceMode = v } - info := configTemplate - - // Override the template fields for a fresh tenant. The other - // template fields remain unchanged (i.e. we reuse the template's - // configuration). + var info mtinfopb.TenantInfoWithUsage info.ID = tenantID info.Name = name // We synchronously initialize the tenant's keyspace below, so @@ -134,7 +129,7 @@ func (p *planner) createTenantInternal( p.ExecCfg().Settings, p.InternalSQLTxn(), p.ExecCfg().SpanConfigKVAccessor.WithISQLTxn(ctx, p.InternalSQLTxn()), - info, + &info, initialTenantZoneConfig, ctcfg.IfNotExists, p.ExecCfg().TenantTestingKnobs, @@ -146,30 +141,40 @@ func (p *planner) createTenantInternal( return tid, nil } - // Retrieve the possibly auto-generated ID. - tenantID = info.ID - tid = roachpb.MustMakeTenantID(tenantID) + return BootstrapTenant(ctx, p.execCfg, p.Txn(), info, initialTenantZoneConfig) +} + +// BootstrapTenant bootstraps the span of the newly created tenant identified in +// the passed tenant info using the passed zone config. +func BootstrapTenant( + ctx context.Context, + execCfg *ExecutorConfig, + txn *kv.Txn, + info mtinfopb.TenantInfoWithUsage, + zfcg *zonepb.ZoneConfig, +) (roachpb.TenantID, error) { + tid := roachpb.MustMakeTenantID(info.ID) // Initialize the tenant's keyspace. var tenantVersion clusterversion.ClusterVersion - codec := keys.MakeSQLCodec(roachpb.MustMakeTenantID(tenantID)) + codec := keys.MakeSQLCodec(tid) var kvs []roachpb.KeyValue var splits []roachpb.RKey var bootstrapVersionOverride clusterversion.Key switch { - case p.EvalContext().TestingKnobs.TenantLogicalVersionKeyOverride != 0: + case execCfg.EvalContextTestingKnobs.TenantLogicalVersionKeyOverride != 0: // An override was passed using testing knobs. Bootstrap the cluster // using this override. 
- tenantVersion.Version = p.EvalContext().TestingKnobs.TenantLogicalVersionKeyOverride.Version() - bootstrapVersionOverride = p.EvalContext().TestingKnobs.TenantLogicalVersionKeyOverride - case p.EvalContext().Settings.Version.IsActive(ctx, clusterversion.Latest): + tenantVersion.Version = execCfg.EvalContextTestingKnobs.TenantLogicalVersionKeyOverride.Version() + bootstrapVersionOverride = execCfg.EvalContextTestingKnobs.TenantLogicalVersionKeyOverride + case execCfg.Settings.Version.IsActive(ctx, clusterversion.Latest): // The cluster is running the latest version. // Use this version to create the tenant and bootstrap it using the host // cluster's bootstrapping logic. tenantVersion.Version = clusterversion.Latest.Version() bootstrapVersionOverride = 0 - case p.EvalContext().Settings.Version.IsActive(ctx, clusterversion.PreviousRelease): + case execCfg.Settings.Version.IsActive(ctx, clusterversion.PreviousRelease): // If the previous major version is active, use that version to create the // tenant and bootstrap it just like the previous major version binary // would, using hardcoded initial values. @@ -182,12 +187,12 @@ func (p *planner) createTenantInternal( } initialValuesOpts := bootstrap.InitialValuesOpts{ - DefaultZoneConfig: initialTenantZoneConfig, - DefaultSystemZoneConfig: initialTenantZoneConfig, + DefaultZoneConfig: zfcg, + DefaultSystemZoneConfig: zfcg, OverrideKey: bootstrapVersionOverride, Codec: codec, } - kvs, splits, err = initialValuesOpts.GenerateInitialValues() + kvs, splits, err := initialValuesOpts.GenerateInitialValues() if err != nil { return tid, err } @@ -207,11 +212,11 @@ func (p *planner) createTenantInternal( kvs = append(kvs, tenantSettingKV) } - b := p.Txn().NewBatch() + b := txn.NewBatch() for _, kv := range kvs { b.CPut(kv.Key, &kv.Value, nil) } - if err := p.Txn().Run(ctx, b); err != nil { + if err := txn.Run(ctx, b); err != nil { if errors.HasType(err, (*kvpb.ConditionFailedError)(nil)) { return tid, errors.Wrap(err, "programming error: "+ "tenant already exists but was not in system.tenants table") @@ -232,9 +237,9 @@ func (p *planner) createTenantInternal( // quickly (but asynchronously) be recreated once the KV layer notices the // updated system.tenants table in the gossipped SystemConfig, or if using // the span configs infrastructure, in `system.span_configurations`. 
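The bootstrap schema version above is chosen by a cascading gate: a testing-knob override wins, then the latest cluster version if it is active, then the previous release. A small sketch of that selection pattern, with plain integers standing in for clusterversion keys:

    package main

    import "fmt"

    // pickBootstrapVersion mirrors the switch in BootstrapTenant: prefer an
    // explicit override, then the newest version the cluster has activated.
    func pickBootstrapVersion(override, latest, previous int, isActive func(int) bool) (int, error) {
        switch {
        case override != 0:
            return override, nil
        case isActive(latest):
            return latest, nil
        case isActive(previous):
            return previous, nil
        }
        return 0, fmt.Errorf("cluster version predates the previous release")
    }

    func main() {
        activeUpTo := func(limit int) func(int) bool {
            return func(v int) bool { return v <= limit }
        }
        // Mid-upgrade: 25 is not active yet, so the tenant is bootstrapped
        // the same way a binary from release 24 would have done it.
        v, _ := pickBootstrapVersion(0, 25, 24, activeUpTo(24))
        fmt.Println(v) // 24
    }

Note also that the initial keys are written with b.CPut(kv.Key, &kv.Value, nil): the conditional put fails if any key already exists, which is what surfaces a half-created tenant keyspace as the explicit "tenant already exists but was not in system.tenants table" error instead of a silent overwrite.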
- expTime := p.ExecCfg().Clock.Now().Add(time.Hour.Nanoseconds(), 0) + expTime := execCfg.Clock.Now().Add(time.Hour.Nanoseconds(), 0) for _, key := range splits { - if err := p.ExecCfg().DB.AdminSplit(ctx, key, expTime); err != nil { + if err := execCfg.DB.AdminSplit(ctx, key, expTime); err != nil { return tid, err } } diff --git a/pkg/sql/testdata/telemetry/isolation_level b/pkg/sql/testdata/telemetry/isolation_level index 9f1fe628db66..611b7501190c 100644 --- a/pkg/sql/testdata/telemetry/isolation_level +++ b/pkg/sql/testdata/telemetry/isolation_level @@ -3,7 +3,7 @@ SET CLUSTER SETTING sql.txn.read_committed_isolation.enabled = false ---- exec -SET CLUSTER SETTING sql.txn.snapshot_isolation.enabled = false +SET CLUSTER SETTING sql.txn.repeatable_read_isolation.enabled = false ---- feature-list @@ -95,7 +95,7 @@ SET CLUSTER SETTING sql.txn.read_committed_isolation.enabled = true ---- exec -SET CLUSTER SETTING sql.txn.snapshot_isolation.enabled = true +SET CLUSTER SETTING sql.txn.repeatable_read_isolation.enabled = true ---- feature-usage @@ -112,7 +112,6 @@ feature-usage SET default_transaction_isolation = 'repeatable read' ---- sql.txn.isolation.executed_at.read_committed -sql.txn.isolation.upgraded_from.repeatable_read feature-usage SET default_transaction_isolation = 'snapshot' @@ -134,7 +133,6 @@ feature-usage SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL REPEATABLE READ ---- sql.txn.isolation.executed_at.read_committed -sql.txn.isolation.upgraded_from.repeatable_read feature-usage SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL SNAPSHOT @@ -156,7 +154,6 @@ feature-usage BEGIN TRANSACTION ISOLATION LEVEL REPEATABLE READ; COMMIT ---- sql.txn.isolation.executed_at.snapshot -sql.txn.isolation.upgraded_from.repeatable_read feature-usage BEGIN TRANSACTION ISOLATION LEVEL SNAPSHOT; COMMIT @@ -180,7 +177,6 @@ feature-usage BEGIN; SET TRANSACTION ISOLATION LEVEL REPEATABLE READ; COMMIT ---- sql.txn.isolation.executed_at.snapshot -sql.txn.isolation.upgraded_from.repeatable_read feature-usage BEGIN; SET TRANSACTION ISOLATION LEVEL SNAPSHOT; COMMIT diff --git a/pkg/sql/testdata/telemetry_logging/logging/stmts_per_txn_limit b/pkg/sql/testdata/telemetry_logging/logging/stmts_per_txn_limit index 52662c965ad8..cd0c61429585 100644 --- a/pkg/sql/testdata/telemetry_logging/logging/stmts_per_txn_limit +++ b/pkg/sql/testdata/telemetry_logging/logging/stmts_per_txn_limit @@ -228,4 +228,3 @@ BEGIN; SELECT 1; SELECT 2; SELECT 3; SELECT 4; COMMIT; "TransactionFingerprintID": "2831371359051261045", "User": "root" } - diff --git a/pkg/sql/tests/BUILD.bazel b/pkg/sql/tests/BUILD.bazel index de4791c6ec8f..abf560efa855 100644 --- a/pkg/sql/tests/BUILD.bazel +++ b/pkg/sql/tests/BUILD.bazel @@ -41,7 +41,6 @@ go_test( "hash_sharded_test.go", "impure_builtin_test.go", "inverted_index_test.go", - "job_statement_timeout_test.go", "kv_test.go", "main_test.go", "max_open_txns_test.go", diff --git a/pkg/sql/tests/job_statement_timeout_test.go b/pkg/sql/tests/job_statement_timeout_test.go deleted file mode 100644 index cddc23d74d79..000000000000 --- a/pkg/sql/tests/job_statement_timeout_test.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2024 The Cockroach Authors. -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. 
-// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package tests_test - -import ( - "context" - "strings" - "testing" - "time" - - "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/jobs" - "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" - "github.com/cockroachdb/cockroach/pkg/testutils" - "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" - "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" - "github.com/cockroachdb/cockroach/pkg/util/leaktest" - "github.com/cockroachdb/cockroach/pkg/util/log" - "github.com/stretchr/testify/assert" -) - -func TestBackgroundJobIgnoresStatementTimeout(t *testing.T) { - defer leaktest.AfterTest(t)() - defer log.Scope(t).Close(t) - ctx := context.Background() - - testutils.RunTrueAndFalse(t, "test-in-both-legacy-and-declarative-schema-changer", func( - t *testing.T, useDeclarativeSchemaChanger bool, - ) { - s, sqlDB, _ := serverutils.StartServer(t, base.TestServerArgs{ - Knobs: base.TestingKnobs{ - JobsTestingKnobs: &jobs.TestingKnobs{ - BeforeUpdate: func(orig, updated jobs.JobMetadata) error { - isCreateIndex := strings.Contains(orig.Payload.Description, "CREATE UNIQUE INDEX bar") - if orig.Payload.Type() != jobspb.TypeNewSchemaChange && !isCreateIndex { - return nil - } - time.Sleep(5 * time.Second) - return nil - }, - }, - }, - }) - defer s.Stopper().Stop(ctx) - tDB := sqlutils.MakeSQLRunner(sqlDB) - - // Set up - if useDeclarativeSchemaChanger { - tDB.Exec(t, "SET use_declarative_schema_changer = on;") - } else { - tDB.Exec(t, "SET use_declarative_schema_changer = off;") - } - tDB.Exec(t, ` -CREATE TABLE t( - a INT PRIMARY KEY, - b INT -); -INSERT INTO t VALUES (1); -`) - - var jobCount int - // We don't set statement timeout earlier because - // migration-job-find-already-completed will fail unless a fairly large - // timeout is set. 
- tDB.Exec(t, "SET CLUSTER SETTING sql.defaults.statement_timeout = '3s'") - tDB.ExecSucceedsSoon(t, `CREATE UNIQUE INDEX bar ON t (b)`) - q := `SELECT count(*) -FROM crdb_internal.jobs -WHERE job_type ILIKE '%SCHEMA CHANGE%' AND status = 'succeeded' -AND description ILIKE 'CREATE UNIQUE INDEX bar%'` - tDB.QueryRow(t, q).Scan(&jobCount) - // Assert that the job has completed - assert.Equal(t, 1, jobCount) - }) -} diff --git a/pkg/sql/tests/testdata/initial_keys b/pkg/sql/tests/testdata/initial_keys index a7d1396c85eb..1acfec4798af 100644 --- a/pkg/sql/tests/testdata/initial_keys +++ b/pkg/sql/tests/testdata/initial_keys @@ -1,6 +1,6 @@ initial-keys tenant=system ---- -131 keys: +133 keys: /Table/3/1/1/2/1 /Table/3/1/3/2/1 /Table/3/1/4/2/1 @@ -58,6 +58,7 @@ initial-keys tenant=system /Table/3/1/64/2/1 /Table/3/1/65/2/1 /Table/3/1/66/2/1 + /Table/3/1/67/2/1 /Table/5/1/0/2/1 /Table/5/1/1/2/1 /Table/5/1/11/2/1 @@ -116,6 +117,7 @@ initial-keys tenant=system /NamespaceTable/30/1/1/29/"statement_diagnostics_requests"/4/1 /NamespaceTable/30/1/1/29/"statement_execution_insights"/4/1 /NamespaceTable/30/1/1/29/"statement_statistics"/4/1 + /NamespaceTable/30/1/1/29/"table_metadata"/4/1 /NamespaceTable/30/1/1/29/"table_statistics"/4/1 /NamespaceTable/30/1/1/29/"task_payloads"/4/1 /NamespaceTable/30/1/1/29/"tenant_id_seq"/4/1 @@ -132,7 +134,7 @@ initial-keys tenant=system /NamespaceTable/30/1/1/29/"zones"/4/1 /Table/48/1/0/0 /Table/63/1/0/0 -62 splits: +63 splits: /Table/3 /Table/4 /Table/5 @@ -195,10 +197,11 @@ initial-keys tenant=system /Table/64 /Table/65 /Table/66 + /Table/67 initial-keys tenant=5 ---- -122 keys: +124 keys: /Tenant/5/Table/3/1/1/2/1 /Tenant/5/Table/3/1/3/2/1 /Tenant/5/Table/3/1/4/2/1 @@ -256,6 +259,7 @@ initial-keys tenant=5 /Tenant/5/Table/3/1/64/2/1 /Tenant/5/Table/3/1/65/2/1 /Tenant/5/Table/3/1/66/2/1 + /Tenant/5/Table/3/1/67/2/1 /Tenant/5/Table/5/1/0/2/1 /Tenant/5/Table/7/1/0/0 /Tenant/5/Table/8/1/1/0 @@ -305,6 +309,7 @@ initial-keys tenant=5 /Tenant/5/NamespaceTable/30/1/1/29/"statement_diagnostics_requests"/4/1 /Tenant/5/NamespaceTable/30/1/1/29/"statement_execution_insights"/4/1 /Tenant/5/NamespaceTable/30/1/1/29/"statement_statistics"/4/1 + /Tenant/5/NamespaceTable/30/1/1/29/"table_metadata"/4/1 /Tenant/5/NamespaceTable/30/1/1/29/"table_statistics"/4/1 /Tenant/5/NamespaceTable/30/1/1/29/"task_payloads"/4/1 /Tenant/5/NamespaceTable/30/1/1/29/"tenant_id_seq"/4/1 @@ -327,7 +332,7 @@ initial-keys tenant=5 initial-keys tenant=999 ---- -122 keys: +124 keys: /Tenant/999/Table/3/1/1/2/1 /Tenant/999/Table/3/1/3/2/1 /Tenant/999/Table/3/1/4/2/1 @@ -385,6 +390,7 @@ initial-keys tenant=999 /Tenant/999/Table/3/1/64/2/1 /Tenant/999/Table/3/1/65/2/1 /Tenant/999/Table/3/1/66/2/1 + /Tenant/999/Table/3/1/67/2/1 /Tenant/999/Table/5/1/0/2/1 /Tenant/999/Table/7/1/0/0 /Tenant/999/Table/8/1/1/0 @@ -434,6 +440,7 @@ initial-keys tenant=999 /Tenant/999/NamespaceTable/30/1/1/29/"statement_diagnostics_requests"/4/1 /Tenant/999/NamespaceTable/30/1/1/29/"statement_execution_insights"/4/1 /Tenant/999/NamespaceTable/30/1/1/29/"statement_statistics"/4/1 + /Tenant/999/NamespaceTable/30/1/1/29/"table_metadata"/4/1 /Tenant/999/NamespaceTable/30/1/1/29/"table_statistics"/4/1 /Tenant/999/NamespaceTable/30/1/1/29/"task_payloads"/4/1 /Tenant/999/NamespaceTable/30/1/1/29/"tenant_id_seq"/4/1 diff --git a/pkg/sql/ttl/ttljob/ttljob_processor.go b/pkg/sql/ttl/ttljob/ttljob_processor.go index a58f7ec40a94..f45951389fbb 100644 --- a/pkg/sql/ttl/ttljob/ttljob_processor.go +++ b/pkg/sql/ttl/ttljob/ttljob_processor.go @@ 
-370,7 +370,7 @@ func (t *ttlProcessor) runTTLOnQueryBounds( return nil } if err := serverCfg.DB.Txn( - ctx, do, isql.SteppingEnabled(), isql.WithPriority(admissionpb.TTLLowPri), + ctx, do, isql.SteppingEnabled(), isql.WithPriority(admissionpb.BulkLowPri), ); err != nil { return spanRowCount, errors.Wrapf(err, "error during row deletion") } diff --git a/pkg/sql/txn_state.go b/pkg/sql/txn_state.go index 92512738ec87..0672ef274d6c 100644 --- a/pkg/sql/txn_state.go +++ b/pkg/sql/txn_state.go @@ -78,8 +78,8 @@ type txnState struct { // autoRetryCounter keeps track of the number of automatic retries that have // occurred. It includes per-statement retries performed under READ // COMMITTED as well as transaction retries for serialization failures under - // SNAPSHOT and SERIALIZABLE. It's 0 whenever the transaction state is not - // stateOpen. + // REPEATABLE READ and SERIALIZABLE. It's 0 whenever the transaction state + // is not stateOpen. autoRetryCounter int32 } diff --git a/pkg/sql/unique_without_index_test.go b/pkg/sql/unique_without_index_test.go index 7009aeac0b7a..0c4c02c040a6 100644 --- a/pkg/sql/unique_without_index_test.go +++ b/pkg/sql/unique_without_index_test.go @@ -40,7 +40,7 @@ func TestUWIConstraintReferencingTypes(t *testing.T) { defer log.Scope(t).Close(t) ctx := context.Background() - testutils.RunTrueAndFalse(t, "test-in-both-legacy-and-declarative-schema-changer", func( + testutils.RunTrueAndFalse(t, "use-declarative-schema-changer", func( t *testing.T, useDeclarativeSchemaChanger bool, ) { s, sqlDB, kvDB := serverutils.StartServer(t, base.TestServerArgs{}) diff --git a/pkg/sql/update.go b/pkg/sql/update.go index 8e4ffd6b7546..60ffc0233573 100644 --- a/pkg/sql/update.go +++ b/pkg/sql/update.go @@ -16,13 +16,10 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/schemaexpr" "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/rowcontainer" - "github.com/cockroachdb/cockroach/pkg/sql/sem/eval" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlerrors" - "github.com/cockroachdb/errors" ) var updateNodePool = sync.Pool{ @@ -58,56 +55,6 @@ type updateRun struct { // traceKV caches the current KV tracing flag. traceKV bool - // computedCols are the columns that need to be (re-)computed as - // the result of updating some of the columns in updateCols. - computedCols []catalog.Column - // computeExprs are the expressions to evaluate to re-compute the - // columns in computedCols. - computeExprs []tree.TypedExpr - // iVarContainerForComputedCols is used as a temporary buffer that - // holds the updated values for every column in the source, to - // serve as input for indexed vars contained in the computeExprs. - iVarContainerForComputedCols schemaexpr.RowIndexedVarContainer - - // sourceSlots is the helper that maps RHS expressions to LHS targets. - // This is necessary because there may be fewer RHS expressions than - // LHS targets. For example, SET (a, b) = (SELECT 1,2) has: - // - 2 targets (a, b) - // - 1 source slot, the subquery (SELECT 1, 2). - // Each call to extractValues() on a sourceSlot will return 1 or more - // datums suitable for assignments. In the example above, the - // method would return 2 values. 
- sourceSlots []sourceSlot - - // updateValues will hold the new values for every column - // mentioned in the LHS of the SET expressions, in the - // order specified by those SET expressions (thus potentially - // a different order than the source). - updateValues tree.Datums - - // During the update, the expressions provided by the source plan - // contain the columns that are being assigned in the order - // specified by the table descriptor. - // - // For example, with UPDATE kv SET v=3, k=2, the source plan will - // provide the values in the order k, v (assuming this is the order - // the columns are defined in kv's descriptor). - // - // Then during the update, the columns are updated in the order of - // the setExprs (or, equivalently, the order of the sourceSlots), - // for the example above that would be v, k. The results - // are stored in updateValues above. - // - // Then at the end of the update, the values need to be presented - // back to the TableRowUpdater in the order of the table descriptor - // again. - // - // updateVals is the buffer for this 2nd stage. - // updateColsIdx maps the order of the 2nd stage into the order of the 3rd stage. - // This provides the inverse mapping of sourceSlots. - // - updateColsIdx catalog.TableColMap - // rowIdxToRetIdx is the mapping from the columns in ru.FetchCols to the // columns in the resultRowBuffer. A value of -1 is used to indicate // that the column at that index is not part of the resultRowBuffer @@ -225,65 +172,15 @@ func (u *updateNode) processSourceRow(params runParams, sourceVals tree.Datums) // expressions. oldValues := sourceVals[:len(u.run.tu.ru.FetchCols)] - // valueIdx is used in the loop below to map sourceSlots to - // entries in updateValues. - valueIdx := 0 - - // Propagate the values computed for the RHS expressions into - // updateValues at the right positions. The positions in - // updateValues correspond to the columns named in the LHS - // operands for SET. - for _, slot := range u.run.sourceSlots { - for _, value := range slot.extractValues(sourceVals) { - u.run.updateValues[valueIdx] = value - valueIdx++ - } - } - - // At this point, we have populated updateValues with the result of - // computing the RHS for every assignment. - // - - if len(u.run.computeExprs) > 0 { - // We now need to (re-)compute the computed column values, using - // the updated values above as input. - // - // This needs to happen in the context of a row containing all the - // table's columns as if they had been updated already. This is not - // yet reflected neither by oldValues (which contain non-updated values) - // nor updateValues (which contain only those columns mentioned in the SET LHS). - // - // So we need to construct a buffer that groups them together. - // iVarContainerForComputedCols does this. - copy(u.run.iVarContainerForComputedCols.CurSourceRow, oldValues) - for i := range u.run.tu.ru.UpdateCols { - id := u.run.tu.ru.UpdateCols[i].GetID() - idx := u.run.tu.ru.FetchColIDtoRowIndex.GetDefault(id) - u.run.iVarContainerForComputedCols.CurSourceRow[idx] = u.run. - updateValues[i] - } - - // Now (re-)compute the computed columns. - // Note that it's safe to do this in any order, because we currently - // prevent computed columns from depending on other computed columns. 
- params.EvalContext().PushIVarContainer(&u.run.iVarContainerForComputedCols) - for i := range u.run.computedCols { - d, err := eval.Expr(params.ctx, params.EvalContext(), u.run.computeExprs[i]) - if err != nil { - params.EvalContext().IVarContainer = nil - name := u.run.computedCols[i].GetName() - return errors.Wrapf(err, "computed column %s", tree.ErrString((*tree.Name)(&name))) - } - idx := u.run.updateColsIdx.GetDefault(u.run.computedCols[i].GetID()) - u.run.updateValues[idx] = d - } - params.EvalContext().PopIVarContainer() - } + // The update values follow the fetch values and their order corresponds to the order of ru.UpdateCols. + numFetchCols := len(u.run.tu.ru.FetchCols) + numUpdateCols := len(u.run.tu.ru.UpdateCols) + updateValues := sourceVals[numFetchCols : numFetchCols+numUpdateCols] // Verify the schema constraints. For consistency with INSERT/UPSERT // and compatibility with PostgreSQL, we must do this before // processing the CHECK constraints. - if err := enforceLocalColumnConstraints(u.run.updateValues, u.run.tu.ru.UpdateCols); err != nil { + if err := enforceNotNullConstraints(updateValues, u.run.tu.ru.UpdateCols); err != nil { return err } @@ -317,12 +214,12 @@ func (u *updateNode) processSourceRow(params runParams, sourceVals tree.Datums) // Error out the update if the enforce_home_region session setting is on and // the row's locality doesn't match the gateway region. - if err := u.run.regionLocalInfo.checkHomeRegion(u.run.updateValues); err != nil { + if err := u.run.regionLocalInfo.checkHomeRegion(updateValues); err != nil { return err } // Queue the insert in the KV batch. - newValues, err := u.run.tu.rowForUpdate(params.ctx, oldValues, u.run.updateValues, pm, u.run.traceKV) + newValues, err := u.run.tu.rowForUpdate(params.ctx, oldValues, updateValues, pm, u.run.traceKV) if err != nil { return err } @@ -390,41 +287,8 @@ func (u *updateNode) enableAutoCommit() { u.run.tu.enableAutoCommit() } -// sourceSlot abstracts the idea that our update sources can either be tuples -// or scalars. Tuples are for cases such as SET (a, b) = (1, 2) or SET (a, b) = -// (SELECT 1, 2), and scalars are for situations like SET a = b. A sourceSlot -// represents how to extract and type-check the results of the right-hand side -// of a single SET statement. We could treat everything as tuples, including -// scalars as tuples of size 1, and eliminate this indirection, but that makes -// the query plan more complex. -type sourceSlot interface { - // extractValues returns a slice of the values this slot is responsible for, - // as extracted from the row of results. - extractValues(resultRow tree.Datums) tree.Datums - // checkColumnTypes compares the types of the results that this slot refers to to the types of - // the columns those values will be assigned to. It returns an error if those types don't match up. - checkColumnTypes(row []tree.TypedExpr) error -} - -type scalarSlot struct { - column catalog.Column - sourceIndex int -} - -func (ss scalarSlot) extractValues(row tree.Datums) tree.Datums { - return row[ss.sourceIndex : ss.sourceIndex+1] -} - -func (ss scalarSlot) checkColumnTypes(row []tree.TypedExpr) error { - renderedResult := row[ss.sourceIndex] - typ := renderedResult.ResolvedType() - return colinfo.CheckDatumTypeFitsColumnType(ss.column, typ) -} - -// enforceLocalColumnConstraints asserts the column constraints that do not -// require data validation from other sources than the row data itself. This -// currently only includes checking for null values in non-nullable columns. 
-func enforceLocalColumnConstraints(row tree.Datums, cols []catalog.Column) error { +// enforceNotNullConstraints enforces NOT NULL column constraints. +func enforceNotNullConstraints(row tree.Datums, cols []catalog.Column) error { for i, col := range cols { if !col.IsNullable() && row[i] == tree.DNull { return sqlerrors.NewNonNullViolationError(col.GetName()) diff --git a/pkg/sql/upsert.go b/pkg/sql/upsert.go index 56d270b9ca56..b0c44cd82ab4 100644 --- a/pkg/sql/upsert.go +++ b/pkg/sql/upsert.go @@ -138,7 +138,7 @@ func (n *upsertNode) BatchedNext(params runParams) (bool, error) { // processSourceRow processes one row from the source for upsertion. // The table writer is in charge of accumulating the result rows. func (n *upsertNode) processSourceRow(params runParams, rowVals tree.Datums) error { - if err := enforceLocalColumnConstraints(rowVals, n.run.insertCols); err != nil { + if err := enforceNotNullConstraints(rowVals, n.run.insertCols); err != nil { return err } diff --git a/pkg/sql/vars.go b/pkg/sql/vars.go index fbebc741e769..8ebaf4829f42 100644 --- a/pkg/sql/vars.go +++ b/pkg/sql/vars.go @@ -418,50 +418,22 @@ var varGen = map[string]sessionVar{ `default_transaction_isolation`: { Set: func(ctx context.Context, m sessionDataMutator, s string) error { allowReadCommitted := allowReadCommittedIsolation.Get(&m.settings.SV) - allowSnapshot := allowSnapshotIsolation.Get(&m.settings.SV) + allowRepeatableRead := allowRepeatableReadIsolation.Get(&m.settings.SV) hasLicense := base.CCLDistributionAndEnterpriseEnabled(m.settings) var allowedValues = []string{"serializable"} - if allowSnapshot { - allowedValues = append(allowedValues, "snapshot") + if allowRepeatableRead { + allowedValues = append(allowedValues, "repeatable read") } if allowReadCommitted { allowedValues = append(allowedValues, "read committed") } level, ok := tree.IsolationLevelMap[strings.ToLower(s)] - originalLevel := level - upgraded := false - upgradedDueToLicense := false if !ok { return newVarValueError(`default_transaction_isolation`, s, allowedValues...) 
} - switch level { - case tree.ReadUncommittedIsolation: - upgraded = true - fallthrough - case tree.ReadCommittedIsolation: - level = tree.SerializableIsolation - if allowReadCommitted && hasLicense { - level = tree.ReadCommittedIsolation - } else { - upgraded = true - if allowReadCommitted && !hasLicense { - upgradedDueToLicense = true - } - } - case tree.RepeatableReadIsolation: - upgraded = true - fallthrough - case tree.SnapshotIsolation: - level = tree.SerializableIsolation - if allowSnapshot && hasLicense { - level = tree.SnapshotIsolation - } else { - upgraded = true - if allowSnapshot && !hasLicense { - upgradedDueToLicense = true - } - } - } + originalLevel := level + level, upgraded, upgradedDueToLicense := level.UpgradeToEnabledLevel( + allowReadCommitted, allowRepeatableRead, hasLicense) if f := m.upgradedIsolationLevel; upgraded && f != nil { f(ctx, originalLevel, upgradedDueToLicense) } @@ -1607,15 +1579,15 @@ var varGen = map[string]sessionVar{ // See https://github.com/postgres/postgres/blob/REL_10_STABLE/src/backend/utils/misc/guc.c#L3401-L3409 `transaction_isolation`: { Get: func(evalCtx *extendedEvalContext, _ *kv.Txn) (string, error) { - level := tree.IsolationLevelFromKVTxnIsolationLevel(evalCtx.Txn.IsoLevel()) + level := tree.FromKVIsoLevel(evalCtx.Txn.IsoLevel()) return strings.ToLower(level.String()), nil }, RuntimeSet: func(ctx context.Context, evalCtx *extendedEvalContext, local bool, s string) error { level, ok := tree.IsolationLevelMap[strings.ToLower(s)] if !ok { var allowedValues = []string{"serializable"} - if allowSnapshotIsolation.Get(&evalCtx.ExecCfg.Settings.SV) { - allowedValues = append(allowedValues, "snapshot") + if allowRepeatableReadIsolation.Get(&evalCtx.ExecCfg.Settings.SV) { + allowedValues = append(allowedValues, "repeatable read") } if allowReadCommittedIsolation.Get(&evalCtx.ExecCfg.Settings.SV) { allowedValues = append(allowedValues, "read committed") @@ -2787,13 +2759,23 @@ var varGen = map[string]sessionVar{ // CockroachDB extension. `enforce_home_region_follower_reads_enabled`: { GetStringVal: makePostgresBoolGetStringValFn(`enforce_home_region_follower_reads_enabled`), - Set: func(_ context.Context, m sessionDataMutator, s string) error { - b, err := paramparse.ParseBoolVar("enforce_home_region_follower_reads_enabled", s) - if err != nil { - return err - } - m.SetEnforceHomeRegionFollowerReadsEnabled(b) - return nil + SetWithPlanner: func(ctx context.Context, p *planner, local bool, s string) error { + p.BufferClientNotice(ctx, pgnotice.Newf( + "enforce_home_region_follower_reads_enabled is deprecated and will be removed in a future "+ + "release", + )) + return p.applyOnSessionDataMutators( + ctx, + local, + func(m sessionDataMutator) error { + b, err := paramparse.ParseBoolVar("enforce_home_region_follower_reads_enabled", s) + if err != nil { + return err + } + m.SetEnforceHomeRegionFollowerReadsEnabled(b) + return nil + }, + ) }, Get: func(evalCtx *extendedEvalContext, _ *kv.Txn) (string, error) { return formatBoolAsPostgresSetting(evalCtx.SessionData().EnforceHomeRegionFollowerReadsEnabled), nil @@ -3488,6 +3470,23 @@ var varGen = map[string]sessionVar{ }, GlobalDefault: globalTrue, }, + + // CockroachDB extension. 
+ `optimizer_push_limit_into_project_filtered_scan`: { + GetStringVal: makePostgresBoolGetStringValFn(`optimizer_push_limit_into_project_filtered_scan`), + Set: func(_ context.Context, m sessionDataMutator, s string) error { + b, err := paramparse.ParseBoolVar("optimizer_push_limit_into_project_filtered_scan", s) + if err != nil { + return err + } + m.SetOptimizerPushLimitIntoProjectFilteredScan(b) + return nil + }, + Get: func(evalCtx *extendedEvalContext, _ *kv.Txn) (string, error) { + return formatBoolAsPostgresSetting(evalCtx.SessionData().OptimizerPushLimitIntoProjectFilteredScan), nil + }, + GlobalDefault: globalTrue, + }, } func ReplicationModeFromString(s string) (sessiondatapb.ReplicationMode, error) { diff --git a/pkg/sql/walk.go b/pkg/sql/walk.go index 80cb5f67b300..0f2fc53fdb86 100644 --- a/pkg/sql/walk.go +++ b/pkg/sql/walk.go @@ -463,7 +463,6 @@ var planNodeNames = map[reflect.Type]string{ reflect.TypeOf(&setVarNode{}): "set", reflect.TypeOf(&setZoneConfigNode{}): "configure zone", reflect.TypeOf(&showFingerprintsNode{}): "show fingerprints", - reflect.TypeOf(&showLogicalReplicationJobsNode{}): "show logical replication jobs", reflect.TypeOf(&showTenantNode{}): "show tenant", reflect.TypeOf(&showTraceNode{}): "show trace for", reflect.TypeOf(&showTraceReplicaNode{}): "replica trace", diff --git a/pkg/storage/bench_test.go b/pkg/storage/bench_test.go index c9ce5d99a64e..13ac4a4c8583 100644 --- a/pkg/storage/bench_test.go +++ b/pkg/storage/bench_test.go @@ -2124,8 +2124,8 @@ func BenchmarkMVCCScannerWithIntentsAndVersions(b *testing.B) { opts := DefaultPebbleOptions().MakeWriterOptions(0, format) writer := sstable.NewWriter(objstorageprovider.NewFileWritable(sstFile), opts) for _, kv := range kvPairs { - require.NoError(b, writer.Raw().Add( - pebble.MakeInternalKey(kv.key, 0 /* seqNum */, kv.kind), kv.value)) + require.NoError(b, writer.Raw().AddWithForceObsolete( + pebble.MakeInternalKey(kv.key, 0 /* seqNum */, kv.kind), kv.value, false /* forceObsolete */)) } require.NoError(b, writer.Close()) batch.Close() diff --git a/pkg/storage/fs/file_registry_test.go b/pkg/storage/fs/file_registry_test.go index e3b8577a0fd9..f8428cfaee75 100644 --- a/pkg/storage/fs/file_registry_test.go +++ b/pkg/storage/fs/file_registry_test.go @@ -624,7 +624,7 @@ func TestFileRegistryKeepOldFilesAndSync(t *testing.T) { skip.UnderRace(t) // Slow under race. const dir = "/mydb" - mem := vfs.NewStrictMem() + mem := vfs.NewCrashableMem() { require.NoError(t, mem.MkdirAll(dir, 0755)) // Sync the root dir so that /mydb does not vanish later. @@ -672,11 +672,11 @@ func TestFileRegistryKeepOldFilesAndSync(t *testing.T) { } expectedFiles = append(expectedFiles, registryFiles[n-1]) // Also check that it matches what is in the filesystem. - lsFiles, err := mem.List(dir) + lsFiles, err := registry.FS.List(dir) require.NoError(t, err) var foundFiles []string for _, f := range lsFiles { - f = mem.PathBase(f) + f = registry.FS.PathBase(f) if strings.HasPrefix(f, registryFilenameBase) { foundFiles = append(foundFiles, f) } @@ -700,21 +700,22 @@ func TestFileRegistryKeepOldFilesAndSync(t *testing.T) { } registryChecker.addEntry(registry) } - // Start ignoring syncs. - mem.SetIgnoreSyncs(true) + // Take a crash-consistent snapshot. + crashFS := mem.CrashClone(vfs.CrashCloneCfg{}) // Add another entry, that will be deliberately lost. 
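The CrashClone call above is the heart of the new vfs API: instead of toggling SetIgnoreSyncs and rewinding the one shared filesystem, the test forks a crash-consistent copy that contains only synced state, while the original keeps accepting writes. A toy model of those semantics, using hand-rolled types rather than Pebble's vfs package:

    package main

    import "fmt"

    // crashableFS sketches what vfs.NewCrashableMem provides: writes stay
    // in an unsynced buffer until sync, and crashClone copies synced state
    // only, modeling what would survive a process crash.
    type crashableFS struct {
        synced   map[string]string
        unsynced map[string]string
    }

    func newCrashableFS() *crashableFS {
        return &crashableFS{synced: map[string]string{}, unsynced: map[string]string{}}
    }

    func (fs *crashableFS) write(name, data string) { fs.unsynced[name] = data }

    func (fs *crashableFS) sync() {
        for name, data := range fs.unsynced {
            fs.synced[name] = data
        }
        fs.unsynced = map[string]string{}
    }

    // crashClone forks the filesystem as if the process crashed right now.
    func (fs *crashableFS) crashClone() *crashableFS {
        clone := newCrashableFS()
        for name, data := range fs.synced {
            clone.synced[name] = data
        }
        return clone
    }

    func main() {
        fs := newCrashableFS()
        fs.write("registry", "entry-1")
        fs.sync()
        fs.write("registry", "entry-2") // never synced: lost in the clone
        crashed := fs.crashClone()
        fmt.Println(crashed.synced["registry"]) // entry-1
    }

With that model in mind, the entry added next is exactly the kind of unsynced write the clone must not contain, which is why the checker's numAddedEntries is decremented by one before the registry is reloaded from crashFS.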
registryChecker.addEntry(registry) registryChecker.checkEntries(registry) require.NoError(t, registry.Close()) - mem.ResetToSyncedState() + + numAddedEntries := registryChecker.numAddedEntries + registryChecker = makeFileRegistryEntryChecker(t, crashFS, dir) // Remove the lost entry from what we check. - registryChecker.numAddedEntries-- + registryChecker.numAddedEntries = numAddedEntries - 1 - mem.SetIgnoreSyncs(false) // Keep no old registry files. numOldRegistryFiles = 0 registry = &FileRegistry{ - FS: mem, + FS: crashFS, DBDir: dir, NumOldRegistryFiles: numOldRegistryFiles, SoftMaxSize: 1024, @@ -728,7 +729,7 @@ func TestFileRegistryKeepOldFilesAndSync(t *testing.T) { // Another load, with a different NumOldRegistryFiles, just for fun. numOldRegistryFiles = 1 - registry = &FileRegistry{FS: mem, DBDir: dir, NumOldRegistryFiles: numOldRegistryFiles} + registry = &FileRegistry{FS: crashFS, DBDir: dir, NumOldRegistryFiles: numOldRegistryFiles} require.NoError(t, registry.Load(context.Background())) registryChecker.checkEntries(registry) } diff --git a/pkg/storage/fs/fs.go b/pkg/storage/fs/fs.go index 9fb26db475a4..020a2f9033e1 100644 --- a/pkg/storage/fs/fs.go +++ b/pkg/storage/fs/fs.go @@ -166,16 +166,22 @@ func InitEnv( diskHealthCheckInterval = maxSyncDurationDefault } - // Pass a reference to the stats collector below. Since retrieving the stats - // collector requires the directory to be created first, we defer the - // retrieval to after the directories are created. + // Create the directory if it doesn't already exist. We need to acquire a + // database lock down below, which requires the directory already exist. + if cfg.RW == ReadWrite { + if err := e.UnencryptedFS.MkdirAll(dir, os.ModePerm); err != nil { + return nil, err + } + } + + // Create the stats collector. This has to be done after the directory has + // been created above. var statsCollector *vfs.DiskWriteStatsCollector var err error - retrieveStatsCollector := func() error { - if diskWriteStats != nil && dir != "" { - statsCollector, err = diskWriteStats.GetOrCreateCollector(dir) + if diskWriteStats != nil && dir != "" { + if statsCollector, err = diskWriteStats.GetOrCreateCollector(dir); err != nil { + return nil, errors.Wrap(err, "retrieving stats collector") } - return err } // Instantiate a file system with disk health checking enabled. This FS @@ -189,20 +195,6 @@ func InitEnv( exit.WithCode(exit.DiskFull()) }) - // Create the directory if it doesn't already exist. We need to acquire a - // database lock down below, which requires the directory already exist. - if cfg.RW == ReadWrite { - if err := e.UnencryptedFS.MkdirAll(dir, os.ModePerm); err != nil { - return nil, err - } - } - - // Now that we have created the directories, we can retrieve the stats - // collector. - if err = retrieveStatsCollector(); err != nil { - return nil, errors.Wrap(err, "retrieving stats collector") - } - // Acquire the database lock in the store directory to ensure that no other // process is simultaneously accessing the same store. We manually acquire // the database lock here (rather than allowing Pebble to acquire the lock) @@ -314,6 +306,12 @@ func (e *Env) Close() { } } +// Unwrap is part of the vfs.FS interface. +func (e *Env) Unwrap() vfs.FS { + // We don't want to expose the unencrypted FS. 
+ return nil +} + func (e *Env) onDiskSlow(info vfs.DiskSlowInfo) { if fn := e.onDiskSlowFunc.Load(); fn != nil { (*fn)(info) diff --git a/pkg/storage/fs/safewrite_test.go b/pkg/storage/fs/safewrite_test.go index 85c13b96cbb2..7b64c88a1420 100644 --- a/pkg/storage/fs/safewrite_test.go +++ b/pkg/storage/fs/safewrite_test.go @@ -24,14 +24,14 @@ func TestSafeWriteToFile(t *testing.T) { defer leaktest.AfterTest(t)() // Use an in-memory FS that strictly enforces syncs. - mem := vfs.NewStrictMem() + mem := vfs.NewCrashableMem() syncDir := func(dir string) { fdir, err := mem.OpenDir(dir) require.NoError(t, err) require.NoError(t, fdir.Sync()) require.NoError(t, fdir.Close()) } - readFile := func(filename string) []byte { + readFile := func(mem *vfs.MemFS, filename string) []byte { f, err := mem.Open("foo/bar") require.NoError(t, err) b, err := io.ReadAll(f) @@ -52,14 +52,14 @@ func TestSafeWriteToFile(t *testing.T) { // Discard any unsynced writes to make sure we set up the test // preconditions correctly. - mem.ResetToSyncedState() - require.Equal(t, []byte("Hello world"), readFile("foo/bar")) + crashFS := mem.CrashClone(vfs.CrashCloneCfg{}) + require.Equal(t, []byte("Hello world"), readFile(crashFS, "foo/bar")) // Use SafeWriteToFile to atomically, durably change the contents of the // file. - require.NoError(t, SafeWriteToFile(mem, "foo", "foo/bar", []byte("Hello everyone"), UnspecifiedWriteCategory)) + require.NoError(t, SafeWriteToFile(crashFS, "foo", "foo/bar", []byte("Hello everyone"), UnspecifiedWriteCategory)) // Discard any unsynced writes. - mem.ResetToSyncedState() - require.Equal(t, []byte("Hello everyone"), readFile("foo/bar")) + crashFS = crashFS.CrashClone(vfs.CrashCloneCfg{}) + require.Equal(t, []byte("Hello everyone"), readFile(crashFS, "foo/bar")) } diff --git a/pkg/storage/fs/sticky_vfs.go b/pkg/storage/fs/sticky_vfs.go index cf3eafeac78c..1ee0184c987f 100644 --- a/pkg/storage/fs/sticky_vfs.go +++ b/pkg/storage/fs/sticky_vfs.go @@ -24,9 +24,9 @@ type stickyConfig struct { } // UseStrictMemFS option instructs StickyRegistry to produce strict in-memory -// filesystems, i.e. to use vfs.NewStrictMem instead of vfs.NewMem. +// filesystems, i.e. to use vfs.NewCrashableMem instead of vfs.NewMem. var UseStrictMemFS = StickyOption(func(cfg *stickyConfig) { - cfg.newFS = vfs.NewStrictMem + cfg.newFS = vfs.NewCrashableMem }) // StickyRegistry manages the lifecycle of sticky in-memory filesystems. It @@ -36,6 +36,9 @@ type StickyRegistry interface { // Get returns the named in-memory FS, constructing a new one if this is the // first time a FS with the provided ID has been requested. Get(stickyVFSID string) *vfs.MemFS + // Set changes the named in-memory FS; the given instance will be returned by + // subsequent Gets for this ID. + Set(stickyVFSID string, fs *vfs.MemFS) } // stickyRegistryImpl is the bookkeeper for all active sticky filesystems, @@ -61,7 +64,7 @@ func NewStickyRegistry(opts ...StickyOption) StickyRegistry { return registry } -// Get implements the StickyRegistry interface. +// Get is part of the StickyRegistry interface. func (registry *stickyRegistryImpl) Get(stickyVFSID string) *vfs.MemFS { registry.mu.Lock() defer registry.mu.Unlock() @@ -73,3 +76,11 @@ func (registry *stickyRegistryImpl) Get(stickyVFSID string) *vfs.MemFS { registry.entries[stickyVFSID] = fs return fs } + +// Set is part of the StickyRegistry interface. 
+func (registry *stickyRegistryImpl) Set(stickyVFSID string, fs *vfs.MemFS) { + registry.mu.Lock() + defer registry.mu.Unlock() + + registry.entries[stickyVFSID] = fs +} diff --git a/pkg/storage/mvcc.go b/pkg/storage/mvcc.go index dc129f7b3966..b1c2cac148ff 100644 --- a/pkg/storage/mvcc.go +++ b/pkg/storage/mvcc.go @@ -6283,12 +6283,15 @@ func MVCCGarbageCollect( ms *enginepb.MVCCStats, keys []kvpb.GCRequest_GCKey, timestamp hlc.Timestamp, -) error { +) (retE error) { var count int64 defer func(begin time.Time) { - log.Eventf(ctx, "done with GC evaluation for %d keys at %.2f keys/sec. Deleted %d entries", - len(keys), float64(len(keys))*1e9/float64(timeutil.Since(begin)), count) + log.Eventf(ctx, "handled %d incoming point keys; deleted %d in %s", + len(keys), count, timeutil.Since(begin)) + if retE != nil { + log.Eventf(ctx, "err: %s", retE) + } }(timeutil.Now()) // If there are no keys then there is no work. @@ -6589,16 +6592,16 @@ type CollectableGCRangeKey struct { // not performed correctly by the level above. func MVCCGarbageCollectRangeKeys( ctx context.Context, rw ReadWriter, ms *enginepb.MVCCStats, rks []CollectableGCRangeKey, -) error { +) (retE error) { var count int64 defer func(begin time.Time) { - // TODO(oleg): this could be misleading if GC fails, but this function still - // reports how many keys were GC'd. The approach is identical to what point - // key GC does for consistency, but both places could be improved. log.Eventf(ctx, - "done with GC evaluation for %d range keys at %.2f keys/sec. Deleted %d entries", - len(rks), float64(len(rks))*1e9/float64(timeutil.Since(begin)), count) + "handled %d incoming range keys; deleted %d fragments in %s", + len(rks), count, timeutil.Since(begin)) + if retE != nil { + log.Eventf(ctx, "err: %s", retE) + } }(timeutil.Now()) if len(rks) == 0 { @@ -6707,9 +6710,12 @@ func MVCCGarbageCollectRangeKeys( if !v.Timestamp.LessEq(gcKey.Timestamp) { break } - if err := rw.ClearMVCCRangeKey(rangeKeys.AsRangeKey(v)); err != nil { + k := rangeKeys.AsRangeKey(v) + log.Eventf(ctx, "clearing rangekey fragment: %s", k) + if err := rw.ClearMVCCRangeKey(k); err != nil { return err } + count++ if ms != nil { ms.Add(updateStatsOnRangeKeyClearVersion(rangeKeys, v)) } diff --git a/pkg/storage/pebble.go b/pkg/storage/pebble.go index 653bfe832bed..e4b08c0ba977 100644 --- a/pkg/storage/pebble.go +++ b/pkg/storage/pebble.go @@ -2534,7 +2534,7 @@ func pebbleFormatVersion(clusterVersion roachpb.Version) pebble.FormatMajorVersi // pebbleFormatVersionKeys are sorted in descending order; find the first one // that is not newer than clusterVersion. for _, k := range pebbleFormatVersionKeys { - if clusterVersion.AtLeast(k.FenceVersion()) { + if clusterVersion.AtLeast(k.Version().FenceVersion()) { return pebbleFormatVersionMap[k] } } diff --git a/pkg/storage/replicas_storage.go b/pkg/storage/replicas_storage.go index 3c32c0664da0..7c98f7b50581 100644 --- a/pkg/storage/replicas_storage.go +++ b/pkg/storage/replicas_storage.go @@ -28,7 +28,7 @@ import ( // we can lift. // - Implement interface. // - Unit tests and randomized tests, including engine restarts that lose -// state (using vfs.NewStrictMem). +// state (using vfs.NewCrashableMem). // - Benchmarks comparing single and two engine implementations. 
// - Race-build dynamically asserts that SSTs or MutationBatches that are // passed through this interface only touch the keys they are allowed to diff --git a/pkg/storage/sst_writer.go b/pkg/storage/sst_writer.go index 66ee08e83fac..bb16f696d4f1 100644 --- a/pkg/storage/sst_writer.go +++ b/pkg/storage/sst_writer.go @@ -349,7 +349,7 @@ func (fw *SSTWriter) PutInternalPointKey(key *pebble.InternalKey, value []byte) return errors.New("cannot decode engine key") } fw.DataSize += int64(len(ek.Key)) + int64(len(value)) - return fw.fw.Raw().Add(*key, value) + return fw.fw.Raw().AddWithForceObsolete(*key, value, false /* forceObsolete */) } // clearRange clears all point keys in the given range by dropping a Pebble diff --git a/pkg/storage/testdata/mvcc_histories/range_key_clear b/pkg/storage/testdata/mvcc_histories/range_key_clear index 08db3124aa79..a7cc9b6fa90e 100644 --- a/pkg/storage/testdata/mvcc_histories/range_key_clear +++ b/pkg/storage/testdata/mvcc_histories/range_key_clear @@ -6,11 +6,11 @@ # 7 [a7] [d7] [j7] [l7] # 6 f6 # 5 o---------------o k5 -# 4 x x d4 f4 g4 +# 4 x x d4 f4 g4 x # 3 o-------o e3 o-------oh3 # 2 a2 f2 g2 -# 1 o---------------------------------------o -# a b c d e f g h i j k +# 1 o---------------------------------------o o---o +# a b c d e f g h i j k l m # run ok put_rangekey k=a end=k ts=1 @@ -78,6 +78,16 @@ meta: "l"/0,0 -> txn={id=00000001 key=/Min iso=Serializable pri=0.00000000 epo=0 data: "l"/7.000000000,0 -> /BYTES/l7 # Clear a few range key segments. +# After (removes the range key span [f-g)@3): +# T +# 7 [a7] [d7] [j7] [l7] +# 6 f6 +# 5 o---------------o k5 +# 4 x x d4 f4 g4 x +# 3 o-------o e3 o---oh3 +# 2 a2 f2 g2 +# 1 o---------------------------------------o o---o +# a b c d e f g h i j k l m run ok clear_rangekey k=f end=g ts=3 ---- @@ -111,6 +121,16 @@ data: "k"/5.000000000,0 -> /BYTES/k5 meta: "l"/0,0 -> txn={id=00000001 key=/Min iso=Serializable pri=0.00000000 epo=0 ts=7.000000000,0 min=0,0 seq=0} ts=7.000000000,0 del=false klen=12 vlen=7 mergeTs= txnDidNotUpdateMeta=true data: "l"/7.000000000,0 -> /BYTES/l7 +# After (removes the range key span [e-f)@1): +# T +# 7 [a7] [d7] [j7] [l7] +# 6 f6 +# 5 o---------------o k5 +# 4 x x d4 f4 g4 x +# 3 o-------o e3 o---oh3 +# 2 a2 f2 g2 +# 1 o---------------o o-------------------o o---o +# a b c d e f g h i j k l m run ok clear_rangekey k=e end=f ts=1 ---- @@ -146,7 +166,17 @@ data: "k"/5.000000000,0 -> /BYTES/k5 meta: "l"/0,0 -> txn={id=00000001 key=/Min iso=Serializable pri=0.00000000 epo=0 ts=7.000000000,0 min=0,0 seq=0} ts=7.000000000,0 del=false klen=12 vlen=7 mergeTs= txnDidNotUpdateMeta=true data: "l"/7.000000000,0 -> /BYTES/l7 -# Clearing segments is idempotent and works on missing segments. +# Clearing segments is idempotent. +# After (no change): +# T +# 7 [a7] [d7] [j7] [l7] +# 6 f6 +# 5 o---------------o k5 +# 4 x x d4 f4 g4 x +# 3 o-------o e3 o---oh3 +# 2 a2 f2 g2 +# 1 o---------------o o-------------------o o---o +# a b c d e f g h i j k l m run ok clear_rangekey k=f end=g ts=3 ---- @@ -182,6 +212,17 @@ data: "k"/5.000000000,0 -> /BYTES/k5 meta: "l"/0,0 -> txn={id=00000001 key=/Min iso=Serializable pri=0.00000000 epo=0 ts=7.000000000,0 min=0,0 seq=0} ts=7.000000000,0 del=false klen=12 vlen=7 mergeTs= txnDidNotUpdateMeta=true data: "l"/7.000000000,0 -> /BYTES/l7 +# Clearing segments works on missing segments. 
+# After (no change): +# T +# 7 [a7] [d7] [j7] [l7] +# 6 f6 +# 5 o---------------o k5 +# 4 x x d4 f4 g4 x +# 3 o-------o e3 o---oh3 +# 2 a2 f2 g2 +# 1 o---------------o o-------------------o o---o +# a b c d e f g h i j k l m run ok clear_rangekey k=a end=z ts=10 ---- @@ -218,6 +259,17 @@ meta: "l"/0,0 -> txn={id=00000001 key=/Min iso=Serializable pri=0.00000000 epo=0 data: "l"/7.000000000,0 -> /BYTES/l7 # Now clear a few spans. +# +# After (wipes out [c-d)): +# T +# 7 [a7] [d7] [j7] [l7] +# 6 f6 +# 5 o-----------o k5 +# 4 x x d4 f4 g4 x +# 3 o---o e3 o---oh3 +# 2 a2 f2 g2 +# 1 o-------o o---o o-------------------o o---o +# a b c d e f g h i j k l m run ok clear_range k=c end=d ---- @@ -252,6 +304,16 @@ data: "k"/5.000000000,0 -> /BYTES/k5 meta: "l"/0,0 -> txn={id=00000001 key=/Min iso=Serializable pri=0.00000000 epo=0 ts=7.000000000,0 min=0,0 seq=0} ts=7.000000000,0 del=false klen=12 vlen=7 mergeTs= txnDidNotUpdateMeta=true data: "l"/7.000000000,0 -> /BYTES/l7 +# After (wipes out [f-g)): +# T +# 7 [a7] [d7] [j7] [l7] +# 6 +# 5 o-------o k5 +# 4 x x d4 g4 x +# 3 o---o e3 o---oh3 +# 2 a2 g2 +# 1 o-------o o---o o---------------o o---o +# a b c d e f g h i j k l m run ok clear_range k=f end=g ---- diff --git a/pkg/testutils/diagutils/diag_test_server.go b/pkg/testutils/diagutils/diag_test_server.go index 010a05633bbc..25e538d044f2 100644 --- a/pkg/testutils/diagutils/diag_test_server.go +++ b/pkg/testutils/diagutils/diag_test_server.go @@ -32,6 +32,10 @@ type Server struct { numRequests int last *RequestData + + // Testing knobs. Setting these will override response from the test server. + respError error + respCode int } } @@ -81,6 +85,12 @@ func NewServer() *Server { panic(err) } srv.mu.last = data + + if srv.mu.respError != nil { + http.Error(w, srv.mu.respError.Error(), srv.mu.respCode) + } else if srv.mu.respCode != 0 { + w.WriteHeader(srv.mu.respCode) + } })) var err error @@ -117,3 +127,23 @@ func (s *Server) LastRequestData() *RequestData { defer s.mu.Unlock() return s.mu.last } + +func (s *Server) SetRespError(e error) func() { + s.mu.Lock() + defer s.mu.Unlock() + + s.mu.respError = e + return func() { + s.SetRespError(nil) + } +} + +func (s *Server) SetRespCode(code int) func() { + s.mu.Lock() + defer s.mu.Unlock() + + s.mu.respCode = code + return func() { + s.SetRespCode(0) + } +} diff --git a/pkg/testutils/lint/passes/errcmp/errcmp.go b/pkg/testutils/lint/passes/errcmp/errcmp.go index 57637d17b934..6ac54bd79c2f 100644 --- a/pkg/testutils/lint/passes/errcmp/errcmp.go +++ b/pkg/testutils/lint/passes/errcmp/errcmp.go @@ -83,7 +83,7 @@ func run(pass *analysis.Pass) (interface{}, error) { func checkErrSwitch(pass *analysis.Pass, s *ast.SwitchStmt) { if pass.TypesInfo.Types[s.Tag].Type == errorType && !passesutil.HasNolintComment(pass, s, name) { - pass.Reportf(s.Switch, escNl(`invalid direct comparison of error object + pass.Reportf(s.Switch, "%s", escNl(`invalid direct comparison of error object Tip: switch err { case errRef:... -> switch { case errors.Is(err, errRef): ... 
@@ -94,7 +94,7 @@ Tip: func checkErrCast(pass *analysis.Pass, texpr *ast.TypeAssertExpr) { if pass.TypesInfo.Types[texpr.X].Type == errorType && !passesutil.HasNolintComment(pass, texpr, name) { - pass.Reportf(texpr.Lparen, escNl(`invalid direct cast on error object + pass.Reportf(texpr.Lparen, "%s", escNl(`invalid direct cast on error object Alternatives: if _, ok := err.(*T); ok -> if errors.HasType(err, (*T)(nil)) if _, ok := err.(I); ok -> if errors.HasInterface(err, (*I)(nil)) @@ -129,7 +129,7 @@ func checkErrCmp(pass *analysis.Pass, binaryExpr *ast.BinaryExpr) { return } - pass.Reportf(binaryExpr.OpPos, escNl(`use errors.Is instead of a direct comparison + pass.Reportf(binaryExpr.OpPos, "%s", escNl(`use errors.Is instead of a direct comparison For example: if errors.Is(err, errMyOwnErrReference) { ... diff --git a/pkg/testutils/lint/passes/fmtsafe/functions.go b/pkg/testutils/lint/passes/fmtsafe/functions.go index 34c0b28d0880..77e2b60e939a 100644 --- a/pkg/testutils/lint/passes/fmtsafe/functions.go +++ b/pkg/testutils/lint/passes/fmtsafe/functions.go @@ -125,6 +125,8 @@ var requireConstFmt = map[string]bool{ "(*github.com/cockroachdb/cockroach/pkg/kv/kvserver.raftLogger).Fatalf": true, "(*github.com/cockroachdb/cockroach/pkg/kv/kvserver.raftLogger).Panicf": true, + "(*github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvflowcontrol/rac2.LogTracker).errorf": true, + "(github.com/cockroachdb/cockroach/pkg/raft.Logger).Debugf": true, "(github.com/cockroachdb/cockroach/pkg/raft.Logger).Infof": true, "(github.com/cockroachdb/cockroach/pkg/raft.Logger).Warningf": true, @@ -170,6 +172,8 @@ var requireConstFmt = map[string]bool{ "(github.com/cockroachdb/cockroach/pkg/kv/kvpb.TestPrinter).Printf": true, + "(*github.com/cockroachdb/cockroach/pkg/cloud/amazon.awsLogAdapter).Logf": true, + // Error things are populated in the init() message. 
} diff --git a/pkg/testutils/lint/passes/redactcheck/redactcheck.go b/pkg/testutils/lint/passes/redactcheck/redactcheck.go index 3d26593c704c..c08748002a67 100644 --- a/pkg/testutils/lint/passes/redactcheck/redactcheck.go +++ b/pkg/testutils/lint/passes/redactcheck/redactcheck.go @@ -136,12 +136,15 @@ func runAnalyzer(pass *analysis.Pass) (interface{}, error) { "github.com/cockroachdb/cockroach/pkg/kv/kvserver/split": { "SplitObjective": {}, }, + "github.com/cockroachdb/cockroach/pkg/kv/kvserver/storeliveness/storelivenesspb": { + "Epoch": {}, + }, "github.com/cockroachdb/cockroach/pkg/multitenant/tenantcapabilities": { "ID": {}, }, "github.com/cockroachdb/cockroach/pkg/raft/raftpb": { - "PeerID": {}, "Epoch": {}, + "PeerID": {}, }, "github.com/cockroachdb/cockroach/pkg/repstream/streampb": { "StreamID": {}, diff --git a/pkg/testutils/localtestcluster/BUILD.bazel b/pkg/testutils/localtestcluster/BUILD.bazel index 47a9d16fc506..8419d4e963e7 100644 --- a/pkg/testutils/localtestcluster/BUILD.bazel +++ b/pkg/testutils/localtestcluster/BUILD.bazel @@ -20,6 +20,7 @@ go_library( "//pkg/kv/kvserver/closedts/sidetransport", "//pkg/kv/kvserver/kvstorage", "//pkg/kv/kvserver/liveness", + "//pkg/kv/kvserver/storeliveness", "//pkg/multitenant/tenantcapabilities/tenantcapabilitiesauthorizer", "//pkg/roachpb", "//pkg/rpc", diff --git a/pkg/testutils/localtestcluster/local_test_cluster.go b/pkg/testutils/localtestcluster/local_test_cluster.go index c91b3a16e583..7e0a6d586d59 100644 --- a/pkg/testutils/localtestcluster/local_test_cluster.go +++ b/pkg/testutils/localtestcluster/local_test_cluster.go @@ -29,6 +29,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv/kvserver/closedts/sidetransport" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvstorage" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness" + "github.com/cockroachdb/cockroach/pkg/kv/kvserver/storeliveness" "github.com/cockroachdb/cockroach/pkg/multitenant/tenantcapabilities/tenantcapabilitiesauthorizer" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/rpc" @@ -183,6 +184,9 @@ func (ltc *LocalTestCluster) Start(t testing.TB, initFactory InitFactoryFn) { } ltc.DB = kv.NewDBWithContext(cfg.AmbientCtx, factory, ltc.Clock, *ltc.dbContext) transport := kvserver.NewDummyRaftTransport(cfg.AmbientCtx, cfg.Settings, ltc.Clock) + storeLivenessTransport := storeliveness.NewTransport( + cfg.AmbientCtx, ltc.stopper, ltc.Clock, nil, nil, + ) // By default, disable the replica scanner and split queue, which // confuse tests using LocalTestCluster. 
if ltc.StoreTestingKnobs == nil { @@ -226,6 +230,7 @@ func (ltc *LocalTestCluster) Start(t testing.TB, initFactory InitFactoryFn) { /* deterministic */ false, ) cfg.Transport = transport + cfg.StoreLivenessTransport = storeLivenessTransport cfg.ClosedTimestampReceiver = sidetransport.NewReceiver(nc, ltc.stopper, ltc.Stores, nil /* testingKnobs */) if err := kvstorage.WriteClusterVersion(ctx, ltc.Eng, clusterversion.TestingClusterVersion); err != nil { diff --git a/pkg/testutils/release/cockroach_releases.yaml b/pkg/testutils/release/cockroach_releases.yaml index fdac04ca3c15..398b705f0919 100644 --- a/pkg/testutils/release/cockroach_releases.yaml +++ b/pkg/testutils/release/cockroach_releases.yaml @@ -14,18 +14,18 @@ - 22.2.8 predecessor: "22.1" "23.1": - latest: 23.1.24 + latest: 23.1.25 withdrawn: - 23.1.0 predecessor: "22.2" "23.2": - latest: 23.2.9 + latest: 23.2.10 predecessor: "23.1" "24.1": - latest: 24.1.3 + latest: 24.1.4 predecessor: "23.2" "24.2": - latest: 24.2.0-rc.1 + latest: 24.2.0 predecessor: "24.1" "24.3": predecessor: "24.2" diff --git a/pkg/ui/BUILD.bazel b/pkg/ui/BUILD.bazel index 19ef0140fbee..c39e7f9eea29 100644 --- a/pkg/ui/BUILD.bazel +++ b/pkg/ui/BUILD.bazel @@ -23,6 +23,7 @@ go_library( "//pkg/settings/cluster", "//pkg/util/httputil", "//pkg/util/log", + "//pkg/util/timeutil", ], ) diff --git a/pkg/ui/ui.go b/pkg/ui/ui.go index 69b95dbc5662..855acac662a6 100644 --- a/pkg/ui/ui.go +++ b/pkg/ui/ui.go @@ -32,6 +32,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/util/httputil" "github.com/cockroachdb/cockroach/pkg/util/log" + "github.com/cockroachdb/cockroach/pkg/util/timeutil" ) const ( @@ -168,7 +169,7 @@ func Handler(cfg Config) http.Handler { if err != nil { log.Errorf(context.Background(), "unable to get license type: %+v", err) } - licenseTTL := base.LicenseTTL.Value() + licenseTTL := base.GetLicenseTTL(r.Context(), cfg.Settings, timeutil.DefaultTimeSource{}) oidcConf := cfg.OIDC.GetOIDCConf() major, minor := build.BranchReleaseSeries() args := indexHTMLArgs{ diff --git a/pkg/ui/workspaces/cluster-ui/src/api/statementDiagnosticsApi.ts b/pkg/ui/workspaces/cluster-ui/src/api/statementDiagnosticsApi.ts index 5fa5c307228d..bfa949e552cc 100644 --- a/pkg/ui/workspaces/cluster-ui/src/api/statementDiagnosticsApi.ts +++ b/pkg/ui/workspaces/cluster-ui/src/api/statementDiagnosticsApi.ts @@ -8,17 +8,17 @@ // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. 
+import { cockroach } from "@cockroachlabs/crdb-protobuf-client"; +import Long from "long"; import moment from "moment-timezone"; -import { - createSqlExecutionRequest, - executeInternalSql, - executeInternalSqlHelper, - SqlExecutionRequest, - SqlTxnResult, - txnResultSetIsEmpty, -} from "src/api"; -import { Duration } from "src/util/format"; +import { fetchData } from "src/api"; + +import { NumberToDuration } from "../util"; + +const STATEMENT_DIAGNOSTICS_PATH = "_status/stmtdiagreports"; +const CANCEL_STATEMENT_DIAGNOSTICS_PATH = + STATEMENT_DIAGNOSTICS_PATH + "/cancel"; export type StatementDiagnosticsReport = { id: string; @@ -33,70 +33,26 @@ export type StatementDiagnosticsReport = { export type StatementDiagnosticsResponse = StatementDiagnosticsReport[]; export async function getStatementDiagnosticsReports(): Promise { - let result: StatementDiagnosticsResponse = []; - - const createReq = () => { - let offset = ""; - const args = []; - if (result.length > 0) { - // Using the id is more performant and reliable than offset. - // Schema is PRIMARY KEY (id) with INT8 DEFAULT unique_rowid() NOT NULL. - offset = " AND (id::STRING < $1) "; - const last = result[result.length - 1]; - args.push(last.id); - } - const query = `SELECT - id::STRING, - statement_fingerprint, - completed, - statement_diagnostics_id::STRING, - requested_at, - min_execution_latency, - expires_at - FROM - system.statement_diagnostics_requests - WHERE - (expires_at > now() OR expires_at IS NULL OR completed = true) ${offset} - order by id desc`; - - return createSqlExecutionRequest(undefined, [ - { - sql: query, - arguments: args, - }, - ]); - }; - - const err = await executeInternalSqlHelper( - createReq, - (response: SqlTxnResult[]) => { - if (!response) { - return; - } - - if (txnResultSetIsEmpty(response)) { - return; - } - - response.forEach(x => { - if (x.rows && x.rows.length > 0) { - result = result.concat(x.rows); - } - }); - }, + const response = await fetchData( + cockroach.server.serverpb.StatementDiagnosticsReportsResponse, + STATEMENT_DIAGNOSTICS_PATH, ); - - if (err) { - throw err; - } - - return result; + return response.reports.map(report => { + return { + id: report.id.toString(), + statement_fingerprint: report.statement_fingerprint, + completed: report.completed, + statement_diagnostics_id: report.statement_diagnostics_id.toString(), + requested_at: moment.unix(report.requested_at.seconds.toNumber()), + min_execution_latency: moment.duration( + report.min_execution_latency.seconds.toNumber(), + "seconds", + ), + expires_at: moment.unix(report.expires_at.seconds.toNumber()), + }; + }); } -type CheckPendingStmtDiagnosticRow = { - count: number; -}; - export type InsertStmtDiagnosticRequest = { stmtFingerprint: string; samplingProbability?: number; @@ -109,97 +65,24 @@ export type InsertStmtDiagnosticResponse = { req_resp: boolean; }; -export function createStatementDiagnosticsReport({ - stmtFingerprint, - samplingProbability, - minExecutionLatencySeconds, - expiresAfterSeconds, - planGist, -}: InsertStmtDiagnosticRequest): Promise { - const args: Array = [stmtFingerprint]; - let query = `SELECT crdb_internal.request_statement_bundle($1, $2, $3::INTERVAL, $4::INTERVAL) as req_resp`; - - if (planGist) { - args.push(planGist); - query = `SELECT crdb_internal.request_statement_bundle($1, $2, $3, $4::INTERVAL, $5::INTERVAL) as req_resp`; - } - - if (samplingProbability) { - args.push(samplingProbability); - } else { - args.push(0); - } - if (minExecutionLatencySeconds) { - 
args.push(Duration(minExecutionLatencySeconds * 1e9)); - } else { - args.push("0"); - } - if (expiresAfterSeconds && expiresAfterSeconds !== 0) { - args.push(Duration(expiresAfterSeconds * 1e9)); - } else { - args.push("0"); - } - - const createStmtDiag = { - sql: query, - arguments: args, - }; - - const req: SqlExecutionRequest = { - execute: true, - statements: [createStmtDiag], - }; - - return checkExistingDiagRequest(stmtFingerprint).then(_ => { - return executeInternalSql(req).then(res => { - // If request succeeded but query failed, throw error (caught by saga/cacheDataReducer). - if (res.error) { - throw res.error; - } - - if ( - res.execution?.txn_results[0]?.rows?.length === 0 || - res.execution?.txn_results[0]?.rows[0]["req_resp"] === false - ) { - throw new Error("Failed to insert statement diagnostics request"); - } - - return res.execution.txn_results[0].rows[0]; - }); - }); -} - -function checkExistingDiagRequest(stmtFingerprint: string): Promise { - // Query to check if there's already a pending request for this fingerprint. - const checkPendingStmtDiag = { - sql: `SELECT count(1) FROM system.statement_diagnostics_requests - WHERE - completed = false AND - statement_fingerprint = $1 AND - (expires_at IS NULL OR expires_at > now())`, - arguments: [stmtFingerprint], - }; - - const req: SqlExecutionRequest = { - execute: true, - statements: [checkPendingStmtDiag], - }; - - return executeInternalSql(req).then(res => { - // If request succeeded but query failed, throw error (caught by saga/cacheDataReducer). - if (res.error) { - throw res.error; - } - - if (res.execution?.txn_results[0]?.rows?.length === 0) { - throw new Error("Failed to check pending statement diagnostics"); - } - - if (res.execution.txn_results[0].rows[0].count > 0) { - throw new Error( - "A pending request for the requested fingerprint already exists. Cancel the existing request first and try again.", - ); - } +export async function createStatementDiagnosticsReport( + req: InsertStmtDiagnosticRequest, +): Promise { + return fetchData( + cockroach.server.serverpb.CreateStatementDiagnosticsReportResponse, + STATEMENT_DIAGNOSTICS_PATH, + cockroach.server.serverpb.CreateStatementDiagnosticsReportRequest, + new cockroach.server.serverpb.CreateStatementDiagnosticsReportRequest({ + statement_fingerprint: req.stmtFingerprint, + sampling_probability: req.samplingProbability, + min_execution_latency: NumberToDuration(req.minExecutionLatencySeconds), + expires_after: NumberToDuration(req.expiresAfterSeconds), + plan_gist: req.planGist, + }), + ).then(response => { + return { + req_resp: response.report !== null, + }; }); } @@ -211,42 +94,22 @@ export type CancelStmtDiagnosticResponse = { stmt_diag_req_id: string; }; -export function cancelStatementDiagnosticsReport({ - requestId, -}: CancelStmtDiagnosticRequest): Promise { - const query = `UPDATE system.statement_diagnostics_requests -SET expires_at = '1970-01-01' -WHERE completed = false -AND id = $1::INT8 -AND (expires_at IS NULL OR expires_at > now()) RETURNING id as stmt_diag_req_id`; - const req: SqlExecutionRequest = { - execute: true, - statements: [ - { - sql: query, - arguments: [requestId], - }, - ], - }; - - return executeInternalSql(req).then(res => { - // If request succeeded but query failed, throw error (caught by saga/cacheDataReducer). 
- if (res.error) { - throw res.error; +export async function cancelStatementDiagnosticsReport( + req: CancelStmtDiagnosticRequest, +): Promise { + return fetchData( + cockroach.server.serverpb.CancelStatementDiagnosticsReportResponse, + CANCEL_STATEMENT_DIAGNOSTICS_PATH, + cockroach.server.serverpb.CancelStatementDiagnosticsReportRequest, + new cockroach.server.serverpb.CancelStatementDiagnosticsReportRequest({ + request_id: Long.fromString(req.requestId), + }), + ).then(response => { + if (response.error) { + throw new Error(response.error); } - - if (!res.execution?.txn_results?.length) { - throw new Error( - "cancelStatementDiagnosticsReport - unexpected zero txn_results", - ); - } - - if (res.execution.txn_results[0].rows?.length === 0) { - throw new Error( - `No pending request found for the request id: ${requestId}`, - ); - } - - return res.execution.txn_results[0].rows[0]; + return { + stmt_diag_req_id: req.requestId, + }; }); } diff --git a/pkg/ui/workspaces/crdb-api-client/BUILD.bazel b/pkg/ui/workspaces/crdb-api-client/BUILD.bazel index 6d356a4a857c..cc2517a2b6f9 100644 --- a/pkg/ui/workspaces/crdb-api-client/BUILD.bazel +++ b/pkg/ui/workspaces/crdb-api-client/BUILD.bazel @@ -3,36 +3,46 @@ load("@aspect_rules_js//npm:defs.bzl", "npm_package") load("@aspect_rules_ts//ts:defs.bzl", "ts_project") load("@bazel_skylib//rules:write_file.bzl", "write_file") load("@npm//:defs.bzl", "npm_link_all_packages") -load("//docs/generated/http:defs.bzl", "PROTOBUF_TARGETS") -load("//pkg/ui/workspaces/db-console/src/js:defs.bzl", "proto_sources") +load("//pkg:protos.bzl", "PROTO_FILES", "SERVER_PROTOS") npm_link_all_packages(name = "node_modules") -genrule( - name = "protos", - srcs = [ - ":node_modules/@bufbuild/protoc-gen-es/bin/protoc-gen-es", - ":proto_sources", - ] + PROTOBUF_TARGETS, - outs = ["dist"], - cmd = """ - mkdir -p $(OUTS); - DESCRIPTOR_SET_IN=$$(for word in $(SRCS); do echo $$word; done | grep '\\.bin$$' | tr -s '\\n' ':'); - PROTO_PATHS=$$(for f in $(locations :proto_sources); - do if [[ "$$f" =~ ^.*/pkg/.*/_virtual_imports/.*_proto/.*.proto$$ ]]; - then echo $$f | sed 's|.*pkg/.*/_virtual_imports/.*_proto/||' | sed 's|.bin$$||'; - elif [[ "$$f" =~ ^.*/com_github_cockroachdb_errors/.*.proto$$ ]]; - then echo $$f | sed 's|.*/com_github_cockroachdb_errors/||' | sed 's|.bin$$||'; - elif [[ "$$f" =~ ^.*/io/prometheus/client/.*.proto$$ ]]; - then echo $$f | sed 's|.*/io/prometheus/client/|io/prometheus/client/|' | sed 's|.bin$$||'; - fi; done | sort -u); +OUTPUT_FILES = [ + f + for files in [ + [ + file.replace(".proto", "_pb.js"), + file.replace(".proto", "_pb.d.ts"), + ] + for file in [ + "dist/" + label.replace("//pkg/", "").replace(":", "/").replace("@com_github_cockroachdb_errors//", "").replace("@com_github_prometheus_client_model//", "") + for label in PROTO_FILES + ] + ] + for f in files +] + +PROTOS_CMD = """ + mkdir -p $(RULEDIR)/dist + DESCRIPTOR_SET_IN=$$(for word in $(SRCS); do echo $$word; done | grep '\\.bin$$' | tr -s '\\n' ':') + ALL_PROTOS="{}" export PATH=$$PATH:$$(dirname $(NODE_PATH)) $(location @com_google_protobuf//:protoc) \ --plugin=$(location :node_modules/@bufbuild/protoc-gen-es/bin/protoc-gen-es) \ - --es_out $(OUTS) \ + --es_out $(RULEDIR)/dist \ --descriptor_set_in $$DESCRIPTOR_SET_IN \ --es_opt target=js+dts \ - $$PROTO_PATHS""", + $$ALL_PROTOS""".format( + " ".join([label.replace("//pkg/", "").replace(":", "/").replace("@com_github_cockroachdb_errors//", "").replace("@com_github_prometheus_client_model//", "") for label in PROTO_FILES]), +) + +genrule( 
+ name = "protos", + srcs = [ + ":node_modules/@bufbuild/protoc-gen-es/bin/protoc-gen-es", + ] + SERVER_PROTOS, + outs = OUTPUT_FILES, + cmd = PROTOS_CMD, toolchains = ["@nodejs_toolchains//:resolved_toolchain"], tools = [ "@com_google_protobuf//:protoc", @@ -40,27 +50,22 @@ genrule( ], ) -proto_sources( - name = "proto_sources", - protos = [ - "//pkg/server/serverpb:serverpb_proto", - "//pkg/ts/tspb:tspb_proto", - ], -) - genrule( name = "index_ts", srcs = [":protos"], outs = ["index.ts"], cmd = """ - ABS_ROOT_PATH=$$(realpath $(SRCS)) - for f in $$(find $$ABS_ROOT_PATH -name *.js) + for f in $(SRCS) do + if [[ $$f != *.js ]] + then + continue + fi filename=$$(basename -s .js $$f) - filepath=$${f#$$ABS_ROOT_PATH/} + filepath=$$(echo $$f | sed 's|^.*dist/||') filepath=$${filepath%.js} - import_name=$${filepath/-/_}; \ - echo export "*" as $${import_name////_} from \\"./$$(basename $(SRCS))/$${filepath}\\"\\; >> $@ + import_name=$${filepath/-/_} + echo export '*' as $${import_name////_} from \\"./dist/$${filepath}\\"\\; >> $@ done """, ) diff --git a/pkg/ui/workspaces/db-console/src/views/cluster/containers/nodeGraphs/dashboards/logicalDataReplication.tsx b/pkg/ui/workspaces/db-console/src/views/cluster/containers/nodeGraphs/dashboards/logicalDataReplication.tsx index eedd545c4ed5..650f6ecfd27b 100644 --- a/pkg/ui/workspaces/db-console/src/views/cluster/containers/nodeGraphs/dashboards/logicalDataReplication.tsx +++ b/pkg/ui/workspaces/db-console/src/views/cluster/containers/nodeGraphs/dashboards/logicalDataReplication.tsx @@ -24,10 +24,10 @@ export default function (props: GraphDashboardProps) { return [ @@ -47,17 +47,18 @@ export default function (props: GraphDashboardProps) { , datapoints .filter(d => d.value !== 0) diff --git a/pkg/ui/workspaces/db-console/src/views/cluster/containers/nodeGraphs/dashboards/overload.tsx b/pkg/ui/workspaces/db-console/src/views/cluster/containers/nodeGraphs/dashboards/overload.tsx index 9c11d04e656f..9b0989030462 100644 --- a/pkg/ui/workspaces/db-console/src/views/cluster/containers/nodeGraphs/dashboards/overload.tsx +++ b/pkg/ui/workspaces/db-console/src/views/cluster/containers/nodeGraphs/dashboards/overload.tsx @@ -61,10 +61,7 @@ export default function (props: GraphDashboardProps) { @@ -86,7 +83,7 @@ export default function (props: GraphDashboardProps) { key={nid} name="cr.node.admission.granter.io_tokens_exhausted_duration.kv" title={ - "Regular (Foreground) IO Exhausted " + + "Regular (Foreground) " + nodeDisplayName(nodeDisplayNameByID, nid) } sources={[nid]} @@ -96,7 +93,7 @@ export default function (props: GraphDashboardProps) { key={nid} name="cr.node.admission.granter.elastic_io_tokens_exhausted_duration.kv" title={ - "Elastic (Background) IO Exhausted " + + "Elastic (Background) " + nodeDisplayName(nodeDisplayNameByID, nid) } sources={[nid]} @@ -120,7 +117,7 @@ export default function (props: GraphDashboardProps) { @@ -140,10 +137,7 @@ export default function (props: GraphDashboardProps) { @@ -171,18 +165,14 @@ export default function (props: GraphDashboardProps) { @@ -198,13 +188,13 @@ export default function (props: GraphDashboardProps) { showMetricsInTooltip={true} tooltip={`The 99th percentile latency of requests waiting in the Admission Control store queue.`} > - + {nodeIDs.map(nid => ( <> @@ -212,7 +202,7 @@ export default function (props: GraphDashboardProps) { key={nid} name="cr.node.admission.wait_durations.elastic-stores-p99" title={ - "elastic (background) write " + + "Elastic " + nodeDisplayName(nodeDisplayNameByID, nid) } 
sources={[nid]} @@ -252,14 +242,14 @@ export default function (props: GraphDashboardProps) { showMetricsInTooltip={true} tooltip={`The 99th percentile latency of requests waiting in the Replication Admission Control queue. This metric is indicative of store overload on replicas.`} > - + {nodeIDs.map(nid => ( <> - + {nodeIDs.map(nid => ( <> @@ -420,7 +404,7 @@ export default function (props: GraphDashboardProps) { key={nid} name="cr.store.storage.l0-sublevels" title={ - "L0 Sublevels " + nodeDisplayName(nodeDisplayNameByID, nid) + nodeDisplayName(nodeDisplayNameByID, nid) } sources={storeIDsForNode(storeIDsByNodeID, nid)} /> diff --git a/pkg/ui/workspaces/db-console/src/views/cluster/containers/nodeGraphs/dashboards/replication.tsx b/pkg/ui/workspaces/db-console/src/views/cluster/containers/nodeGraphs/dashboards/replication.tsx index 91c3682abfd3..538f622a6d96 100644 --- a/pkg/ui/workspaces/db-console/src/views/cluster/containers/nodeGraphs/dashboards/replication.tsx +++ b/pkg/ui/workspaces/db-console/src/views/cluster/containers/nodeGraphs/dashboards/replication.tsx @@ -466,15 +466,12 @@ export default function (props: GraphDashboardProps) { tenantSource={tenantSource} showMetricsInTooltip={true} > - + {nodeIDs.map(nid => ( diff --git a/pkg/ui/workspaces/db-console/src/views/reports/containers/problemRanges/connectionsTable.tsx b/pkg/ui/workspaces/db-console/src/views/reports/containers/problemRanges/connectionsTable.tsx index 542637546e09..6374a5d99b4f 100644 --- a/pkg/ui/workspaces/db-console/src/views/reports/containers/problemRanges/connectionsTable.tsx +++ b/pkg/ui/workspaces/db-console/src/views/reports/containers/problemRanges/connectionsTable.tsx @@ -82,6 +82,10 @@ const connectionTableColumns: ConnectionTableColumn[] = [ title: "Paused Replicas", extract: problem => problem.paused_replica_ids.length, }, + { + title: "Range Too Large", + extract: problem => problem.too_large_range_ids.length, + }, { title: "Total", extract: problem => { @@ -95,7 +99,8 @@ const connectionTableColumns: ConnectionTableColumn[] = [ problem.quiescent_equals_ticking_range_ids.length + problem.raft_log_too_large_range_ids.length + problem.circuit_breaker_error_range_ids.length + - problem.paused_replica_ids.length + problem.paused_replica_ids.length + + problem.too_large_range_ids.length ); }, }, diff --git a/pkg/ui/workspaces/db-console/src/views/reports/containers/problemRanges/index.tsx b/pkg/ui/workspaces/db-console/src/views/reports/containers/problemRanges/index.tsx index b56960793b8a..f3325bfc829b 100644 --- a/pkg/ui/workspaces/db-console/src/views/reports/containers/problemRanges/index.tsx +++ b/pkg/ui/workspaces/db-console/src/views/reports/containers/problemRanges/index.tsx @@ -230,6 +230,11 @@ export class ProblemRanges extends React.Component { problems={problems} extract={problem => problem.paused_replica_ids} /> + problem.too_large_range_ids} + /> ); } diff --git a/pkg/ui/workspaces/db-console/src/views/reports/containers/range/rangeTable.tsx b/pkg/ui/workspaces/db-console/src/views/reports/containers/range/rangeTable.tsx index d2ae58c44014..c0870dca5734 100644 --- a/pkg/ui/workspaces/db-console/src/views/reports/containers/range/rangeTable.tsx +++ b/pkg/ui/workspaces/db-console/src/views/reports/containers/range/rangeTable.tsx @@ -492,6 +492,9 @@ export default class RangeTable extends React.Component { if (problems.raft_log_too_large) { results = concat(results, "Raft log too large"); } + if (problems.range_too_large) { + results = concat(results, "Range too large"); + } if (awaitingGC) { 
results = concat(results, "Awaiting GC"); } diff --git a/pkg/ui/workspaces/e2e-tests/cypress/e2e/health-check/authenticated.cy.ts b/pkg/ui/workspaces/e2e-tests/cypress/e2e/health-check/authenticated.cy.ts index c16cf3e9c9fe..7308643d2daf 100644 --- a/pkg/ui/workspaces/e2e-tests/cypress/e2e/health-check/authenticated.cy.ts +++ b/pkg/ui/workspaces/e2e-tests/cypress/e2e/health-check/authenticated.cy.ts @@ -8,9 +8,14 @@ // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. +import { SQLPrivilege } from "../../support/types"; + describe("health check: authenticated user", () => { it("serves a DB Console overview page", () => { - cy.login(); + cy.getUserWithExactPrivileges([SQLPrivilege.ADMIN]); + cy.fixture("users").then((users) => { + cy.login(users[0].username, users[0].password); + }); // Ensure that something reasonable renders at / when authenticated, making // just enough assertions to ensure the right page loaded. If this test diff --git a/pkg/ui/workspaces/e2e-tests/cypress/e2e/statementBundles/statementBundles.cy.ts b/pkg/ui/workspaces/e2e-tests/cypress/e2e/statementBundles/statementBundles.cy.ts new file mode 100644 index 000000000000..11fa08462ff5 --- /dev/null +++ b/pkg/ui/workspaces/e2e-tests/cypress/e2e/statementBundles/statementBundles.cy.ts @@ -0,0 +1,91 @@ +// Copyright 2024 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +import { SQLPrivilege } from "../../support/types"; + +// TODO (xinhaoz): This test currently only works when running pnpm run cy:run +// directly against a local cluster set up with sql activity in the last hour +// and the expected cypress users in fixtures. We need to automate this server +// setup for e2e testing. +describe("statement bundles", () => { + const runTestsForPrivilegedUser = (privilege: SQLPrivilege) => { + describe(`${privilege} user`, () => { + beforeEach(() => { + cy.getUserWithExactPrivileges([privilege]).then((user) => { + cy.login(user.username, user.password); + }); + }); + + it("can request statement bundles", () => { + cy.visit("#/sql-activity"); + cy.contains("button", "Apply").click(); + // Open modal. + cy.contains("button", "Activate").click(); + // Wait for modal. + cy.findByText(/activate statement diagnostics/i).should("be.visible"); + // Click the Activate button within the modal + cy.get(`[role="dialog"]`) // Adjust this selector to match your modal's structure + .contains("button", "Activate") + .click(); + cy.findByText(/statement diagnostics were successfully activated/i); + }); + + it("can view statement bundles", () => { + cy.visit("#/reports/statements/diagnosticshistory"); + cy.get("table tbody tr").should("have.length.at.least", 1); + }); + + it("can cancel statement bundles", () => { + cy.visit("#/reports/statements/diagnosticshistory"); + cy.get("table tbody tr").should("have.length.at.least", 1); + cy.contains("button", "Cancel").click(); + cy.findByText(/statement diagnostics were successfully cancelled/i); + }); + }); + }; + + const runTestsForNonPrivilegedUser = (privilege?: SQLPrivilege) => { + beforeEach(() => { + cy.getUserWithExactPrivileges(privilege ? 
[privilege] : []).then(
+        (user) => {
+          cy.login(user.username, user.password);
+        },
+      );
+    });
+
+    it("cannot request statement bundles", () => {
+      cy.visit("#/sql-activity");
+      cy.contains("button", "Apply").click();
+      // Should not see an Activate button.
+      cy.contains("button", "Activate").should("not.exist");
+    });
+
+    it("cannot view statement bundles", () => {
+      cy.visit("#/reports/statements/diagnosticshistory");
+      cy.findByText(/no statement diagnostics to show/i);
+    });
+  };
+
+  describe("view activity user", () => {
+    runTestsForPrivilegedUser(SQLPrivilege.VIEWACTIVITY);
+  });
+
+  describe("admin user", () => {
+    runTestsForPrivilegedUser(SQLPrivilege.ADMIN);
+  });
+
+  describe("non-privileged VIEWACTIVITYREDACTED user", () => {
+    runTestsForNonPrivilegedUser(SQLPrivilege.VIEWACTIVITYREDACTED);
+  });
+
+  describe("non-privileged user", () => {
+    runTestsForNonPrivilegedUser();
+  });
+});
diff --git a/pkg/ui/workspaces/e2e-tests/cypress/fixtures/users.json b/pkg/ui/workspaces/e2e-tests/cypress/fixtures/users.json
new file mode 100644
index 000000000000..606d8e4a91f6
--- /dev/null
+++ b/pkg/ui/workspaces/e2e-tests/cypress/fixtures/users.json
@@ -0,0 +1,28 @@
+[
+  {
+    "username": "cypress",
+    "password": "tests",
+    "sqlPrivileges": [
+      "ADMIN"
+    ]
+  },
+  {
+    "username": "va_user",
+    "password": "cypress",
+    "sqlPrivileges": [
+      "VIEWACTIVITY"
+    ]
+  },
+  {
+    "username": "va_redacted_user",
+    "password": "cypress",
+    "sqlPrivileges": [
+      "VIEWACTIVITYREDACTED"
+    ]
+  },
+  {
+    "username": "no_privs_user",
+    "password": "cypress",
+    "sqlPrivileges": []
+  }
+]
diff --git a/pkg/ui/workspaces/e2e-tests/cypress/support/commands.ts b/pkg/ui/workspaces/e2e-tests/cypress/support/commands.ts
index f7b291b090fa..d24216d00675 100644
--- a/pkg/ui/workspaces/e2e-tests/cypress/support/commands.ts
+++ b/pkg/ui/workspaces/e2e-tests/cypress/support/commands.ts
@@ -9,15 +9,16 @@
 // licenses/APL.txt.
 
 import "@testing-library/cypress/add-commands";
+import { SQLPrivilege, User } from "./types";
 
-Cypress.Commands.add("login", () => {
+Cypress.Commands.add("login", (username: string, password: string) => {
   cy.request({
     method: "POST",
     url: "/api/v2/login/",
     form: true,
     body: {
-      username: Cypress.env("username"),
-      password: Cypress.env("password"),
+      username: username,
+      password: password,
     },
     failOnStatusCode: true,
   }).then(({ body }) => {
@@ -33,3 +34,14 @@ Cypress.Commands.add("login", () => {
     }
   });
 });
+
+// Gets the user from the users.json fixture whose privileges exactly match privs.
+Cypress.Commands.add("getUserWithExactPrivileges", (privs: SQLPrivilege[]) => {
+  return cy.fixture("users").then((users) => {
+    return users.find(
+      (user: User) =>
+        user.sqlPrivileges.length === privs.length &&
+        privs.every((priv) => user.sqlPrivileges.includes(priv)),
+    );
+  });
+});
diff --git a/pkg/ui/workspaces/e2e-tests/cypress/support/index.ts b/pkg/ui/workspaces/e2e-tests/cypress/support/index.ts
index f017e842a498..c7cc68479de4 100644
--- a/pkg/ui/workspaces/e2e-tests/cypress/support/index.ts
+++ b/pkg/ui/workspaces/e2e-tests/cypress/support/index.ts
@@ -24,6 +24,7 @@
 // ***********************************************************
 
 import "./commands";
+import { SQLPrivilege, User } from "./types";
 
 declare global {
   // eslint-disable-next-line @typescript-eslint/no-namespace -- required for declaration merging
@@ -33,10 +34,11 @@ declare global {
      * Sets a session cookie for the demo user on the current
     * database.
* @example - * cy.login(); + * cy.login("cypress", "tests"); * cy.visit("#/some/authenticated/route"); */ - login(): void; + login(username: string, password: string): Chainable; + getUserWithExactPrivileges(privs: SQLPrivilege[]): Chainable; } } } diff --git a/pkg/ui/workspaces/e2e-tests/cypress/support/types.ts b/pkg/ui/workspaces/e2e-tests/cypress/support/types.ts new file mode 100644 index 000000000000..e545d0ee7374 --- /dev/null +++ b/pkg/ui/workspaces/e2e-tests/cypress/support/types.ts @@ -0,0 +1,22 @@ +// Copyright 2024 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +export enum SQLPrivilege { + ADMIN = "ADMIN", + VIEWACTIVITY = "VIEWACTIVITY", + VIEWACTIVITYREDACTED = "VIEWACTIVITYREDACTED", + NONE = "NONE", +} + +export type User = { + username: string; + password: string; + sqlPrivileges: SQLPrivilege[]; +}; diff --git a/pkg/upgrade/BUILD.bazel b/pkg/upgrade/BUILD.bazel index a15474cd359d..d8ef6ec7e42d 100644 --- a/pkg/upgrade/BUILD.bazel +++ b/pkg/upgrade/BUILD.bazel @@ -27,7 +27,6 @@ go_library( "//pkg/sql/sessiondata", "//pkg/sql/sqlstats", "//pkg/upgrade/upgradebase", - "//pkg/util/log", "//pkg/util/retry", "//pkg/util/stop", "//pkg/util/uuid", diff --git a/pkg/upgrade/helpers.go b/pkg/upgrade/helpers.go index 06ca46369c55..28995dcf128b 100644 --- a/pkg/upgrade/helpers.go +++ b/pkg/upgrade/helpers.go @@ -13,40 +13,12 @@ package upgrade import ( "context" - "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" - "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/errors" ) -// FenceVersionFor constructs the appropriate "fence version" for the given -// cluster version. Fence versions allow the upgrades infrastructure to safely -// step through consecutive cluster versions in the presence of Nodes (running -// any binary version) being added to the cluster. See the upgrade manager -// above for intended usage. -// -// Fence versions (and the upgrades infrastructure entirely) were introduced -// in the 21.1 release cycle. In the same release cycle, we introduced the -// invariant that new user-defined versions (users being crdb engineers) must -// always have even-numbered Internal versions, thus reserving the odd numbers -// to slot in fence versions for each cluster version. See top-level -// documentation in pkg/clusterversion for more details. -func FenceVersionFor( - ctx context.Context, cv clusterversion.ClusterVersion, -) clusterversion.ClusterVersion { - if (cv.Internal % 2) != 0 { - log.Fatalf(ctx, "only even numbered internal versions allowed, found %s", cv.Version) - } - - // We'll pick the odd internal version preceding the cluster version, - // slotting ourselves right before it. - fenceCV := cv - fenceCV.Internal-- - return fenceCV -} - // BumpSystemDatabaseSchemaVersion bumps the SystemDatabaseSchemaVersion // field for the system database descriptor. 
It is called after every upgrade // step that has an associated migration, and when upgrading to the final diff --git a/pkg/upgrade/upgradebase/testing_knobs.go b/pkg/upgrade/upgradebase/testing_knobs.go index 628ef4d5f5f5..5a7333be5892 100644 --- a/pkg/upgrade/upgradebase/testing_knobs.go +++ b/pkg/upgrade/upgradebase/testing_knobs.go @@ -73,6 +73,8 @@ type TestingKnobs struct { SkipUpdateSQLActivityJobBootstrap bool SkipMVCCStatisticsJobBootstrap bool + + SkipUpdateTableMetadataCacheBootstrap bool } // ModuleTestingKnobs makes TestingKnobs a base.ModuleTestingKnobs. diff --git a/pkg/upgrade/upgrademanager/manager.go b/pkg/upgrade/upgrademanager/manager.go index 97d69dfdddf6..22fb713d61b9 100644 --- a/pkg/upgrade/upgrademanager/manager.go +++ b/pkg/upgrade/upgrademanager/manager.go @@ -466,7 +466,7 @@ func (m *Manager) Migrate( cv := clusterversion.ClusterVersion{Version: clusterVersion} - fenceVersion := upgrade.FenceVersionFor(ctx, cv) + fenceVersion := cv.FenceVersion() if err := bumpClusterVersion(ctx, m.deps.Cluster, fenceVersion); err != nil { return err } diff --git a/pkg/upgrade/upgrademanager/manager_external_test.go b/pkg/upgrade/upgrademanager/manager_external_test.go index 37cac8f584c9..fc35b71c746b 100644 --- a/pkg/upgrade/upgrademanager/manager_external_test.go +++ b/pkg/upgrade/upgrademanager/manager_external_test.go @@ -827,7 +827,7 @@ func TestMigrationFailure(t *testing.T) { // Pick a random version in to fail at versions := clusterversion.ListBetween(startVersion, endVersion) failVersion := versions[rand.Intn(len(versions))] - fenceVersion := upgrade.FenceVersionFor(ctx, clusterversion.ClusterVersion{Version: failVersion}).Version + fenceVersion := failVersion.FenceVersion() t.Logf("test will fail at version: %s", failVersion.String()) // Create a storage cluster for the tenant. 
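Note on the fence-version refactor above: the removed upgrade.FenceVersionFor helper now lives on the version type as a FenceVersion() method, which both manager.go and manager_external_test.go call directly. For readers who want the invariant in isolation, here is a minimal, self-contained Go sketch of the computation; the standalone Version type and FenceVersion method below are illustrative stand-ins, not the actual roachpb/clusterversion API.

package main

import "fmt"

// Version loosely mirrors the shape of a cluster version, for illustration
// only.
type Version struct {
	Major, Minor, Internal int32
}

// FenceVersion returns the fence slotted immediately before v. Per the
// invariant described in the removed helper, user-defined cluster versions
// always carry even Internal numbers, so the preceding odd number is
// reserved for the fence.
func (v Version) FenceVersion() Version {
	if v.Internal%2 != 0 {
		panic(fmt.Sprintf("only even numbered internal versions allowed, found %v", v))
	}
	fence := v
	fence.Internal-- // slot in right before the cluster version itself
	return fence
}

func main() {
	v := Version{Major: 24, Minor: 3, Internal: 4}
	fmt.Println(v.FenceVersion()) // {24 3 3}
}

The upgrade manager bumps this fence before bumping v itself, which is what lets it step safely through consecutive cluster versions while nodes running older binaries may still be joining.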
diff --git a/pkg/upgrade/upgrades/BUILD.bazel b/pkg/upgrade/upgrades/BUILD.bazel index 26077901638b..fe589c0ba0cb 100644 --- a/pkg/upgrade/upgrades/BUILD.bazel +++ b/pkg/upgrade/upgrades/BUILD.bazel @@ -6,6 +6,7 @@ go_library( "descriptor_utils.go", "first_upgrade.go", "permanent_create_jobs_metrics_polling_job.go", + "permanent_create_update_table_metadata_cache_job.go", "permanent_ensure_sql_schema_telemetry_schedule.go", "permanent_key_visualizer_migration.go", "permanent_mvcc_statistics_migration.go", @@ -19,6 +20,9 @@ go_library( "v24_2_tenant_rates.go", "v24_2_tenant_system_tables.go", "v24_3_add_timeseries_zone_config.go", + "v24_3_sql_instances_add_draining.go", + "v24_3_table_metadata_system_table.go", + "v24_3_tenant_exclude_data_from_backup.go", ], importpath = "github.com/cockroachdb/cockroach/pkg/upgrade/upgrades", visibility = ["//visibility:public"], @@ -46,8 +50,10 @@ go_library( "//pkg/sql/catalog/systemschema", "//pkg/sql/catalog/tabledesc", "//pkg/sql/isql", + "//pkg/sql/sem/catconstants", "//pkg/sql/sem/tree", "//pkg/sql/sessiondata", + "//pkg/sql/tablemetadatacache", "//pkg/upgrade", "//pkg/upgrade/upgradebase", "//pkg/util/envutil", @@ -80,6 +86,8 @@ go_test( "v24_2_delete_version_tenant_settings_test.go", "v24_2_tenant_rates_test.go", "v24_2_tenant_system_tables_test.go", + "v24_3_sql_instances_add_draining_test.go", + "v24_3_table_metadata_system_table_test.go", "version_starvation_test.go", ], data = glob(["testdata/**"]), @@ -93,12 +101,14 @@ go_test( "//pkg/jobs/jobstest", "//pkg/keys", "//pkg/kv", + "//pkg/kv/kvclient/rangefeed", "//pkg/roachpb", "//pkg/scheduledjobs", "//pkg/security/securityassets", "//pkg/security/securitytest", "//pkg/security/username", "//pkg/server", + "//pkg/server/settingswatcher", "//pkg/settings/cluster", "//pkg/sql", "//pkg/sql/catalog", @@ -115,10 +125,14 @@ go_test( "//pkg/sql/catalog/schematelemetry/schematelemetrycontroller", "//pkg/sql/catalog/systemschema", "//pkg/sql/catalog/tabledesc", + "//pkg/sql/enum", "//pkg/sql/isql", "//pkg/sql/privilege", "//pkg/sql/sem/builtins/builtinconstants", "//pkg/sql/sem/tree", + "//pkg/sql/sqlinstance/instancestorage", + "//pkg/sql/sqlliveness/slstorage", + "//pkg/sql/sqlliveness/sqllivenesstestutils", "//pkg/sql/types", "//pkg/testutils", "//pkg/testutils/serverutils", @@ -132,7 +146,9 @@ go_test( "//pkg/util/log", "//pkg/util/protoutil", "//pkg/util/retry", + "//pkg/util/stop", "//pkg/util/timeutil", + "//pkg/util/uuid", "@com_github_cockroachdb_cockroach_go_v2//crdb", "@com_github_cockroachdb_errors//:errors", "@com_github_stretchr_testify//assert", diff --git a/pkg/upgrade/upgrades/permanent_create_update_table_metadata_cache_job.go b/pkg/upgrade/upgrades/permanent_create_update_table_metadata_cache_job.go new file mode 100644 index 000000000000..013043533cd4 --- /dev/null +++ b/pkg/upgrade/upgrades/permanent_create_update_table_metadata_cache_job.go @@ -0,0 +1,44 @@ +// Copyright 2024 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package upgrades + +import ( + "context" + + "github.com/cockroachdb/cockroach/pkg/clusterversion" + "github.com/cockroachdb/cockroach/pkg/jobs" + "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" + "github.com/cockroachdb/cockroach/pkg/security/username" + "github.com/cockroachdb/cockroach/pkg/sql/isql" + _ "github.com/cockroachdb/cockroach/pkg/sql/tablemetadatacache" // Ensure job implementation is linked. + "github.com/cockroachdb/cockroach/pkg/upgrade" +) + +func createUpdateTableMetadataCacheJob( + ctx context.Context, _ clusterversion.ClusterVersion, d upgrade.TenantDeps, +) error { + if d.TestingKnobs != nil && d.TestingKnobs.SkipUpdateTableMetadataCacheBootstrap { + return nil + } + + return d.DB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + jr := jobs.Record{ + JobID: jobs.UpdateTableMetadataCacheJobID, + Description: jobspb.TypeUpdateTableMetadataCache.String(), + Details: jobspb.UpdateTableMetadataCacheDetails{}, + Progress: jobspb.UpdateTableMetadataCacheProgress{}, + CreatedBy: &jobs.CreatedByInfo{Name: username.NodeUser, ID: username.NodeUserID}, + Username: username.NodeUserName(), + NonCancelable: true, + } + return d.JobRegistry.CreateIfNotExistAdoptableJobWithTxn(ctx, jr, txn) + }) +} diff --git a/pkg/upgrade/upgrades/permanent_upgrades.go b/pkg/upgrade/upgrades/permanent_upgrades.go index 61c874cb9f06..82aedb163733 100644 --- a/pkg/upgrade/upgrades/permanent_upgrades.go +++ b/pkg/upgrade/upgrades/permanent_upgrades.go @@ -76,6 +76,7 @@ func bootstrapCluster( {"create jobs metrics polling job", createJobsMetricsPollingJob}, {"create sql activity updater job", createActivityUpdateJobMigration}, {"create mvcc stats job", createMVCCStatisticsJob}, + {"create update cached table metadata job", createUpdateTableMetadataCacheJob}, } { log.Infof(ctx, "executing bootstrap step %q", u.name) if err := u.fn(ctx, cv, deps); err != nil { diff --git a/pkg/upgrade/upgrades/upgrades.go b/pkg/upgrade/upgrades/upgrades.go index c68a9597abf6..4161d2a6bdfc 100644 --- a/pkg/upgrade/upgrades/upgrades.go +++ b/pkg/upgrade/upgrades/upgrades.go @@ -107,6 +107,30 @@ var upgrades = []upgradebase.Upgrade{ upgrade.RestoreActionNotRequired("this zone config isn't necessary for restore"), ), + upgrade.NewTenantUpgrade( + "add new table_metadata table and job to the system tenant", + clusterversion.V24_3_TableMetadata.Version(), + upgrade.NoPrecondition, + addTableMetadataTableAndJob, + upgrade.RestoreActionNotRequired("cluster restore does not restore this table"), + ), + + upgrade.NewTenantUpgrade( + "add exclude_data_from_backup to certain system tables on tenants", + clusterversion.V24_3_TenantExcludeDataFromBackup.Version(), + upgrade.NoPrecondition, + tenantExcludeDataFromBackup, + upgrade.RestoreActionNotRequired("cluster restore does not restore affected tables"), + ), + + upgrade.NewTenantUpgrade( + "add new column to the system.sql_instances table to store whether a node is draining", + clusterversion.V24_3_SQLInstancesAddDraining.Version(), + upgrade.NoPrecondition, + sqlInstancesAddDrainingMigration, + upgrade.RestoreActionNotRequired("cluster restore does not restore the new field"), + ), + // Note: when starting a new release version, the first upgrade (for // Vxy_zStart) must be a newFirstUpgrade. Keep this comment at the bottom. 
} diff --git a/pkg/upgrade/upgrades/v24_2_tenant_system_tables_test.go b/pkg/upgrade/upgrades/v24_2_tenant_system_tables_test.go index b0797752f42a..cb790e79bd32 100644 --- a/pkg/upgrade/upgrades/v24_2_tenant_system_tables_test.go +++ b/pkg/upgrade/upgrades/v24_2_tenant_system_tables_test.go @@ -36,7 +36,7 @@ func TestCreateTenantSystemTables(t *testing.T) { // Set up the storage cluster at v1. v1 := clusterversion.MinSupported.Version() - v2 := clusterversion.Latest.Version() + v2 := clusterversion.V24_2_TenantSystemTables.Version() settings := cluster.MakeTestingClusterSettingsWithVersions( v2, diff --git a/pkg/upgrade/upgrades/v24_3_sql_instances_add_draining.go b/pkg/upgrade/upgrades/v24_3_sql_instances_add_draining.go new file mode 100644 index 000000000000..c46b1be12f4d --- /dev/null +++ b/pkg/upgrade/upgrades/v24_3_sql_instances_add_draining.go @@ -0,0 +1,48 @@ +// Copyright 2024 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package upgrades + +import ( + "context" + + "github.com/cockroachdb/cockroach/pkg/clusterversion" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/systemschema" + "github.com/cockroachdb/cockroach/pkg/upgrade" + "github.com/cockroachdb/cockroach/pkg/util/protoutil" + "github.com/cockroachdb/errors" +) + +// sqlInstancesAddDrainingMigration adds a new column `is_draining` to the +// system.sql_instances table. +func sqlInstancesAddDrainingMigration( + ctx context.Context, cs clusterversion.ClusterVersion, deps upgrade.TenantDeps, +) error { + finalDescriptor := systemschema.SQLInstancesTable() + // Replace the stored descriptor with the bootstrap descriptor. + err := deps.DB.DescsTxn(ctx, func(ctx context.Context, txn descs.Txn) error { + expectedDesc := finalDescriptor.TableDesc() + mutableDesc, err := txn.Descriptors().MutableByID(txn.KV()).Table(ctx, expectedDesc.GetID()) + if err != nil { + return err + } + version := mutableDesc.Version + mutableDesc.TableDescriptor = *protoutil.Clone(expectedDesc).(*descpb.TableDescriptor) + mutableDesc.Version = version + return txn.Descriptors().WriteDesc(ctx, false, mutableDesc, txn.KV()) + }) + if err != nil { + return errors.Wrapf(err, "unable to replace system descriptor for system.%s (%+v)", + finalDescriptor.GetName(), finalDescriptor) + } + return err +} diff --git a/pkg/upgrade/upgrades/v24_3_sql_instances_add_draining_test.go b/pkg/upgrade/upgrades/v24_3_sql_instances_add_draining_test.go new file mode 100644 index 000000000000..90b28246771e --- /dev/null +++ b/pkg/upgrade/upgrades/v24_3_sql_instances_add_draining_test.go @@ -0,0 +1,201 @@ +// Copyright 2024 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package upgrades_test + +import ( + "context" + gosql "database/sql" + "testing" + "time" + + "github.com/cockroachdb/cockroach/pkg/base" + "github.com/cockroachdb/cockroach/pkg/clusterversion" + "github.com/cockroachdb/cockroach/pkg/kv" + "github.com/cockroachdb/cockroach/pkg/kv/kvclient/rangefeed" + "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/server" + "github.com/cockroachdb/cockroach/pkg/server/settingswatcher" + "github.com/cockroachdb/cockroach/pkg/sql/enum" + "github.com/cockroachdb/cockroach/pkg/sql/sqlinstance/instancestorage" + "github.com/cockroachdb/cockroach/pkg/sql/sqlliveness/slstorage" + "github.com/cockroachdb/cockroach/pkg/sql/sqlliveness/sqllivenesstestutils" + "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" + "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" + "github.com/cockroachdb/cockroach/pkg/upgrade/upgrades" + "github.com/cockroachdb/cockroach/pkg/util/hlc" + "github.com/cockroachdb/cockroach/pkg/util/leaktest" + "github.com/cockroachdb/cockroach/pkg/util/log" + "github.com/cockroachdb/cockroach/pkg/util/stop" + "github.com/cockroachdb/cockroach/pkg/util/uuid" + "github.com/stretchr/testify/require" +) + +func TestSQLInstancesAddIsDraining(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + ctx := context.Background() + ts, sqlDB, _ := serverutils.StartServer(t, base.TestServerArgs{ + Knobs: base.TestingKnobs{ + Server: &server.TestingKnobs{ + DisableAutomaticVersionUpgrade: make(chan struct{}), + ClusterVersionOverride: clusterversion.MinSupported.Version(), + }, + }, + }) + defer ts.Stopper().Stop(ctx) + + _, err := sqlDB.Exec("SELECT is_draining FROM system.sql_instances") + require.Error(t, err, "system.sql_instances is_draining columns should not exist") + upgrades.Upgrade(t, sqlDB, clusterversion.V24_3_SQLInstancesAddDraining, nil, false) + _, err = sqlDB.Exec("SELECT is_draining FROM system.sql_instances") + require.NoError(t, err, "system.sql_instances is_draining columns should exist") +} + +func TestCreateInstancesAndUpgrades(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + ctx := context.Background() + + setupServer := func(t *testing.T) (serverutils.TestServerInterface, *gosql.DB, *kv.DB) { + return serverutils.StartServer(t, base.TestServerArgs{ + Knobs: base.TestingKnobs{ + Server: &server.TestingKnobs{ + DisableAutomaticVersionUpgrade: make(chan struct{}), + ClusterVersionOverride: clusterversion.MinSupported.Version(), + }, + }, + }) + } + + setup := func(t *testing.T, ts serverutils.TestServerInterface, kvDB *kv.DB) ( + *stop.Stopper, *instancestorage.Storage, *hlc.Clock) { + s := ts.ApplicationLayer() + const preallocatedCount = 5 + instancestorage.PreallocatedCount.Override(ctx, &s.ClusterSettings().SV, preallocatedCount) + clock := hlc.NewClockForTesting(nil) + stopper := stop.NewStopper() + slStorage := slstorage.NewFakeStorage() + f := s.RangeFeedFactory().(*rangefeed.Factory) + storage := instancestorage.NewStorage(kvDB, s.Codec(), slStorage, s.ClusterSettings(), + s.Clock(), f, s.SettingsWatcher().(*settingswatcher.SettingsWatcher)) + return stopper, storage, clock + } + + makeSession := func() *sqllivenesstestutils.FakeSession { + sessionID, err := slstorage.MakeSessionID(enum.One, uuid.MakeV4()) + if err != nil { + panic(err) + } + return 
&sqllivenesstestutils.FakeSession{SessionID: sessionID}
+	}
+
+	t.Run("create-instance-before-and-after-upgrade", func(t *testing.T) {
+		ts, sqlDB, kvDB := setupServer(t)
+		defer ts.Stopper().Stop(ctx)
+		stopper, storage, clock := setup(t, ts, kvDB)
+		defer stopper.Stop(ctx)
+		const rpcAddr = "rpcAddr"
+		const sqlAddr = "sqlAddr"
+		locality := roachpb.Locality{Tiers: []roachpb.Tier{{Key: "region", Value: "test"}, {Key: "az", Value: "a"}}}
+		binaryVersion := roachpb.Version{Major: 28, Minor: 4}
+		const expiration = time.Minute
+
+		createInstance := func(id base.SQLInstanceID) {
+			session := makeSession()
+			session.StartTS = clock.Now()
+			session.ExpTS = session.StartTS.Add(expiration.Nanoseconds(), 0)
+			instance, err := storage.CreateNodeInstance(
+				ctx, session, rpcAddr, sqlAddr, locality, binaryVersion, roachpb.NodeID(id))
+			require.NoError(t, err)
+			require.Equal(t, id, instance.InstanceID)
+		}
+
+		const firstId = base.SQLInstanceID(1000)
+		createInstance(firstId)
+		upgrades.Upgrade(t, sqlDB, clusterversion.V24_3_SQLInstancesAddDraining, nil, false)
+		const secondId = base.SQLInstanceID(1001)
+		createInstance(secondId)
+
+		// Verify the rows using SQL.
+		r := sqlutils.MakeSQLRunner(sqlDB)
+		r.CheckQueryResults(t,
+			`SELECT id, is_draining FROM system.sql_instances WHERE id >= 1000`,
+			[][]string{{"1000", "NULL"}, {"1001", "false"}})
+
+		// Verify the rows by decoding key-values.
+		instances, err := storage.GetAllInstancesDataForTest(ctx)
+		require.NoError(t, err)
+		instancestorage.SortInstances(instances)
+		require.Equal(t, 3, len(instances)) // Instance with id 1 also exists.
+		require.Equal(t, base.SQLInstanceID(1), instances[0].GetInstanceID())
+		require.Equal(t, firstId, instances[1].GetInstanceID())
+		require.Equal(t, false, instances[1].IsDraining)
+		require.Equal(t, secondId, instances[2].GetInstanceID())
+		require.Equal(t, false, instances[2].IsDraining)
+	})
+
+	t.Run("old-readers-reading-new-schema-values", func(t *testing.T) {
+		ts, sqlDB, kvDB := setupServer(t)
+		defer ts.Stopper().Stop(ctx)
+		stopper, storage, clock := setup(t, ts, kvDB)
+		defer stopper.Stop(ctx)
+		const rpcAddr = "rpcAddr"
+		const sqlAddr = "sqlAddr"
+		locality := roachpb.Locality{Tiers: []roachpb.Tier{{Key: "region", Value: "test"}, {Key: "az", Value: "a"}}}
+		binaryVersion := roachpb.Version{Major: 28, Minor: 4}
+		const expiration = time.Minute
+		// createInstance bypasses the version guard and directly writes the
+		// key-value to storage, as it uses CreateInstanceDataForTest instead of
+		// CreateNodeInstance.
+		createInstance := func(id base.SQLInstanceID, encodeIsDraining bool, isDraining bool) {
+			session := makeSession()
+			err := storage.CreateInstanceDataForTest(
+				ctx, enum.One, id, rpcAddr, sqlAddr,
+				session.SessionID, clock.Now().Add(expiration.Nanoseconds(), 0),
+				locality, binaryVersion, encodeIsDraining, isDraining)
+			require.NoError(t, err)
+		}
+
+		const firstId = base.SQLInstanceID(1000)
+		createInstance(firstId, true, true)
+		const secondId = base.SQLInstanceID(1001)
+		createInstance(secondId, true, false)
+		const thirdId = base.SQLInstanceID(1002)
+		createInstance(thirdId, false, false)
+
+		// This demonstrates that readers (SQL, instancereader) on an older
+		// cluster version do not break if they encounter a new schema value
+		// before the cluster upgrade. This provides additional confidence on
+		// top of the version guard we already have.
+		_, err := sqlDB.Exec("SELECT is_draining FROM system.sql_instances")
+		require.Error(t, err, "system.sql_instances is_draining columns should not exist")
+		// Verify the rows using SQL.
+		r := sqlutils.MakeSQLRunner(sqlDB)
+		r.CheckQueryResults(t,
+			`SELECT id, sql_addr FROM system.sql_instances WHERE id >= 1000`,
+			[][]string{{"1000", sqlAddr}, {"1001", sqlAddr}, {"1002", sqlAddr}})
+
+		// Verify the rows by decoding key-values.
+		instances, err := storage.GetAllInstancesDataForTest(ctx)
+		require.NoError(t, err)
+		instancestorage.SortInstances(instances)
+		require.Equal(t, 4, len(instances)) // Instance with id 1 also exists.
+		require.Equal(t, base.SQLInstanceID(1), instances[0].GetInstanceID())
+		require.Equal(t, firstId, instances[1].GetInstanceID())
+		require.Equal(t, true, instances[1].IsDraining)
+		require.Equal(t, secondId, instances[2].GetInstanceID())
+		require.Equal(t, false, instances[2].IsDraining)
+		require.Equal(t, thirdId, instances[3].GetInstanceID())
+		require.Equal(t, false, instances[3].IsDraining)
+	})
+}
diff --git a/pkg/upgrade/upgrades/v24_3_table_metadata_system_table.go b/pkg/upgrade/upgrades/v24_3_table_metadata_system_table.go
new file mode 100644
index 000000000000..b4786fafa4d8
--- /dev/null
+++ b/pkg/upgrade/upgrades/v24_3_table_metadata_system_table.go
@@ -0,0 +1,36 @@
+// Copyright 2024 The Cockroach Authors.
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package upgrades
+
+import (
+	"context"
+
+	"github.com/cockroachdb/cockroach/pkg/clusterversion"
+	"github.com/cockroachdb/cockroach/pkg/sql/catalog/systemschema"
+	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
+	"github.com/cockroachdb/cockroach/pkg/upgrade"
+)
+
+// addTableMetadataTableAndJob creates the system.table_metadata table if it does not exist.
+func addTableMetadataTableAndJob(
+	ctx context.Context, version clusterversion.ClusterVersion, d upgrade.TenantDeps,
+) error {
+	if err := createSystemTable(ctx, d.DB, d.Settings, d.Codec, systemschema.TableMetadata, tree.LocalityLevelTable); err != nil {
+		return err
+	}
+
+	// Add job to backfill the table metadata table.
+	if err := createUpdateTableMetadataCacheJob(ctx, version, d); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/pkg/upgrade/upgrades/v24_3_table_metadata_system_table_test.go b/pkg/upgrade/upgrades/v24_3_table_metadata_system_table_test.go
new file mode 100644
index 000000000000..9ddf5c555647
--- /dev/null
+++ b/pkg/upgrade/upgrades/v24_3_table_metadata_system_table_test.go
@@ -0,0 +1,53 @@
+// Copyright 2024 The Cockroach Authors.
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+package upgrades_test + +import ( + "context" + "testing" + + "github.com/cockroachdb/cockroach/pkg/base" + "github.com/cockroachdb/cockroach/pkg/clusterversion" + "github.com/cockroachdb/cockroach/pkg/server" + "github.com/cockroachdb/cockroach/pkg/sql" + "github.com/cockroachdb/cockroach/pkg/testutils/testcluster" + "github.com/cockroachdb/cockroach/pkg/upgrade/upgrades" + "github.com/cockroachdb/cockroach/pkg/util/leaktest" + "github.com/cockroachdb/cockroach/pkg/util/log" + "github.com/stretchr/testify/require" +) + +func TestAddTableMetadataTable(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + clusterArgs := base.TestClusterArgs{ + ServerArgs: base.TestServerArgs{ + Knobs: base.TestingKnobs{ + Server: &server.TestingKnobs{ + DisableAutomaticVersionUpgrade: make(chan struct{}), + ClusterVersionOverride: clusterversion.MinSupported.Version(), + }, + }, + }, + } + + ctx := context.Background() + tc := testcluster.StartTestCluster(t, 1, clusterArgs) + defer tc.Stopper().Stop(ctx) + s, sqlDB := tc.Server(0), tc.ServerConn(0) + + require.True(t, s.ExecutorConfig().(sql.ExecutorConfig).Codec.ForSystemTenant()) + _, err := sqlDB.Exec("SELECT * FROM system.table_metadata") + require.Error(t, err, "system.table_metadata should not exist") + upgrades.Upgrade(t, sqlDB, clusterversion.V24_3_TableMetadata, nil, false) + _, err = sqlDB.Exec("SELECT * FROM system.table_metadata") + require.NoError(t, err, "system.table_metadata should exist") +} diff --git a/pkg/upgrade/upgrades/v24_3_tenant_exclude_data_from_backup.go b/pkg/upgrade/upgrades/v24_3_tenant_exclude_data_from_backup.go new file mode 100644 index 000000000000..e5242af66404 --- /dev/null +++ b/pkg/upgrade/upgrades/v24_3_tenant_exclude_data_from_backup.go @@ -0,0 +1,57 @@ +// Copyright 2024 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package upgrades + +import ( + "context" + "fmt" + + "github.com/cockroachdb/cockroach/pkg/clusterversion" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" + "github.com/cockroachdb/cockroach/pkg/sql/sem/catconstants" + "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" + "github.com/cockroachdb/cockroach/pkg/upgrade" + "github.com/cockroachdb/errors" +) + +// tenantExcludeDataFromBackup marks some system tables with low GC +// TTL as excluded from backup. This step is only performed on +// non-system tenants, as a similar migration was already performed on +// the system tenant in the 24.1 cycle (see #120144).
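+// +// Upgrade steps may be re-run if the migration is retried, so this function +// must be idempotent; setting exclude_data_from_backup to the same value +// twice is a harmless no-op.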
+func tenantExcludeDataFromBackup( + ctx context.Context, _ clusterversion.ClusterVersion, d upgrade.TenantDeps, +) error { + if d.Codec.ForSystemTenant() { + return nil + } + + return d.DB.DescsTxn(ctx, func(ctx context.Context, txn descs.Txn) error { + for _, tableName := range []catconstants.SystemTableName{ + catconstants.ReplicationConstraintStatsTableName, + catconstants.ReplicationStatsTableName, + catconstants.TenantUsageTableName, + catconstants.LeaseTableName, + catconstants.SpanConfigurationsTableName, + } { + if _, err := txn.ExecEx( + ctx, + "mark-table-excluded-from-backup", + txn.KV(), + sessiondata.NodeUserSessionDataOverride, + fmt.Sprintf("ALTER TABLE system.public.%s SET (exclude_data_from_backup = true)", tableName), + ); err != nil { + return errors.Wrapf(err, "failed to set exclude_data_from_backup on table %s", tableName) + } + } + + return nil + }) +} diff --git a/pkg/util/admission/BUILD.bazel b/pkg/util/admission/BUILD.bazel index d8027a4e0650..a64d1c77eeda 100644 --- a/pkg/util/admission/BUILD.bazel +++ b/pkg/util/admission/BUILD.bazel @@ -25,6 +25,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/base", + "//pkg/raft/raftpb", "//pkg/roachpb", "//pkg/settings", "//pkg/settings/cluster", diff --git a/pkg/util/admission/admissionpb/admissionpb.go b/pkg/util/admission/admissionpb/admissionpb.go index 53d7f01c311d..86bb5ac3b18d 100644 --- a/pkg/util/admission/admissionpb/admissionpb.go +++ b/pkg/util/admission/admissionpb/admissionpb.go @@ -27,8 +27,8 @@ type WorkPriority int8 const ( // LowPri is low priority work. LowPri WorkPriority = math.MinInt8 - // TTLLowPri is low priority work from TTL internal submissions. - TTLLowPri WorkPriority = -100 + // BulkLowPri is low priority work from internal bulk submissions. + BulkLowPri WorkPriority = -100 // UserLowPri is low priority work from user submissions (SQL). UserLowPri WorkPriority = -50 // BulkNormalPri is bulk priority work from bulk jobs, which could be run due @@ -67,7 +67,7 @@ func (w WorkPriority) SafeFormat(p redact.SafePrinter, verb rune) { // name is used as the suffix on exported work queue metrics. var WorkPriorityDict = map[WorkPriority]string{ LowPri: "low-pri", - TTLLowPri: "ttl-low-pri", + BulkLowPri: "bulk-low-pri", UserLowPri: "user-low-pri", BulkNormalPri: "bulk-normal-pri", NormalPri: "normal-pri", @@ -104,7 +104,7 @@ func init() { orderedPris := []WorkPriority{ LowPri, - TTLLowPri, + BulkLowPri, UserLowPri, BulkNormalPri, NormalPri, @@ -216,7 +216,7 @@ func (w WorkClass) SafeFormat(p redact.SafePrinter, verb rune) { // Prevent the linter from emitting unused warnings. var _ = LowPri -var _ = TTLLowPri +var _ = BulkLowPri var _ = UserLowPri var _ = NormalPri var _ = UserHighPri diff --git a/pkg/util/admission/grant_coordinator.go b/pkg/util/admission/grant_coordinator.go index 7863c1f4ead2..8f650ca44878 100644 --- a/pkg/util/admission/grant_coordinator.go +++ b/pkg/util/admission/grant_coordinator.go @@ -66,10 +66,10 @@ type StoreGrantCoordinators struct { // numStores is used to track the number of stores which have been added // to the gcMap. This is used because the IntMap doesn't expose a size // api. - numStores int - pebbleMetricsProvider PebbleMetricsProvider - onLogEntryAdmitted OnLogEntryAdmitted - closeCh chan struct{} + numStores int + setPebbleMetricsProviderCalled bool + onLogEntryAdmitted OnLogEntryAdmitted + closeCh chan struct{} disableTickerForTesting bool // TODO(irfansharif): Fold into the testing knobs struct below. 
knobs *TestingKnobs @@ -80,12 +80,13 @@ type StoreGrantCoordinators struct { func (sgc *StoreGrantCoordinators) SetPebbleMetricsProvider( startupCtx context.Context, pmp PebbleMetricsProvider, iotc IOThresholdConsumer, ) { - if sgc.pebbleMetricsProvider != nil { + if sgc.setPebbleMetricsProviderCalled { panic(errors.AssertionFailedf("SetPebbleMetricsProvider called more than once")) } - sgc.pebbleMetricsProvider = pmp + sgc.setPebbleMetricsProviderCalled = true + pebbleMetricsProvider := pmp sgc.closeCh = make(chan struct{}) - metrics := sgc.pebbleMetricsProvider.GetPebbleMetrics() + metrics := pebbleMetricsProvider.GetPebbleMetrics() for _, m := range metrics { gc := sgc.initGrantCoordinator(m.StoreID) // Defensive call to LoadAndStore even though Store ought to be sufficient @@ -115,7 +116,7 @@ func (sgc *StoreGrantCoordinators) SetPebbleMetricsProvider( select { default: if remainingTicks == 0 { - metrics := sgc.pebbleMetricsProvider.GetPebbleMetrics() + metrics := pebbleMetricsProvider.GetPebbleMetrics() if len(metrics) != sgc.numStores { log.Warningf(ctx, "expected %d store metrics and found %d metrics", sgc.numStores, len(metrics)) @@ -146,6 +147,7 @@ func (sgc *StoreGrantCoordinators) SetPebbleMetricsProvider( }) case <-sgc.closeCh: done = true + pebbleMetricsProvider.Close() } } ticker.stop() @@ -457,7 +459,7 @@ func makeStoresGrantCoordinators( admissionpb.NormalPri, admissionpb.LockingNormalPri) elasticStoreWorkQueueMetrics := makeWorkQueueMetrics(fmt.Sprintf("%s-stores", admissionpb.ElasticWorkClass), registry, - admissionpb.TTLLowPri, admissionpb.BulkNormalPri) + admissionpb.BulkLowPri, admissionpb.BulkNormalPri) storeWorkQueueMetrics := [admissionpb.NumWorkClasses]*WorkQueueMetrics{ regularStoreWorkQueueMetrics, elasticStoreWorkQueueMetrics, } diff --git a/pkg/util/admission/granter.go b/pkg/util/admission/granter.go index 16ecb8179a3c..24c29f5d39e6 100644 --- a/pkg/util/admission/granter.go +++ b/pkg/util/admission/granter.go @@ -648,6 +648,7 @@ func (sg *kvStoreTokenGranter) storeReplicatedWorkAdmittedLocked( // PebbleMetricsProvider provides the pebble.Metrics for all stores. type PebbleMetricsProvider interface { GetPebbleMetrics() []StoreMetrics + Close() } // IOThresholdConsumer is informed about updated IOThresholds. 
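The Close method added to PebbleMetricsProvider above lets SetPebbleMetricsProvider release the provider once its ticker goroutine observes closeCh and exits. A minimal sketch of a conforming implementation, along the lines of the test double added to granter_test.go below; the fakeProvider name and its closed field are illustrative, not part of this change:

package admissionexample

import "github.com/cockroachdb/cockroach/pkg/util/admission"

// fakeProvider is a hypothetical PebbleMetricsProvider used only to
// illustrate the updated interface contract.
type fakeProvider struct {
	metrics []admission.StoreMetrics
	closed  bool
}

// GetPebbleMetrics returns a fixed snapshot of per-store metrics.
func (p *fakeProvider) GetPebbleMetrics() []admission.StoreMetrics { return p.metrics }

// Close is invoked exactly once, after the metrics ticker loop exits.
func (p *fakeProvider) Close() { p.closed = true }

// Compile-time assertion that the sketch satisfies the interface.
var _ admission.PebbleMetricsProvider = (*fakeProvider)(nil)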
diff --git a/pkg/util/admission/granter_test.go b/pkg/util/admission/granter_test.go index 80c9af590dc5..71a25509660f 100644 --- a/pkg/util/admission/granter_test.go +++ b/pkg/util/admission/granter_test.go @@ -487,6 +487,8 @@ func (m *testMetricsProvider) GetPebbleMetrics() []StoreMetrics { return m.metrics } +func (m *testMetricsProvider) Close() {} + func (m *testMetricsProvider) UpdateIOThreshold( id roachpb.StoreID, threshold *admissionpb.IOThreshold, ) { @@ -504,14 +506,7 @@ func (m *testMetricsProvider) setMetricsForStores(stores []int32, metrics pebble type noopOnLogEntryAdmitted struct{} -func (n *noopOnLogEntryAdmitted) AdmittedLogEntry( - context.Context, - roachpb.NodeID, - admissionpb.WorkPriority, - roachpb.StoreID, - roachpb.RangeID, - LogPosition, -) { +func (n *noopOnLogEntryAdmitted) AdmittedLogEntry(context.Context, LogEntryAdmittedCallbackState) { } var _ OnLogEntryAdmitted = &noopOnLogEntryAdmitted{} diff --git a/pkg/util/admission/work_queue.go b/pkg/util/admission/work_queue.go index 8fc2654eb561..5459d7a45d33 100644 --- a/pkg/util/admission/work_queue.go +++ b/pkg/util/admission/work_queue.go @@ -22,6 +22,7 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/base" + "github.com/cockroachdb/cockroach/pkg/raft/raftpb" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings" "github.com/cockroachdb/cockroach/pkg/settings/cluster" @@ -215,13 +216,22 @@ type ReplicatedWorkInfo struct { // RangeID identifies the raft group on behalf of which work is being // admitted. RangeID roachpb.RangeID + // Replica that asked for admission. + ReplicaID roachpb.ReplicaID + // LeaderTerm is the term of the leader that asked for this entry to be + // appended. + LeaderTerm uint64 + // LogPosition is the point on the raft log where the write was replicated. + LogPosition LogPosition // Origin is the node at which this work originated. It's used for // replication admission control to inform the origin of admitted work // (after which flow tokens are released, permitting more replicated - // writes). + // writes). Only populated for RACv1. Origin roachpb.NodeID - // LogPosition is the point on the raft log where the write was replicated. - LogPosition LogPosition + // RaftPri is the raft priority of the entry. Only populated for RACv2. + RaftPri raftpb.Priority + // IsV2Protocol is true iff the v2 protocol requested this admission. + IsV2Protocol bool // Ingested captures whether the write work corresponds to an ingest // (for sstables, for example). This is used alongside RequestedCount to // maintain accurate linear models for L0 growth due to ingests and @@ -284,12 +294,7 @@ type WorkQueue struct { onAdmittedReplicatedWork onAdmittedReplicatedWork - // Prevents more than one caller to be in Admit and calling tryGet or adding - // to the queue. It allows WorkQueue to release mu before calling tryGet and - // be assured that it is not competing with another Admit. - // Lock ordering is admitMu < mu. - admitMu syncutil.Mutex - mu struct { + mu struct { syncutil.Mutex // Tenants with waiting work. tenantHeap tenantHeap @@ -597,11 +602,10 @@ func (q *WorkQueue) Admit(ctx context.Context, info WorkInfo) (enabled bool, err q.metrics.incRequested(info.Priority) tenantID := info.TenantID.ToUint64() - // The code in this method does not use defer to unlock the mutexes because - // it needs the flexibility of selectively unlocking one of these on a - // certain code path. 
When changing the code, be careful in making sure the - // mutexes are properly unlocked on all code paths. - q.admitMu.Lock() + // The code in this method does not use defer to unlock the mutex because it + // needs the flexibility of selectively unlocking on a certain code path. + // When changing the code, be careful in making sure the mutex is properly + // unlocked on all code paths. q.mu.Lock() tenant, ok := q.mu.tenants[tenantID] if !ok { @@ -629,7 +633,6 @@ func (q *WorkQueue) Admit(ctx context.Context, info WorkInfo) (enabled bool, err q.mu.tenantHeap.fix(tenant) } q.mu.Unlock() - q.admitMu.Unlock() q.granter.tookWithoutPermission(info.RequestedCount) q.metrics.incAdmitted(info.Priority) q.metrics.recordBypassedAdmission(info.Priority) @@ -647,8 +650,10 @@ func (q *WorkQueue) Admit(ctx context.Context, info WorkInfo) (enabled bool, err // Optimistically update used to avoid locking again. tenant.used += uint64(info.RequestedCount) q.mu.Unlock() + // We have unlocked q.mu, so another concurrent request can also do tryGet + // and get ahead of this request. We don't need to be fair for such + // concurrent requests. if q.granter.tryGet(info.RequestedCount) { - q.admitMu.Unlock() q.metrics.incAdmitted(info.Priority) if info.ReplicatedWorkInfo.Enabled { // TODO(irfansharif): There's a race here, and could lead to @@ -723,7 +728,6 @@ func (q *WorkQueue) Admit(ctx context.Context, info WorkInfo) (enabled bool, err // Already canceled. More likely to happen if cpu starvation is // causing entering into the work queue to be delayed. q.mu.Unlock() - q.admitMu.Unlock() q.metrics.incErrored(info.Priority) deadline, _ := ctx.Deadline() return true, @@ -749,9 +753,8 @@ func (q *WorkQueue) Admit(ctx context.Context, info WorkInfo) (enabled bool, err } // Else already in tenantHeap. - // Release all locks. + // Release the lock. q.mu.Unlock() - q.admitMu.Unlock() q.metrics.recordStartWait(info.Priority) if info.ReplicatedWorkInfo.Enabled { @@ -2085,29 +2088,59 @@ func (q *StoreWorkQueue) admittedReplicatedWork( // revisit -- one possibility is to add this to a notification queue and // have a separate goroutine invoke these callbacks (without holding // coord.mu). We could directly invoke here too if not holding the lock. - q.onLogEntryAdmitted.AdmittedLogEntry( - q.q[wc].ambientCtx, - rwi.Origin, - pri, - q.storeID, - rwi.RangeID, - rwi.LogPosition, - ) -} - -// OnLogEntryAdmitted is used to observe the specific entries (identified by -// rangeID + log position) that were admitted. Since admission control for log -// entries is asynchronous/non-blocking, this allows callers to do requisite + cbState := LogEntryAdmittedCallbackState{ + StoreID: q.storeID, + RangeID: rwi.RangeID, + ReplicaID: rwi.ReplicaID, + LeaderTerm: rwi.LeaderTerm, + Pos: rwi.LogPosition, + Pri: pri, + Origin: rwi.Origin, + RaftPri: rwi.RaftPri, + IsV2Protocol: rwi.IsV2Protocol, + } + q.onLogEntryAdmitted.AdmittedLogEntry(q.q[wc].ambientCtx, cbState) +} + +// OnLogEntryAdmitted is used to observe the specific entries that were +// admitted. Since admission control for log entries is +// asynchronous/non-blocking, this allows callers to do requisite // post-admission bookkeeping. 
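+// +// A typical implementation demultiplexes on +// LogEntryAdmittedCallbackState.IsV2Protocol: RACv1 callbacks use Origin to +// return flow tokens to the originating node, while RACv2 callbacks rely on +// RaftPri together with LeaderTerm and Pos.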
type OnLogEntryAdmitted interface { - AdmittedLogEntry( - ctx context.Context, - origin roachpb.NodeID, /* node where the entry originated */ - pri admissionpb.WorkPriority, /* admission priority of the entry */ - storeID roachpb.StoreID, /* store on which the entry was admitted */ - rangeID roachpb.RangeID, /* identifying range for the log entry */ - pos LogPosition, /* log position of the entry that was admitted*/ - ) + AdmittedLogEntry(ctx context.Context, cbState LogEntryAdmittedCallbackState) +} + +// LogEntryAdmittedCallbackState is passed to AdmittedLogEntry. +type LogEntryAdmittedCallbackState struct { + // Store on which the entry was admitted. + StoreID roachpb.StoreID + // Range that contained that entry. + RangeID roachpb.RangeID + // Replica that asked for admission. + ReplicaID roachpb.ReplicaID + // LeaderTerm is the term of the leader that asked for this entry to be + // appended. + LeaderTerm uint64 + // Pos is the position of the entry in the log. + // + // TODO(sumeer): when the RACv1 protocol is deleted, drop the Term from this + // struct, and replace LeaderTerm/Pos.Index with a LogMark. + Pos LogPosition + // Pri is the admission priority used for admission. + Pri admissionpb.WorkPriority + // Origin is the node where the entry originated. It is only populated for + // replication admission control v1 (RACv1). + Origin roachpb.NodeID + // RaftPri is only populated for replication admission control v2 (RACv2). + // It is the raft priority for the entry. Technically, it could be derived + // from Pri, but we do not want the admission package to be aware of this + // translation. + RaftPri raftpb.Priority + // IsV2Protocol is true iff the v2 protocol requested this admission. It is + // used for de-multiplexing the callback correctly. + // + // TODO(sumeer): remove when the RACv1 protocol is deleted. + IsV2Protocol bool } // AdmittedWorkDone indicates to the queue that the admitted work has completed. diff --git a/pkg/util/cidr/BUILD.bazel b/pkg/util/cidr/BUILD.bazel index 0c656dc0efd6..e47a06e04927 100644 --- a/pkg/util/cidr/BUILD.bazel +++ b/pkg/util/cidr/BUILD.bazel @@ -9,6 +9,8 @@ go_library( "//pkg/settings", "//pkg/util/envutil", "//pkg/util/log", + "//pkg/util/metric", + "//pkg/util/metric/aggmetric", "//pkg/util/stop", "//pkg/util/syncutil", "//pkg/util/timeutil", diff --git a/pkg/util/cidr/cidr.go b/pkg/util/cidr/cidr.go index ecd75ad5b3c2..5a8349d96858 100644 --- a/pkg/util/cidr/cidr.go +++ b/pkg/util/cidr/cidr.go @@ -12,7 +12,9 @@ package cidr import ( "context" + "crypto/tls" "encoding/json" + "fmt" io "io" "net" "net/http" @@ -24,6 +26,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/settings" "github.com/cockroachdb/cockroach/pkg/util/envutil" "github.com/cockroachdb/cockroach/pkg/util/log" + "github.com/cockroachdb/cockroach/pkg/util/metric" + "github.com/cockroachdb/cockroach/pkg/util/metric/aggmetric" "github.com/cockroachdb/cockroach/pkg/util/stop" "github.com/cockroachdb/cockroach/pkg/util/syncutil" "github.com/cockroachdb/cockroach/pkg/util/timeutil" @@ -326,3 +330,140 @@ func (c *Lookup) LookupIP(ip net.IP) string { } return "" } + +type childNetMetrics struct { + WriteBytes *aggmetric.Counter + ReadBytes *aggmetric.Counter +} + +// NetMetrics are aggregate metrics around net.Conn mapped based on the CIDR lookup. 
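+// Child counters are created lazily, one per distinct tuple of label values; +// the network name resolved by the CIDR lookup is appended as the final +// "remote" label value at dial time.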
+type NetMetrics struct { + lookup *Lookup + WriteBytes *aggmetric.AggCounter + ReadBytes *aggmetric.AggCounter + + mu struct { + syncutil.Mutex + childMetrics map[string]childNetMetrics + } +} + +var _ metric.Struct = (*NetMetrics)(nil) + +// MetricStruct implements the metric.Struct interface. +func (m *NetMetrics) MetricStruct() {} + +// MakeNetMetrics makes a new NetMetrics object with the given metric metadata. +func (c *Lookup) MakeNetMetrics(metaWrite, metaRead metric.Metadata, labels ...string) *NetMetrics { + labels = append(labels, "remote") + nm := &NetMetrics{ + lookup: c, + WriteBytes: aggmetric.NewCounter(metaWrite, labels...), + ReadBytes: aggmetric.NewCounter(metaRead, labels...), + } + nm.mu.childMetrics = make(map[string]childNetMetrics) + return nm +} + +// DialContext is shorthand for the type of net.Dialer.DialContext. +type DialContext func(ctx context.Context, network, host string) (net.Conn, error) + +// Wrap returns a DialContext that wraps the connection with metrics. +func (m *NetMetrics) Wrap(dial DialContext, labels ...string) DialContext { + return func(ctx context.Context, network, host string) (net.Conn, error) { + conn, err := dial(ctx, network, host) + if err != nil { + return conn, err + } + return m.track(conn, labels...), nil + } +} + +// WrapTLS is like Wrap, but can be used if the underlying library doesn't +// expose a way to plug in a dialer for TLS connections. This is unfortunately +// pretty ugly... Copied from tls.Dial and kgo.DialTLS because they don't expose +// a dial call with a DialContext. Ideally you don't have to use this if the +// third party API does a sensible thing and exposes the ability to replace the +// "DialContext" directly. +func (m *NetMetrics) WrapTLS(dial DialContext, tlsCfg *tls.Config, labels ...string) DialContext { + return func(ctx context.Context, network, host string) (net.Conn, error) { + c := tlsCfg.Clone() + if c.ServerName == "" { + server, _, err := net.SplitHostPort(host) + if err != nil { + return nil, fmt.Errorf("unable to split host:port for dialing: %w", err) + } + c.ServerName = server + } + + rawConn, err := dial(ctx, network, host) + if err != nil { + return nil, err + } + scopedConn := m.track(rawConn, labels...) + + // Wrap the scoped connection so that TLS reads and writes are counted. + conn := tls.Client(scopedConn, c) + if err := conn.HandshakeContext(ctx); err != nil { + scopedConn.Close() + return nil, err + } + return conn, nil + } +} + +// track converts a connection to a wrapped connection with the given labels. +func (m *NetMetrics) track(conn net.Conn, labels ...string) metricsConn { + var remote string + if ip, ok := conn.RemoteAddr().(*net.TCPAddr); ok { + remote = m.lookup.LookupIP(ip.IP) + } + labels = append(labels, remote) + key := strings.Join(labels, "/") + + m.mu.Lock() + defer m.mu.Unlock() + nm, ok := m.mu.childMetrics[key] + if !ok { + nm = childNetMetrics{ + WriteBytes: m.WriteBytes.AddChild(labels...), + ReadBytes: m.ReadBytes.AddChild(labels...), + } + m.mu.childMetrics[key] = nm + } + + return metricsConn{ + Conn: conn, + WriteBytes: nm.WriteBytes.Inc, + ReadBytes: nm.ReadBytes.Inc, + } +} + +// metricsConn wraps a net.Conn and increments the metrics on read and write. +// +// NB: If the cost of incrementing the metrics on every read and write is too +// expensive, we could track the metrics internally and flush them periodically +// or when the connection is closed. +// NB: The metrics are cached with the connection, but potentially the cidr +// mapping could change under us. Since we don't expect "indefinite" connections +// we are OK with slightly stale metrics. +type metricsConn struct { + net.Conn + WriteBytes func(int64) + ReadBytes func(int64) +} + +func (c metricsConn) Read(b []byte) (n int, err error) { + n, err = c.Conn.Read(b) + if err == nil && n > 0 { + c.ReadBytes(int64(n)) + } + return n, err +} + +func (c metricsConn) Write(b []byte) (n int, err error) { + n, err = c.Conn.Write(b) + if err == nil && n > 0 { + c.WriteBytes(int64(n)) + } + return n, err +}
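A sketch of how a caller might consume the NetMetrics API above, assuming it already holds a *cidr.Lookup; the transport wiring, metric names, and label values here are illustrative, not taken from this change:

package cidrexample

import (
	"net"
	"net/http"

	"github.com/cockroachdb/cockroach/pkg/util/cidr"
	"github.com/cockroachdb/cockroach/pkg/util/metric"
)

// newInstrumentedTransport wires NetMetrics into an http.Transport.
func newInstrumentedTransport(lookup *cidr.Lookup) *http.Transport {
	nm := lookup.MakeNetMetrics(
		metric.Metadata{Name: "example.net.bytes.written"}, // hypothetical metric names
		metric.Metadata{Name: "example.net.bytes.read"},
		"client", // label key; the "remote" key is appended automatically
	)
	dialer := &net.Dialer{}
	return &http.Transport{
		// Every connection dialed through this transport counts its read and
		// write bytes under the ("client"="http", "remote"=<CIDR name>) child
		// counters, with the remote name resolved once at dial time.
		DialContext: nm.Wrap(dialer.DialContext, "http"),
	}
}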
diff --git a/pkg/util/cidr/cidr_test.go b/pkg/util/cidr/cidr_test.go index 37c4a717ac53..04eaa01bb39d 100644 --- a/pkg/util/cidr/cidr_test.go +++ b/pkg/util/cidr/cidr_test.go @@ -143,9 +143,14 @@ func TestRefresh(t *testing.T) { // We haven't set the URL yet, so it should return an empty string. require.Equal(t, "", c.LookupIP(net.ParseIP("127.0.0.1"))) - // Set the URL to the file we created. Verify it takes effect immediately. + // Set the URL to the file we created. Verify it takes effect quickly. cidrMappingUrl.Override(context.Background(), &st.SV, "file://"+filename) - require.Equal(t, "loopback", c.LookupIP(net.ParseIP("127.0.0.1"))) + testutils.SucceedsSoon(t, func() error { + if c.LookupIP(net.ParseIP("127.0.0.1")) != "loopback" { + return errors.New("not refreshed") + } + return nil + }) cidrRefreshInterval.Override(context.Background(), &st.SV, time.Second) diff --git a/pkg/util/duration/BUILD.bazel b/pkg/util/duration/BUILD.bazel index b7bc40eab6ba..0ffb5ddd2807 100644 --- a/pkg/util/duration/BUILD.bazel +++ b/pkg/util/duration/BUILD.bazel @@ -16,6 +16,7 @@ go_library( "//pkg/sql/pgwire/pgerror", "//pkg/sql/types", "//pkg/util/arith", + "//pkg/util/timeutil/pgdate", "@com_github_cockroachdb_apd_v3//:apd", "@com_github_cockroachdb_errors//:errors", ], diff --git a/pkg/util/duration/duration.go b/pkg/util/duration/duration.go index edfda0e51013..af40e819c281 100644 --- a/pkg/util/duration/duration.go +++ b/pkg/util/duration/duration.go @@ -24,6 +24,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util/arith" + "github.com/cockroachdb/cockroach/pkg/util/timeutil/pgdate" "github.com/cockroachdb/errors" ) @@ -736,6 +737,11 @@ func Decode(sortNanos int64, months int64, days int64) (Duration, error) { // Add returns the time t+d, using a configurable mode. func Add(t time.Time, d Duration) time.Time { + if t == pgdate.TimeInfinity || t == pgdate.TimeNegativeInfinity { + // Adding or subtracting any duration to/from "infinity"/"-infinity" yields the value itself. + return t + } + // Fast path adding units < 1 day. // Avoiding AddDate(0,0,0) is required to prevent changing times // on DST boundaries. diff --git a/pkg/util/leaktest/leaktest.go b/pkg/util/leaktest/leaktest.go index 04f63ba9ed8a..102f3ed659a4 100644 --- a/pkg/util/leaktest/leaktest.go +++ b/pkg/util/leaktest/leaktest.go @@ -64,6 +64,10 @@ func interestingGoroutines() map[int64]string { strings.Contains(stack, "github.com/jackc/pgconn/internal/ctxwatch.(*ContextWatcher).Watch.func1") || // Ignore pq goroutine that watches for context cancellation. strings.Contains(stack, "github.com/lib/pq.(*conn).watchCancel") || + // Ignore TLS handshake related goroutine. + // TODO(pritesh-lahoti): Revisit this once Go is updated to 1.23, as this seems to have been + // fixed: https://github.com/golang/go/pull/62227.
+ strings.Contains(stack, "net/http.(*persistConn).addTLS") || // Seems to be gccgo specific. (runtime.Compiler == "gccgo" && strings.Contains(stack, "testing.T.Parallel")) || // Ignore intentionally long-running logging goroutines that live for the diff --git a/pkg/util/log/formats.go b/pkg/util/log/formats.go index 6d05e892fad9..ff5715bd400e 100644 --- a/pkg/util/log/formats.go +++ b/pkg/util/log/formats.go @@ -32,7 +32,8 @@ type logFormatter interface { contentType() string } -var formatParsers = map[string]string{ +// FormatParsers maps the user facing format names to the internal representation. +var FormatParsers = map[string]string{ "crdb-v1": "v1", "crdb-v1-count": "v1", "crdb-v1-tty": "v1", diff --git a/pkg/util/log/gen/main.go b/pkg/util/log/gen/main.go index 132c877a623b..08f359dddb35 100644 --- a/pkg/util/log/gen/main.go +++ b/pkg/util/log/gen/main.go @@ -422,7 +422,7 @@ func (logger{{.Name}}) Shoutf(ctx context.Context, sev Severity, format string, // verbosity level is active. {{.Comment -}} func (logger{{.Name}}) VEvent(ctx context.Context, level Level, msg string) { - vEventf(ctx, false /* isErr */, 1, level, channel.{{.NAME}}, msg) + vEventf(ctx, false /* isErr */, 1, level, channel.{{.NAME}}, "%s", msg) } // VEventf either logs a message to the channel (which also outputs to the diff --git a/pkg/util/log/log_channels_generated.go b/pkg/util/log/log_channels_generated.go index 88424883d83d..20f4f9912c61 100644 --- a/pkg/util/log/log_channels_generated.go +++ b/pkg/util/log/log_channels_generated.go @@ -972,7 +972,7 @@ func (loggerDev) Shoutf(ctx context.Context, sev Severity, format string, args . // sensitive operational data. // See [Configure logs](configure-logs.html#dev-channel). func (loggerDev) VEvent(ctx context.Context, level Level, msg string) { - vEventf(ctx, false /* isErr */, 1, level, channel.DEV, msg) + vEventf(ctx, false /* isErr */, 1, level, channel.DEV, "%s", msg) } // VEventf either logs a message to the channel (which also outputs to the @@ -1504,7 +1504,7 @@ func (loggerOps) Shoutf(ctx context.Context, sev Severity, format string, args . // - [Cluster setting](cluster-settings.html) changes // - [Zone configuration](configure-replication-zones.html) changes func (loggerOps) VEvent(ctx context.Context, level Level, msg string) { - vEventf(ctx, false /* isErr */, 1, level, channel.OPS, msg) + vEventf(ctx, false /* isErr */, 1, level, channel.OPS, "%s", msg) } // VEventf either logs a message to the channel (which also outputs to the @@ -1939,7 +1939,7 @@ func (loggerHealth) Shoutf(ctx context.Context, sev Severity, format string, arg // - Range and table leasing events // - Up- and down-replication, range unavailability func (loggerHealth) VEvent(ctx context.Context, level Level, msg string) { - vEventf(ctx, false /* isErr */, 1, level, channel.HEALTH, msg) + vEventf(ctx, false /* isErr */, 1, level, channel.HEALTH, "%s", msg) } // VEventf either logs a message to the channel (which also outputs to the @@ -2248,7 +2248,7 @@ func (loggerStorage) Shoutf(ctx context.Context, sev Severity, format string, ar // The `STORAGE` channel is used to report low-level storage // layer events (RocksDB/Pebble). 
func (loggerStorage) VEvent(ctx context.Context, level Level, msg string) { - vEventf(ctx, false /* isErr */, 1, level, channel.STORAGE, msg) + vEventf(ctx, false /* isErr */, 1, level, channel.STORAGE, "%s", msg) } // VEventf either logs a message to the channel (which also outputs to the @@ -2705,7 +2705,7 @@ func (loggerSessions) Shoutf(ctx context.Context, sev Severity, format string, a // This is typically configured in "audit" mode, with event // numbering and synchronous writes. func (loggerSessions) VEvent(ctx context.Context, level Level, msg string) { - vEventf(ctx, false /* isErr */, 1, level, channel.SESSIONS, msg) + vEventf(ctx, false /* isErr */, 1, level, channel.SESSIONS, "%s", msg) } // VEventf either logs a message to the channel (which also outputs to the @@ -3238,7 +3238,7 @@ func (loggerSqlSchema) Shoutf(ctx context.Context, sev Severity, format string, // `SQL_SCHEMA` events generally comprise changes to the schema that affect the // functional behavior of client apps using stored objects. func (loggerSqlSchema) VEvent(ctx context.Context, level Level, msg string) { - vEventf(ctx, false /* isErr */, 1, level, channel.SQL_SCHEMA, msg) + vEventf(ctx, false /* isErr */, 1, level, channel.SQL_SCHEMA, "%s", msg) } // VEventf either logs a message to the channel (which also outputs to the @@ -3717,7 +3717,7 @@ func (loggerUserAdmin) Shoutf(ctx context.Context, sev Severity, format string, // This is typically configured in "audit" mode, with event // numbering and synchronous writes. func (loggerUserAdmin) VEvent(ctx context.Context, level Level, msg string) { - vEventf(ctx, false /* isErr */, 1, level, channel.USER_ADMIN, msg) + vEventf(ctx, false /* isErr */, 1, level, channel.USER_ADMIN, "%s", msg) } // VEventf either logs a message to the channel (which also outputs to the @@ -4150,7 +4150,7 @@ func (loggerPrivileges) Shoutf(ctx context.Context, sev Severity, format string, // This is typically configured in "audit" mode, with event // numbering and synchronous writes. func (loggerPrivileges) VEvent(ctx context.Context, level Level, msg string) { - vEventf(ctx, false /* isErr */, 1, level, channel.PRIVILEGES, msg) + vEventf(ctx, false /* isErr */, 1, level, channel.PRIVILEGES, "%s", msg) } // VEventf either logs a message to the channel (which also outputs to the @@ -4659,7 +4659,7 @@ func (loggerSensitiveAccess) Shoutf(ctx context.Context, sev Severity, format st // This is typically configured in "audit" mode, with event // numbering and synchronous writes. func (loggerSensitiveAccess) VEvent(ctx context.Context, level Level, msg string) { - vEventf(ctx, false /* isErr */, 1, level, channel.SENSITIVE_ACCESS, msg) + vEventf(ctx, false /* isErr */, 1, level, channel.SENSITIVE_ACCESS, "%s", msg) } // VEventf either logs a message to the channel (which also outputs to the @@ -5056,7 +5056,7 @@ func (loggerSqlExec) Shoutf(ctx context.Context, sev Severity, format string, ar // `sql.log.all_statements.enabled` [cluster setting](cluster-settings.html)) // - uncaught Go panic errors during the execution of a SQL statement. 
func (loggerSqlExec) VEvent(ctx context.Context, level Level, msg string) { - vEventf(ctx, false /* isErr */, 1, level, channel.SQL_EXEC, msg) + vEventf(ctx, false /* isErr */, 1, level, channel.SQL_EXEC, "%s", msg) } // VEventf either logs a message to the channel (which also outputs to the @@ -5501,7 +5501,7 @@ func (loggerSqlPerf) Shoutf(ctx context.Context, sev Severity, format string, ar // with versions prior to v21.1, where the corresponding events // were redirected to separate files. func (loggerSqlPerf) VEvent(ctx context.Context, level Level, msg string) { - vEventf(ctx, false /* isErr */, 1, level, channel.SQL_PERF, msg) + vEventf(ctx, false /* isErr */, 1, level, channel.SQL_PERF, "%s", msg) } // VEventf either logs a message to the channel (which also outputs to the @@ -5852,7 +5852,7 @@ func (loggerSqlInternalPerf) Shoutf(ctx context.Context, sev Severity, format st // channel so as to not pollute the `SQL_PERF` logging output with // internal troubleshooting details. func (loggerSqlInternalPerf) VEvent(ctx context.Context, level Level, msg string) { - vEventf(ctx, false /* isErr */, 1, level, channel.SQL_INTERNAL_PERF, msg) + vEventf(ctx, false /* isErr */, 1, level, channel.SQL_INTERNAL_PERF, "%s", msg) } // VEventf either logs a message to the channel (which also outputs to the @@ -6173,7 +6173,7 @@ func (loggerTelemetry) Shoutf(ctx context.Context, sev Severity, format string, // feature usage within CockroachDB and anonymizes any application- // specific data. func (loggerTelemetry) VEvent(ctx context.Context, level Level, msg string) { - vEventf(ctx, false /* isErr */, 1, level, channel.TELEMETRY, msg) + vEventf(ctx, false /* isErr */, 1, level, channel.TELEMETRY, "%s", msg) } // VEventf either logs a message to the channel (which also outputs to the @@ -6492,7 +6492,7 @@ func (loggerKvDistribution) Shoutf(ctx context.Context, sev Severity, format str // replicas between stores in the cluster, or adding (removing) replicas to // ranges. 
func (loggerKvDistribution) VEvent(ctx context.Context, level Level, msg string) { - vEventf(ctx, false /* isErr */, 1, level, channel.KV_DISTRIBUTION, msg) + vEventf(ctx, false /* isErr */, 1, level, channel.KV_DISTRIBUTION, "%s", msg) } // VEventf either logs a message to the channel (which also outputs to the diff --git a/pkg/util/log/log_decoder.go b/pkg/util/log/log_decoder.go index 9432edb0da8a..1d35b58c80c3 100644 --- a/pkg/util/log/log_decoder.go +++ b/pkg/util/log/log_decoder.go @@ -78,7 +78,7 @@ func NewEntryDecoderWithFormat( } in = io.MultiReader(read, in) } - f, ok := formatParsers[format] + f, ok := FormatParsers[format] if !ok { return nil, errors.Newf("unknown log file format: %s", format) } diff --git a/pkg/util/log/registry.go b/pkg/util/log/registry.go index 22f972daf063..8b74dff98d06 100644 --- a/pkg/util/log/registry.go +++ b/pkg/util/log/registry.go @@ -81,8 +81,12 @@ func (r *sinkInfoRegistry) iter(fn func(l *sinkInfo) error) error { func (r *sinkInfoRegistry) iterFileSinks(fn func(l *fileSink) error) error { return r.iter(func(si *sinkInfo) error { if fs, ok := si.sink.(*fileSink); ok { - if err := fn(fs); err != nil { - return err + return fn(fs) + } + + if bs, ok := si.sink.(*bufferedSink); ok { + if fs, ok := bs.child.(*fileSink); ok { + return fn(fs) } } return nil diff --git a/pkg/util/log/registry_test.go b/pkg/util/log/registry_test.go index 8ef2e6bedea3..f1fd09b527af 100644 --- a/pkg/util/log/registry_test.go +++ b/pkg/util/log/registry_test.go @@ -20,15 +20,102 @@ import ( "github.com/stretchr/testify/require" ) +var ( + oneMicro = 1 * time.Microsecond + oneByte = logconfig.ByteSize(1) + oneThousandBytes = logconfig.ByteSize(1000) + notBuffered = false + bufferedAddr = "buffered.sink.com" +) + +func TestIterFileSinks(t *testing.T) { + defer leaktest.AfterTest(t)() + sc := ScopeWithoutShowLogs(t) + defer sc.Close(t) + + // Set up a log config containing a file sink. + cfg := logconfig.DefaultConfig() + + cfg.Sinks.FileGroups = map[string]*logconfig.FileSinkConfig{ + "unbuffered": { + Channels: logconfig.SelectChannels(channel.OPS), + FileDefaults: logconfig.FileDefaults{}, + }, + "buffered": { + Channels: logconfig.SelectChannels(channel.DEV), + FileDefaults: logconfig.FileDefaults{ + BufferedWrites: ¬Buffered, + CommonSinkConfig: logconfig.CommonSinkConfig{ + Buffering: logconfig.CommonBufferSinkConfigWrapper{ + CommonBufferSinkConfig: logconfig.CommonBufferSinkConfig{ + MaxStaleness: &oneMicro, + MaxBufferSize: &oneThousandBytes, + FlushTriggerSize: &oneByte, + }, + }, + }, + }, + }, + } + + // add an HTTP sink to make sure the iteration doesn't pick up all the + // buffered sinks. It should only pick up the file sinks. 
+ cfg.Sinks.HTTPServers = map[string]*logconfig.HTTPSinkConfig{ + "buffered-http": { + Channels: logconfig.SelectChannels(channel.OPS), + HTTPDefaults: logconfig.HTTPDefaults{ + Address: &bufferedAddr, + CommonSinkConfig: logconfig.CommonSinkConfig{ + Buffering: logconfig.CommonBufferSinkConfigWrapper{ + CommonBufferSinkConfig: logconfig.CommonBufferSinkConfig{ + MaxStaleness: &oneMicro, + MaxBufferSize: &oneThousandBytes, + FlushTriggerSize: &oneByte, + }, + }, + }, + }, + }, + } + + require.NoError(t, cfg.Validate(&sc.logDir)) + + // Apply the configuration + TestingResetActive() + cleanup, err := ApplyConfig(cfg, nil /* fileSinkMetricsForDir */, nil /* fatalOnLogStall */) + require.NoError(t, err) + defer cleanup() + + callMap := map[string]bool{ + "logtest-stderr": false, + "logtest-unbuffered": false, + "logtest-buffered": false, + } + + fn := func(fs *fileSink) error { + require.NotEqual( + t, fs.nameGenerator.fileNamePrefix, + "logtest-buffered-http", "unexpected fileSink %q", fs.nameGenerator.fileNamePrefix, + ) + require.False( + t, callMap[fs.nameGenerator.fileNamePrefix], "fileSink %q was called twice", fs.nameGenerator.fileNamePrefix, + ) + + callMap[fs.nameGenerator.fileNamePrefix] = true + return nil + } + require.NoError(t, logging.allSinkInfos.iterFileSinks(fn)) + + for k, v := range callMap { + require.Truef(t, v, "fileSink %q was never called during iteration", k) + } +} + func TestIterHTTPSinks(t *testing.T) { defer leaktest.AfterTest(t)() sc := ScopeWithoutShowLogs(t) defer sc.Close(t) - oneMicro := 1 * time.Microsecond - oneByte := logconfig.ByteSize(1) - oneThousandBytes := logconfig.ByteSize(1000) - bufferedAddr := "buffered.sink.com" unbufferedAddr := "unbuffered.sink.com" // Set up a log config containing both buffered and unbuffered HTTP sinks. diff --git a/pkg/util/log/trace.go b/pkg/util/log/trace.go index eb7d7602e734..fb1e0f9d6fa4 100644 --- a/pkg/util/log/trace.go +++ b/pkg/util/log/trace.go @@ -139,7 +139,7 @@ func vEventf( // active trace) or to the trace alone, depending on whether the specified // verbosity level is active. func VEvent(ctx context.Context, level Level, msg string) { - vEventf(ctx, false /* isErr */, 1, level, channel.DEV, msg) + vEventf(ctx, false /* isErr */, 1, level, channel.DEV, "%s", msg) } // VEventf either logs a message to the DEV channel (which also outputs to the @@ -159,7 +159,7 @@ func VEventfDepth(ctx context.Context, depth int, level Level, format string, ar // outputs to the active trace) or to the trace alone, depending on whether // the specified verbosity level is active. 
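+// +// NB: as with the generated channel loggers above, msg is passed through an +// explicit "%s" directive below so that a message containing literal +// formatting verbs (for example "100% done") is logged verbatim rather than +// being interpreted as a format string.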
func VErrEvent(ctx context.Context, level Level, msg string) { - vEventf(ctx, true /* isErr */, 1, level, channel.DEV, msg) + vEventf(ctx, true /* isErr */, 1, level, channel.DEV, "%s", msg) } // VErrEventf either logs an error message to the DEV Channel (which also diff --git a/pkg/util/rangedesc/testdata/scoped_iteration b/pkg/util/rangedesc/testdata/scoped_iteration index d5fa182e5b88..078981466538 100644 --- a/pkg/util/rangedesc/testdata/scoped_iteration +++ b/pkg/util/rangedesc/testdata/scoped_iteration @@ -67,35 +67,36 @@ scan - r65:/Table/6{3-4} - r66:/Table/6{4-5} - r67:/Table/6{5-6} -- r68:/{Table/66-Max} -scan through /M{in-ax} (page-size=1) found 68/68 descriptors +- r68:/Table/6{6-7} +- r69:/{Table/67-Max} +scan through /M{in-ax} (page-size=1) found 69/69 descriptors scan scope=1 ---- - r2:/System/NodeLiveness{-Max} -scan through /System/NodeLiveness{-Max} (page-size=1) found 1/68 descriptors +scan through /System/NodeLiveness{-Max} (page-size=1) found 1/69 descriptors scan scope=2 ---- - r4:/System{/tsd-tse} -scan through /System{/tsd-tse} (page-size=1) found 1/68 descriptors +scan through /System{/tsd-tse} (page-size=1) found 1/69 descriptors scan scope=3 ---- - r1:/{Meta1-System/NodeLiveness} -scan through /M{in-eta2} (page-size=1) found 1/68 descriptors +scan through /M{in-eta2} (page-size=1) found 1/69 descriptors scan scope=4 ---- - r1:/{Meta1-System/NodeLiveness} -scan through /{Meta1-System} (page-size=1) found 1/68 descriptors +scan through /{Meta1-System} (page-size=1) found 1/69 descriptors scan scope=5 ---- - r7:/Table/{3-4} - r8:/Table/{4-5} - r9:/Table/{5-6} -scan through /Table/{3-6} (page-size=1) found 3/68 descriptors +scan through /Table/{3-6} (page-size=1) found 3/69 descriptors scan scope=6 ---- @@ -109,7 +110,7 @@ scan scope=6 - r48:/Table/4{5-6} - r49:/Table/4{6-7} - r50:/Table/4{7-8} -scan through /Table/{38-48} (page-size=1) found 10/68 descriptors +scan through /Table/{38-48} (page-size=1) found 10/69 descriptors scan scope=7 ---- @@ -175,5 +176,6 @@ scan scope=7 - r65:/Table/6{3-4} - r66:/Table/6{4-5} - r67:/Table/6{5-6} -- r68:/{Table/66-Max} -scan through /Table/{0-Max} (page-size=1) found 63/68 descriptors +- r68:/Table/6{6-7} +- r69:/{Table/67-Max} +scan through /Table/{0-Max} (page-size=1) found 64/69 descriptors diff --git a/pkg/util/rangedesc/testdata/scoped_iteration_with_page_size b/pkg/util/rangedesc/testdata/scoped_iteration_with_page_size index 12b1f7bc8e61..8e8e25d6d7a6 100644 --- a/pkg/util/rangedesc/testdata/scoped_iteration_with_page_size +++ b/pkg/util/rangedesc/testdata/scoped_iteration_with_page_size @@ -67,18 +67,19 @@ scan page-size=3 - r65:/Table/6{3-4} - r66:/Table/6{4-5} - r67:/Table/6{5-6} -- r68:/{Table/66-Max} -scan through /M{in-ax} (page-size=3) found 68/68 descriptors +- r68:/Table/6{6-7} +- r69:/{Table/67-Max} +scan through /M{in-ax} (page-size=3) found 69/69 descriptors scan scope=1 page-size=5 ---- - r2:/System/NodeLiveness{-Max} -scan through /System/NodeLiveness{-Max} (page-size=5) found 1/68 descriptors +scan through /System/NodeLiveness{-Max} (page-size=5) found 1/69 descriptors scan scope=1 page-size=500 ---- - r2:/System/NodeLiveness{-Max} -scan through /System/NodeLiveness{-Max} (page-size=500) found 1/68 descriptors +scan through /System/NodeLiveness{-Max} (page-size=500) found 1/69 descriptors scan scope=6 page-size=4 ---- @@ -92,4 +93,4 @@ scan scope=6 page-size=4 - r48:/Table/4{5-6} - r49:/Table/4{6-7} - r50:/Table/4{7-8} -scan through /Table/{38-48} (page-size=4) found 10/68 descriptors +scan through 
/Table/{38-48} (page-size=4) found 10/69 descriptors diff --git a/pkg/util/rangedesc/testdata/scoped_iteration_with_splits b/pkg/util/rangedesc/testdata/scoped_iteration_with_splits index 642306e5d31f..f0f6cc0a9f57 100644 --- a/pkg/util/rangedesc/testdata/scoped_iteration_with_splits +++ b/pkg/util/rangedesc/testdata/scoped_iteration_with_splits @@ -7,8 +7,8 @@ splitting at /Meta2/"middle" scan ---- - r1:/Meta{1-2} -- r69:/Meta2{-/middle} -- r70:/{Meta2/middle-System/NodeLiveness} +- r70:/Meta2{-/middle} +- r71:/{Meta2/middle-System/NodeLiveness} - r2:/System/NodeLiveness{-Max} - r3:/System/{NodeLivenessMax-tsd} - r4:/System{/tsd-tse} @@ -75,19 +75,20 @@ scan - r65:/Table/6{3-4} - r66:/Table/6{4-5} - r67:/Table/6{5-6} -- r68:/{Table/66-Max} -scan through /M{in-ax} (page-size=1) found 70/70 descriptors +- r68:/Table/6{6-7} +- r69:/{Table/67-Max} +scan through /M{in-ax} (page-size=1) found 71/71 descriptors # Scanning through just meta1 is unnaffected by these splits. scan scope=3 ---- - r1:/Meta{1-2} -scan through /M{in-eta2} (page-size=1) found 1/70 descriptors +scan through /M{in-eta2} (page-size=1) found 1/71 descriptors # Scanning through both meta{1,2} does surface these descriptors. scan scope=4 page-size=2 ---- - r1:/Meta{1-2} -- r69:/Meta2{-/middle} -- r70:/{Meta2/middle-System/NodeLiveness} -scan through /{Meta1-System} (page-size=2) found 3/70 descriptors +- r70:/Meta2{-/middle} +- r71:/{Meta2/middle-System/NodeLiveness} +scan through /{Meta1-System} (page-size=2) found 3/71 descriptors
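Together with the guard added to duration.Add earlier in this diff, the new pgdate sentinels defined below behave as absorbing values under interval arithmetic. A test-style sketch of that property (the test name is hypothetical; the APIs are the ones this patch touches):

package pgdateexample

import (
	"testing"
	"time"

	"github.com/cockroachdb/cockroach/pkg/util/duration"
	"github.com/cockroachdb/cockroach/pkg/util/timeutil/pgdate"
	"github.com/stretchr/testify/require"
)

// TestInfinityAbsorbsDurations checks that adding any duration to the
// infinity sentinels returns the sentinel unchanged.
func TestInfinityAbsorbsDurations(t *testing.T) {
	d := duration.MakeDuration(time.Hour.Nanoseconds(), 1 /* days */, 1 /* months */)
	require.Equal(t, pgdate.TimeInfinity, duration.Add(pgdate.TimeInfinity, d))
	require.Equal(t, pgdate.TimeNegativeInfinity, duration.Add(pgdate.TimeNegativeInfinity, d))
}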
diff --git a/pkg/util/timeutil/pgdate/parsing.go b/pkg/util/timeutil/pgdate/parsing.go index cfba80ca6922..6787ee12e58f 100644 --- a/pkg/util/timeutil/pgdate/parsing.go +++ b/pkg/util/timeutil/pgdate/parsing.go @@ -66,20 +66,44 @@ var ( // https://www.postgresql.org/docs/10/static/datatype-datetime.html#DATATYPE-DATETIME-SPECIAL-TABLE var ( TimeEpoch = timeutil.Unix(0, 0) - // TimeInfinity represents the "highest" possible time. - // TODO (#41564): this should actually behave as infinity, i.e. any operator - // leaves this as infinity. This time should always be greater than any other time. - // We should probably use the next microsecond after this value, i.e. timeutil.Unix(9224318016000, 0). + // TimeInfinity represents the "highest" possible time. Its value is + // "294277-01-01 23:59:59.999999 +0000 UTC", which is 24 hours after "MaxSupportedTime" + // (294276-12-31 23:59:59.999999 +0000 UTC). + // + // The "date" of TimeInfinity is one day after "MaxSupportedTime"; it's chosen for no + // particular reason. + // + // The "time" of TimeInfinity is set to "23:59:59.999999" to maintain the behavior of + // 'Infinity'::time as it was before PR #127141. (Prior to PR #127141, 'Infinity'::time + // resulted in '23:59:59.999999'.) This behavior may change in the future, see issue #129148 + // for more details. + // + // Postgres uses math.MaxInt64 microseconds as the infinity value. - // See: https://github.com/postgres/postgres/blob/42aa1f0ab321fd43cbfdd875dd9e13940b485900/src/include/datatype/timestamp.h#L107. - TimeInfinity = timeutil.Unix(9224318016000-1, 999999000) - // TimeNegativeInfinity represents the "lowest" possible time. - // TODO (#41564): this should actually behave as -infinity, i.e. any operator - // leaves this as -infinity. This time should always be less than any other time. - // We should probably use the next microsecond before this value, i.e. timeutil.Unix(9224318016000-1, 999999000). + // See: https://github.com/postgres/postgres/blob/9380e5f129d2a160ecc2444f61bb7cb97fd51fbb/src/include/datatype/timestamp.h#L157 + // + // Refer to the doc comments of the function "timeutil.Unix" for the process of + // deriving the arguments to construct a specific time.Time. + TimeInfinity = timeutil.Unix(9224318102399 /* sec */, 999999000 /* nsec */) + TimeInfinitySec = float64(TimeInfinity.Unix()) + // TimeNegativeInfinity represents the "lowest" possible time. Its value is + // "-4714-11-23 00:00:00 +0000 UTC", which is 24 hours before "MinSupportedTime" + // ("-4714-11-24 00:00:00 +0000 UTC"). + // + // The "date" of TimeNegativeInfinity is one day before "MinSupportedTime"; it's chosen for no + // particular reason. + // + // The "time" of TimeNegativeInfinity is set to "00:00:00" to maintain the behavior of + // '-Infinity'::time as it was before PR #127141. (Prior to PR #127141, '-Infinity'::time + // resulted in '00:00:00'.) This behavior may change in the future, see issue #129148 + // for more details. + // + // Postgres uses math.MinInt64 microseconds as the -infinity value. - // See: https://github.com/postgres/postgres/blob/42aa1f0ab321fd43cbfdd875dd9e13940b485900/src/include/datatype/timestamp.h#L107. - TimeNegativeInfinity = timeutil.Unix(-210866803200, 0) + // See: https://github.com/postgres/postgres/blob/9380e5f129d2a160ecc2444f61bb7cb97fd51fbb/src/include/datatype/timestamp.h#L156 + // + // Refer to the doc comments of the function "timeutil.Unix" for the process of + // deriving the arguments to construct a specific time.Time. + TimeNegativeInfinity = timeutil.Unix(-210898425600 /* sec */, 0 /* nsec */) + TimeNegativeInfinitySec = float64(TimeNegativeInfinity.Unix()) ) type ParseHelper struct { diff --git a/pkg/util/timeutil/time.go b/pkg/util/timeutil/time.go index 281d6b868a0d..b7e92eaf2c71 100644 --- a/pkg/util/timeutil/time.go +++ b/pkg/util/timeutil/time.go @@ -112,6 +112,16 @@ func ToUnixMicros(t time.Time) int64 { } // Unix wraps time.Unix ensuring that the result is in UTC instead of Local.
+// +// The process of deriving the args to construct a specific time.Time: +// +// // say we want to construct timestamp "294277-01-01 23:59:59.999999 +0000 UTC" +// tm := time.Date(294277, 1, 1, 23, 59, 59, 999999000, time.UTC) +// // get the args of "timeutil.Unix" +// sec := tm.Unix() +// nsec := int64(tm.Nanosecond()) +// // verify +// fmt.Println(tm == time.Unix(sec, nsec).UTC()) func Unix(sec, nsec int64) time.Time { return time.Unix(sec, nsec).UTC() } diff --git a/pkg/workload/schemachange/error_screening.go b/pkg/workload/schemachange/error_screening.go index fa15c490faf1..7cbcd0d869f5 100644 --- a/pkg/workload/schemachange/error_screening.go +++ b/pkg/workload/schemachange/error_screening.go @@ -680,7 +680,7 @@ func (og *operationGenerator) generateColumn( str := tree.AsStringWithFlags(tree.NewDString(*val[0][0]), tree.FmtParsable) return str, nil } - return fmt.Sprintf("'" + *val[0][0] + "'::" + colInfo.typ.SQLString()), nil + return "'" + *val[0][0] + "'::" + colInfo.typ.SQLString(), nil } return "NULL", nil } diff --git a/pkg/workload/schemachange/operation_generator.go b/pkg/workload/schemachange/operation_generator.go index 52a68b367d00..105fba06842a 100644 --- a/pkg/workload/schemachange/operation_generator.go +++ b/pkg/workload/schemachange/operation_generator.go @@ -1707,7 +1707,7 @@ func (og *operationGenerator) dropColumnDefault(ctx context.Context, tx pgx.Tx) } stmt := makeOpStmt(OpStmtDDL) - stmt.expectedExecErrors.addAll(codesWithConditions{ + stmt.potentialExecErrors.addAll(codesWithConditions{ {code: pgcode.UndefinedColumn, condition: !columnExists}, {code: pgcode.Syntax, condition: colIsVirtualComputed || colIsStoredComputed}, }) @@ -4066,7 +4066,8 @@ FROM if typeVal.Identical(types.AnyTuple) || typeVal.IsWildcardType() || typeVal == types.RegClass || - typeVal.Family() == types.OidFamily { + typeVal.Family() == types.OidFamily || + typeVal.Family() == types.VoidFamily { continue } if pgVectorNotSupported && typeVal.Family() == types.PGVectorFamily { diff --git a/scripts/drtprod b/scripts/drtprod index b4eed8dbb6d5..71a85167d2f5 100755 --- a/scripts/drtprod +++ b/scripts/drtprod @@ -45,9 +45,13 @@ case $1 in case $cluster in "drt-large") shift - set -- start "--binary" "./cockroach" --args=--log="file-defaults: {dir: 'logs', max-group-size: 1GiB}" --store-count=16 --restart=false "$@" + set -- start "--binary" "./cockroach" --args=--log="file-defaults: {dir: 'logs', max-group-size: 1GiB}" --store-count=16 --restart=false --args="--wal-failover=among-stores" "$@" roachprod run $cluster -- "sudo systemctl unmask cron.service ; sudo systemctl enable cron.service ; echo \"crontab -l ; echo '@reboot sleep 100 && ~/cockroach.sh' | crontab -\" > t.sh ; sh t.sh ; rm t.sh" ;; + "drt-chaos") + shift + set -- start "--binary" "./cockroach" --args=--log="file-defaults: {dir: 'logs', max-group-size: 1GiB}" --store-count=4 --args="--wal-failover=among-stores" "$@" + ;; "drt-ldr1"|"drt-ldr2") shift set -- start "--binary" "./cockroach" --args=--log="file-defaults: {dir: 'logs', max-group-size: 1GiB}" --restart=false "$@" @@ -536,11 +540,21 @@ EOF" $0 start drt-ldr2 --restart=false $0 sql drt-ldr1:1 -- -e "SET CLUSTER SETTING kv.rangefeed.enabled = true" + $0 sql drt-ldr1:1 -- -e "SET CLUSTER SETTING sql.ttl.default_delete_batch_size = 1000" + $0 sql drt-ldr1:1 -- -e "SET CLUSTER SETTING sql.ttl.default_delete_rate_limit = 2000" + $0 sql drt-ldr1:1 -- -e "SET CLUSTER SETTING sql.ttl.default_select_batch_size = 5000" + $0 sql drt-ldr2:1 -- -e "SET CLUSTER SETTING 
kv.rangefeed.enabled = true" + $0 sql drt-ldr2:1 -- -e "SET CLUSTER SETTING sql.ttl.default_delete_batch_size = 1000" + $0 sql drt-ldr2:1 -- -e "SET CLUSTER SETTING sql.ttl.default_delete_rate_limit = 2000" + $0 sql drt-ldr2:1 -- -e "SET CLUSTER SETTING sql.ttl.default_select_batch_size = 5000" # import the workload $0 sql drt-ldr1:1 -- -e "CREATE DATABASE ycsb" + $0 sql drt-ldr1:1 -- -e "ALTER DATABASE ycsb CONFIGURE ZONE USING gc.ttlseconds = 600" + $0 sql drt-ldr2:1 -- -e "CREATE DATABASE ycsb" + $0 sql drt-ldr2:1 -- -e "ALTER DATABASE ycsb CONFIGURE ZONE USING gc.ttlseconds = 600" $0 ssh drt-ldr1:1 "./cockroach workload init ycsb --workload=A --insert-count=1000 --families=false {pgurl:1}" $0 ssh drt-ldr2:1 "./cockroach workload init ycsb --workload=A --insert-count=1000 --families=false --insert-start=4511686018427387904 {pgurl:1}" @@ -551,7 +565,13 @@ EOF" $0 sql drt-ldr1:1 -- -e "CREATE EXTERNAL CONNECTION 'drt-ldr2' AS '${ldr2}'" $0 sql drt-ldr2:1 -- -e "CREATE EXTERNAL CONNECTION 'drt-ldr1' AS '${ldr1}'" + $0 sql drt-ldr1:1 -- -e "ALTER TABLE ycsb.public.usertable ADD COLUMN expired_at TIMESTAMPTZ NOT NULL DEFAULT now() + '30 minutes';" + # set row level ttl + $0 sql drt-ldr1:1 -- -e "ALTER TABLE ycsb.public.usertable SET (ttl_expiration_expression = 'expired_at', ttl_job_cron = '*/30 * * * *');" $0 sql drt-ldr1:1 -- -e "ALTER TABLE ycsb.public.usertable ADD COLUMN crdb_replication_origin_timestamp DECIMAL NOT VISIBLE DEFAULT NULL ON UPDATE NULL;" + + $0 sql drt-ldr2:1 -- -e "ALTER TABLE ycsb.public.usertable ADD COLUMN expired_at TIMESTAMPTZ NOT NULL DEFAULT now() + '30 minutes';" + $0 sql drt-ldr2:1 -- -e "ALTER TABLE ycsb.public.usertable SET (ttl_expiration_expression = 'expired_at', ttl_job_cron = '*/30 * * * *');" $0 sql drt-ldr2:1 -- -e "ALTER TABLE ycsb.public.usertable ADD COLUMN crdb_replication_origin_timestamp DECIMAL NOT VISIBLE DEFAULT NULL ON UPDATE NULL;" roachprod sql drt-ldr1:1 -- -e "CREATE LOGICAL REPLICATION STREAM FROM TABLE usertable ON 'external://drt-ldr2' INTO TABLE ycsb.public.usertable;" @@ -606,7 +626,8 @@ exec ./cockroach workload run ycsb \\ --insert-start=0 \\ --families=false \\ --tolerate-errors \\ - --workload='custom' --read-freq=0.1 --read-modify-write-freq 0.1 --insert-freq 0.3 --scan-freq 0.1 --update-freq 0.2 --delete-freq 0.2 \\ + --request-distribution=uniform \\ + --workload='custom' --read-freq=0.2 --read-modify-write-freq 0.1 --insert-freq 0.3 --scan-freq 0.1 --update-freq 0.2 --delete-freq 0.1 \\ \$(cat /home/ubuntu/pgurls.ldr1.txt) EOF' roachprod ssh workload-ldr:1 -- 'cat - > ycsb_run_ldr2.sh << EOF @@ -618,7 +639,8 @@ exec ./cockroach workload run ycsb \\ --tolerate-errors \\ --families=false \\ --insert-start=4611686018427387904 \\ - --workload='custom' --read-freq=0.2 --read-modify-write-freq 0.1 --insert-freq 0.3 --scan-freq 0.1 --update-freq 0.1 --delete-freq 0.2 \\ + --request-distribution=uniform \\ + --workload='custom' --read-freq=0.2 --read-modify-write-freq 0.1 --insert-freq 0.3 --scan-freq 0.1 --update-freq 0.2 --delete-freq 0.1 \\ \$(cat /home/ubuntu/pgurls.ldr2.txt) EOF' roachprod ssh workload-ldr:1 -- "chmod +x ./ycsb_run_ldr1.sh; diff --git a/scripts/ldr b/scripts/ldr index c103a66dbc19..dcd6170a9368 100755 --- a/scripts/ldr +++ b/scripts/ldr @@ -77,9 +77,9 @@ case $1 in "create") shift roachprod create $A \ - --clouds gce --gce-machine-type n2-standard-16 --nodes 3 --username "$USER" --lifetime 24h "$@" + --clouds gce --gce-machine-type n2-standard-16 --local-ssd=false --nodes 3 --username "$USER" --lifetime 24h "$@" 
roachprod create $B \ - --clouds gce --gce-machine-type n2-standard-16 --nodes 3 --username "$USER" --lifetime 24h "$@" + --clouds gce --gce-machine-type n2-standard-16 --local-ssd=false --nodes 3 --username "$USER" --lifetime 24h "$@" $0 stage cockroach $0 stage workload ;; @@ -142,9 +142,9 @@ case $1 in start_a=1000000 start_b=4611686018427387904 shift - roachprod run $A:1 "env -i nohup ./workload run ycsb --tolerate-errors --families=false --duration=24h --concurrency=16 --ramp=5s \ + roachprod run $A:1 "env -i nohup ./workload run ycsb --tolerate-errors --families=false --duration=24h --concurrency=75 --max-rate=7500 --ramp=5s \ --workload='custom' --insert-freq=1.0 --insert-start=${start_a} $@ $(roachprod pgurl $A) > $OUTPUT_FILE_A 2> $OUTPUT_FILE_A &" & - roachprod run $B:1 "env -i nohup ./workload run ycsb --tolerate-errors --families=false --duration=24h --concurrency=16 --ramp=5s \ + roachprod run $B:1 "env -i nohup ./workload run ycsb --tolerate-errors --families=false --duration=24h --concurrency=75 --max-rate=7500 --ramp=5s \ --workload='custom' --insert-freq=1.0 --insert-start=${start_b} $@ $(roachprod pgurl $B) > $OUTPUT_FILE_B 2> $OUTPUT_FILE_B &" & ;; "stop") diff --git a/testDump.raw b/testDump.raw deleted file mode 100644 index dfc6c435226f..000000000000 Binary files a/testDump.raw and /dev/null differ